repo_name (stringlengths 6-112) | path (stringlengths 4-204) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 714-810k) | license (stringclasses, 15 values)
---|---|---|---|---|---
Srisai85/scikit-learn | sklearn/preprocessing/data.py | 113 | 56747 |
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Andreas Mueller <[email protected]>
# Eric Martin <[email protected]>
# License: BSD 3 clause
from itertools import chain, combinations
import numbers
import warnings
import numpy as np
from scipy import sparse
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..utils import check_array
from ..utils.extmath import row_norms
from ..utils.fixes import combinations_with_replacement as combinations_w_r
from ..utils.sparsefuncs_fast import (inplace_csr_row_normalize_l1,
inplace_csr_row_normalize_l2)
from ..utils.sparsefuncs import (inplace_column_scale, mean_variance_axis,
min_max_axis, inplace_row_scale)
from ..utils.validation import check_is_fitted, FLOAT_DTYPES
zip = six.moves.zip
map = six.moves.map
range = six.moves.range
__all__ = [
'Binarizer',
'KernelCenterer',
'MinMaxScaler',
'MaxAbsScaler',
'Normalizer',
'OneHotEncoder',
'RobustScaler',
'StandardScaler',
'add_dummy_feature',
'binarize',
'normalize',
'scale',
'robust_scale',
'maxabs_scale',
'minmax_scale',
]
def _mean_and_std(X, axis=0, with_mean=True, with_std=True):
"""Compute mean and std deviation for centering, scaling.
Zero valued std components are reset to 1.0 to avoid NaNs when scaling.
"""
X = np.asarray(X)
Xr = np.rollaxis(X, axis)
if with_mean:
mean_ = Xr.mean(axis=0)
else:
mean_ = None
if with_std:
std_ = Xr.std(axis=0)
std_ = _handle_zeros_in_scale(std_)
else:
std_ = None
return mean_, std_
def _handle_zeros_in_scale(scale):
''' Makes sure that whenever scale is zero, we handle it correctly.
This happens in most scalers when we have constant features.'''
# if we are fitting on 1D arrays, scale might be a scalar
if np.isscalar(scale):
if scale == 0:
scale = 1.
elif isinstance(scale, np.ndarray):
scale[scale == 0.0] = 1.0
scale[~np.isfinite(scale)] = 1.0
return scale
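# A small usage sketch of the helper above (illustrative values, written as
# comments so module behaviour is unchanged):
#     _handle_zeros_in_scale(0.)                       # -> 1.0
#     _handle_zeros_in_scale(np.array([2., 0., 3.]))   # -> array([2., 1., 3.])
# Constant features are therefore divided by 1.0 instead of producing NaNs.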
def scale(X, axis=0, with_mean=True, with_std=True, copy=True):
"""Standardize a dataset along any axis
Center to the mean and component wise scale to unit variance.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
X : array-like or CSR matrix.
The data to center and scale.
axis : int (0 by default)
axis used to compute the means and standard deviations along. If 0,
independently standardize each feature, otherwise (if 1) standardize
each sample.
with_mean : boolean, True by default
If True, center the data before scaling.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default True
        set to False to perform inplace scaling and avoid a
        copy (if the input is already a numpy array or a scipy.sparse
        CSR matrix).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
`with_mean=False` (in that case, only variance scaling will be
performed on the features of the CSR matrix) or to call `X.toarray()`
if he/she expects the materialized dense array to fit in memory.
To avoid memory copy the caller should pass a CSR matrix.
See also
--------
:class:`sklearn.preprocessing.StandardScaler` to perform centering and
scaling using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
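    Examples
    --------
    A minimal sketch with hand-picked values; the expected result is given as
    comments rather than exact output::
        >>> import numpy as np
        >>> X = np.array([[1., 0.], [3., 0.], [5., 0.]])
        >>> Xt = scale(X)
        >>> # Xt is roughly [[-1.22, 0.], [0., 0.], [1.22, 0.]]: the first
        >>> # column is centered and scaled to unit variance, the constant
        >>> # column stays at zero because its std is reset to 1.0.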
"""
X = check_array(X, accept_sparse='csr', copy=copy, ensure_2d=False,
warn_on_dtype=True, estimator='the scale function',
dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` instead"
" See docstring for motivation and alternatives.")
if axis != 0:
raise ValueError("Can only scale sparse matrix on axis=0, "
" got axis=%d" % axis)
if not sparse.isspmatrix_csr(X):
X = X.tocsr()
copy = False
if copy:
X = X.copy()
_, var = mean_variance_axis(X, axis=0)
var = _handle_zeros_in_scale(var)
inplace_column_scale(X, 1 / np.sqrt(var))
else:
X = np.asarray(X)
mean_, std_ = _mean_and_std(
X, axis, with_mean=with_mean, with_std=with_std)
if copy:
X = X.copy()
        # Xr is a view on the original array that enables easy use of
        # broadcasting on the axis in which we are interested
Xr = np.rollaxis(X, axis)
if with_mean:
Xr -= mean_
mean_1 = Xr.mean(axis=0)
# Verify that mean_1 is 'close to zero'. If X contains very
# large values, mean_1 can also be very large, due to a lack of
            # precision of mean_. In this case, pre-scaling the affected
            # feature, for instance by its mean or maximum, can help.
if not np.allclose(mean_1, 0):
warnings.warn("Numerical issues were encountered "
"when centering the data "
"and might not be solved. Dataset may "
"contain too large values. You may need "
"to prescale your features.")
Xr -= mean_1
if with_std:
Xr /= std_
if with_mean:
mean_2 = Xr.mean(axis=0)
# If mean_2 is not 'close to zero', it comes from the fact that
# std_ is very small so that mean_2 = mean_1/std_ > 0, even if
# mean_1 was close to zero. The problem is thus essentially due
# to the lack of precision of mean_. A solution is then to
                # subtract the mean again:
if not np.allclose(mean_2, 0):
warnings.warn("Numerical issues were encountered "
"when scaling the data "
"and might not be solved. The standard "
"deviation of the data is probably "
"very close to 0. ")
Xr -= mean_2
return X
class MinMaxScaler(BaseEstimator, TransformerMixin):
"""Transforms features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
that it is in the given range on the training set, i.e. between
zero and one.
The transformation is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This transformation is often used as an alternative to zero mean,
unit variance scaling.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
feature_range: tuple (min, max), default=(0, 1)
Desired range of transformed data.
copy : boolean, optional, default True
        Set to False to perform inplace scaling and avoid a
copy (if the input is already a numpy array).
Attributes
----------
min_ : ndarray, shape (n_features,)
Per feature adjustment for minimum.
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
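    Examples
    --------
    A small illustrative sketch (values chosen by hand, expected result given
    as comments)::
        >>> import numpy as np
        >>> X = np.array([[1., 2.], [3., 4.], [5., 6.]])
        >>> scaler = MinMaxScaler(feature_range=(0, 1))
        >>> Xt = scaler.fit_transform(X)
        >>> # Xt is [[0., 0.], [0.5, 0.5], [1., 1.]]: each column is mapped
        >>> # linearly onto [0, 1] using its own minimum and maximum.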
"""
def __init__(self, feature_range=(0, 1), copy=True):
self.feature_range = feature_range
self.copy = copy
def fit(self, X, y=None):
"""Compute the minimum and maximum to be used for later scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.
"""
X = check_array(X, copy=self.copy, ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
feature_range = self.feature_range
if feature_range[0] >= feature_range[1]:
raise ValueError("Minimum of desired feature range must be smaller"
" than maximum. Got %s." % str(feature_range))
data_min = np.min(X, axis=0)
data_range = np.max(X, axis=0) - data_min
data_range = _handle_zeros_in_scale(data_range)
self.scale_ = (feature_range[1] - feature_range[0]) / data_range
self.min_ = feature_range[0] - data_min * self.scale_
self.data_range = data_range
self.data_min = data_min
return self
def transform(self, X):
"""Scaling features of X according to feature_range.
Parameters
----------
X : array-like with shape [n_samples, n_features]
Input data that will be transformed.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, ensure_2d=False)
X *= self.scale_
X += self.min_
return X
def inverse_transform(self, X):
"""Undo the scaling of X according to feature_range.
Parameters
----------
X : array-like with shape [n_samples, n_features]
Input data that will be transformed.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, ensure_2d=False)
X -= self.min_
X /= self.scale_
return X
def minmax_scale(X, feature_range=(0, 1), axis=0, copy=True):
"""Transforms features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
that it is in the given range on the training set, i.e. between
zero and one.
The transformation is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This transformation is often used as an alternative to zero mean,
unit variance scaling.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
feature_range: tuple (min, max), default=(0, 1)
Desired range of transformed data.
axis : int (0 by default)
axis used to scale along. If 0, independently scale each feature,
otherwise (if 1) scale each sample.
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
"""
s = MinMaxScaler(feature_range=feature_range, copy=copy)
if axis == 0:
return s.fit_transform(X)
else:
return s.fit_transform(X.T).T
class StandardScaler(BaseEstimator, TransformerMixin):
"""Standardize features by removing the mean and scaling to unit variance
Centering and scaling happen independently on each feature by computing
the relevant statistics on the samples in the training set. Mean and
standard deviation are then stored to be used on later data using the
`transform` method.
Standardization of a dataset is a common requirement for many
machine learning estimators: they might behave badly if the
    individual features do not more or less look like standard normally
distributed data (e.g. Gaussian with 0 mean and unit variance).
For instance many elements used in the objective function of
a learning algorithm (such as the RBF kernel of Support Vector
Machines or the L1 and L2 regularizers of linear models) assume that
all features are centered around 0 and have variance in the same
order. If a feature has a variance that is orders of magnitude larger
    than others, it might dominate the objective function and make the
estimator unable to learn from other features correctly as expected.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
with_mean : boolean, True by default
If True, center the data before scaling.
This does not work (and will raise an exception) when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
Attributes
----------
mean_ : array of floats with shape [n_features]
The mean value for each feature in the training set.
std_ : array of floats with shape [n_features]
The standard deviation for each feature in the training set.
Set to one if the standard deviation is zero for a given feature.
See also
--------
:func:`sklearn.preprocessing.scale` to perform centering and
scaling without using the ``Transformer`` object oriented API
:class:`sklearn.decomposition.RandomizedPCA` with `whiten=True`
to further remove the linear correlation across features.
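    Examples
    --------
    A brief sketch (hand-picked values; the fitted statistics are given as
    comments)::
        >>> import numpy as np
        >>> X = np.array([[0., 10.], [1., 12.], [2., 14.]])
        >>> scaler = StandardScaler().fit(X)
        >>> # scaler.mean_ is [1., 12.] and scaler.std_ is roughly
        >>> # [0.82, 1.63]; transform(X) yields zero-mean, unit-variance
        >>> # columns.
        >>> Xt = scaler.transform(X)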
"""
def __init__(self, copy=True, with_mean=True, with_std=True):
self.with_mean = with_mean
self.with_std = with_std
self.copy = copy
def fit(self, X, y=None):
"""Compute the mean and std to be used for later scaling.
Parameters
----------
X : array-like or CSR matrix with shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
"""
X = check_array(X, accept_sparse='csr', copy=self.copy,
ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
self.mean_ = None
if self.with_std:
var = mean_variance_axis(X, axis=0)[1]
self.std_ = np.sqrt(var)
self.std_ = _handle_zeros_in_scale(self.std_)
else:
self.std_ = None
return self
else:
self.mean_, self.std_ = _mean_and_std(
X, axis=0, with_mean=self.with_mean, with_std=self.with_std)
return self
def transform(self, X, y=None, copy=None):
"""Perform standardization by centering and scaling
Parameters
----------
X : array-like with shape [n_samples, n_features]
The data used to scale along the features axis.
"""
check_is_fitted(self, 'std_')
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr', copy=copy,
ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
if self.std_ is not None:
inplace_column_scale(X, 1 / self.std_)
else:
if self.with_mean:
X -= self.mean_
if self.with_std:
X /= self.std_
return X
def inverse_transform(self, X, copy=None):
"""Scale back the data to the original representation
Parameters
----------
X : array-like with shape [n_samples, n_features]
The data used to scale along the features axis.
"""
check_is_fitted(self, 'std_')
copy = copy if copy is not None else self.copy
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot uncenter sparse matrices: pass `with_mean=False` "
"instead See docstring for motivation and alternatives.")
if not sparse.isspmatrix_csr(X):
X = X.tocsr()
copy = False
if copy:
X = X.copy()
if self.std_ is not None:
inplace_column_scale(X, self.std_)
else:
X = np.asarray(X)
if copy:
X = X.copy()
if self.with_std:
X *= self.std_
if self.with_mean:
X += self.mean_
return X
class MaxAbsScaler(BaseEstimator, TransformerMixin):
"""Scale each feature by its maximum absolute value.
    This estimator scales each feature individually such
that the maximal absolute value of each feature in the
training set will be 1.0. It does not shift/center the data, and
thus does not destroy any sparsity.
This scaler can also be applied to sparse CSR or CSC matrices.
Parameters
----------
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
Attributes
----------
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
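    Examples
    --------
    A small sketch (illustrative values, expected result as comments)::
        >>> import numpy as np
        >>> X = np.array([[1., -2.], [-4., 1.]])
        >>> Xt = MaxAbsScaler().fit_transform(X)
        >>> # Xt is [[0.25, -1.], [-1., 0.5]]: each column is divided by its
        >>> # maximum absolute value (4 and 2 here), so zeros stay zeros and
        >>> # sparsity is preserved.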
"""
def __init__(self, copy=True):
self.copy = copy
def fit(self, X, y=None):
"""Compute the minimum and maximum to be used for later scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.
"""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
mins, maxs = min_max_axis(X, axis=0)
scales = np.maximum(np.abs(mins), np.abs(maxs))
else:
scales = np.abs(X).max(axis=0)
scales = np.array(scales)
scales = scales.reshape(-1)
self.scale_ = _handle_zeros_in_scale(scales)
return self
def transform(self, X, y=None):
"""Scale the data
Parameters
----------
X : array-like or CSR matrix.
The data that should be scaled.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if X.shape[0] == 1:
inplace_row_scale(X, 1.0 / self.scale_)
else:
inplace_column_scale(X, 1.0 / self.scale_)
else:
X /= self.scale_
return X
def inverse_transform(self, X):
"""Scale back the data to the original representation
Parameters
----------
X : array-like or CSR matrix.
The data that should be transformed back.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if X.shape[0] == 1:
inplace_row_scale(X, self.scale_)
else:
inplace_column_scale(X, self.scale_)
else:
X *= self.scale_
return X
def maxabs_scale(X, axis=0, copy=True):
"""Scale each feature to the [-1, 1] range without breaking the sparsity.
This estimator scales each feature individually such
that the maximal absolute value of each feature in the
training set will be 1.0.
This scaler can also be applied to sparse CSR or CSC matrices.
Parameters
----------
axis : int (0 by default)
axis used to scale along. If 0, independently scale each feature,
otherwise (if 1) scale each sample.
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
"""
s = MaxAbsScaler(copy=copy)
if axis == 0:
return s.fit_transform(X)
else:
return s.fit_transform(X.T).T
class RobustScaler(BaseEstimator, TransformerMixin):
"""Scale features using statistics that are robust to outliers.
This Scaler removes the median and scales the data according to
the Interquartile Range (IQR). The IQR is the range between the 1st
quartile (25th quantile) and the 3rd quartile (75th quantile).
Centering and scaling happen independently on each feature (or each
sample, depending on the `axis` argument) by computing the relevant
statistics on the samples in the training set. Median and interquartile
range are then stored to be used on later data using the `transform`
method.
Standardization of a dataset is a common requirement for many
machine learning estimators. Typically this is done by removing the mean
and scaling to unit variance. However, outliers can often influence the
sample mean / variance in a negative way. In such cases, the median and
the interquartile range often give better results.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
with_centering : boolean, True by default
If True, center the data before scaling.
This does not work (and will raise an exception) when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_scaling : boolean, True by default
If True, scale the data to interquartile range.
copy : boolean, optional, default is True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
Attributes
----------
center_ : array of floats
The median value for each feature in the training set.
scale_ : array of floats
The (scaled) interquartile range for each feature in the training set.
See also
--------
:class:`sklearn.preprocessing.StandardScaler` to perform centering
and scaling using mean and variance.
:class:`sklearn.decomposition.RandomizedPCA` with `whiten=True`
to further remove the linear correlation across features.
Notes
-----
See examples/preprocessing/plot_robust_scaling.py for an example.
http://en.wikipedia.org/wiki/Median_(statistics)
http://en.wikipedia.org/wiki/Interquartile_range
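    Examples
    --------
    A brief sketch with an outlier (illustrative values; the fitted statistics
    are given as comments)::
        >>> import numpy as np
        >>> X = np.array([[1.], [2.], [3.], [4.], [100.]])
        >>> scaler = RobustScaler().fit(X)
        >>> # scaler.center_ is [3.] (the median) and scaler.scale_ is [2.]
        >>> # (the interquartile range 4 - 2), so the outlier at 100 barely
        >>> # influences the scaling.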
"""
def __init__(self, with_centering=True, with_scaling=True, copy=True):
self.with_centering = with_centering
self.with_scaling = with_scaling
self.copy = copy
def _check_array(self, X, copy):
"""Makes sure centering is not enabled for sparse matrices."""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if self.with_centering:
raise ValueError(
"Cannot center sparse matrices: use `with_centering=False`"
" instead. See docstring for motivation and alternatives.")
return X
def fit(self, X, y=None):
"""Compute the median and quantiles to be used for scaling.
Parameters
----------
X : array-like with shape [n_samples, n_features]
The data used to compute the median and quantiles
used for later scaling along the features axis.
"""
if sparse.issparse(X):
raise TypeError("RobustScaler cannot be fitted on sparse inputs")
X = self._check_array(X, self.copy)
if self.with_centering:
self.center_ = np.median(X, axis=0)
if self.with_scaling:
q = np.percentile(X, (25, 75), axis=0)
self.scale_ = (q[1] - q[0])
self.scale_ = _handle_zeros_in_scale(self.scale_)
return self
def transform(self, X, y=None):
"""Center and scale the data
Parameters
----------
X : array-like or CSR matrix.
The data used to scale along the specified axis.
"""
if self.with_centering:
check_is_fitted(self, 'center_')
if self.with_scaling:
check_is_fitted(self, 'scale_')
X = self._check_array(X, self.copy)
if sparse.issparse(X):
if self.with_scaling:
if X.shape[0] == 1:
inplace_row_scale(X, 1.0 / self.scale_)
                else:
inplace_column_scale(X, 1.0 / self.scale_)
else:
if self.with_centering:
X -= self.center_
if self.with_scaling:
X /= self.scale_
return X
def inverse_transform(self, X):
"""Scale back the data to the original representation
Parameters
----------
X : array-like or CSR matrix.
The data used to scale along the specified axis.
"""
if self.with_centering:
check_is_fitted(self, 'center_')
if self.with_scaling:
check_is_fitted(self, 'scale_')
X = self._check_array(X, self.copy)
if sparse.issparse(X):
if self.with_scaling:
if X.shape[0] == 1:
inplace_row_scale(X, self.scale_)
else:
inplace_column_scale(X, self.scale_)
else:
if self.with_scaling:
X *= self.scale_
if self.with_centering:
X += self.center_
return X
def robust_scale(X, axis=0, with_centering=True, with_scaling=True, copy=True):
"""Standardize a dataset along any axis
Center to the median and component wise scale
according to the interquartile range.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
X : array-like.
The data to center and scale.
axis : int (0 by default)
axis used to compute the medians and IQR along. If 0,
independently scale each feature, otherwise (if 1) scale
each sample.
with_centering : boolean, True by default
If True, center the data before scaling.
with_scaling : boolean, True by default
        If True, scale the data to the interquartile range.
copy : boolean, optional, default is True
        set to False to perform inplace scaling and avoid a
        copy (if the input is already a numpy array).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
`with_centering=False` (in that case, only variance scaling will be
performed on the features of the CSR matrix) or to call `X.toarray()`
if he/she expects the materialized dense array to fit in memory.
To avoid memory copy the caller should pass a CSR matrix.
See also
--------
:class:`sklearn.preprocessing.RobustScaler` to perform centering and
scaling using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
s = RobustScaler(with_centering=with_centering, with_scaling=with_scaling,
copy=copy)
if axis == 0:
return s.fit_transform(X)
else:
return s.fit_transform(X.T).T
class PolynomialFeatures(BaseEstimator, TransformerMixin):
"""Generate polynomial and interaction features.
Generate a new feature matrix consisting of all polynomial combinations
of the features with degree less than or equal to the specified degree.
For example, if an input sample is two dimensional and of the form
[a, b], the degree-2 polynomial features are [1, a, b, a^2, ab, b^2].
Parameters
----------
degree : integer
The degree of the polynomial features. Default = 2.
interaction_only : boolean, default = False
If true, only interaction features are produced: features that are
products of at most ``degree`` *distinct* input features (so not
``x[1] ** 2``, ``x[0] * x[2] ** 3``, etc.).
include_bias : boolean
If True (default), then include a bias column, the feature in which
all polynomial powers are zero (i.e. a column of ones - acts as an
intercept term in a linear model).
Examples
--------
>>> X = np.arange(6).reshape(3, 2)
>>> X
array([[0, 1],
[2, 3],
[4, 5]])
>>> poly = PolynomialFeatures(2)
>>> poly.fit_transform(X)
array([[ 1, 0, 1, 0, 0, 1],
[ 1, 2, 3, 4, 6, 9],
[ 1, 4, 5, 16, 20, 25]])
>>> poly = PolynomialFeatures(interaction_only=True)
>>> poly.fit_transform(X)
array([[ 1, 0, 1, 0],
[ 1, 2, 3, 6],
[ 1, 4, 5, 20]])
Attributes
----------
powers_ : array, shape (n_input_features, n_output_features)
powers_[i, j] is the exponent of the jth input in the ith output.
n_input_features_ : int
The total number of input features.
n_output_features_ : int
The total number of polynomial output features. The number of output
features is computed by iterating over all suitably sized combinations
of input features.
Notes
-----
Be aware that the number of features in the output array scales
polynomially in the number of features of the input array, and
exponentially in the degree. High degrees can cause overfitting.
See :ref:`examples/linear_model/plot_polynomial_interpolation.py
<example_linear_model_plot_polynomial_interpolation.py>`
"""
def __init__(self, degree=2, interaction_only=False, include_bias=True):
self.degree = degree
self.interaction_only = interaction_only
self.include_bias = include_bias
@staticmethod
def _combinations(n_features, degree, interaction_only, include_bias):
comb = (combinations if interaction_only else combinations_w_r)
start = int(not include_bias)
return chain.from_iterable(comb(range(n_features), i)
for i in range(start, degree + 1))
@property
def powers_(self):
check_is_fitted(self, 'n_input_features_')
combinations = self._combinations(self.n_input_features_, self.degree,
self.interaction_only,
self.include_bias)
        return np.vstack([np.bincount(c, minlength=self.n_input_features_)
                          for c in combinations])
def fit(self, X, y=None):
"""
Compute number of output features.
"""
n_samples, n_features = check_array(X).shape
combinations = self._combinations(n_features, self.degree,
self.interaction_only,
self.include_bias)
self.n_input_features_ = n_features
self.n_output_features_ = sum(1 for _ in combinations)
return self
def transform(self, X, y=None):
"""Transform data to polynomial features
Parameters
----------
X : array with shape [n_samples, n_features]
The data to transform, row by row.
Returns
-------
XP : np.ndarray shape [n_samples, NP]
The matrix of features, where NP is the number of polynomial
features generated from the combination of inputs.
"""
check_is_fitted(self, ['n_input_features_', 'n_output_features_'])
X = check_array(X)
n_samples, n_features = X.shape
if n_features != self.n_input_features_:
raise ValueError("X shape does not match training shape")
# allocate output data
XP = np.empty((n_samples, self.n_output_features_), dtype=X.dtype)
combinations = self._combinations(n_features, self.degree,
self.interaction_only,
self.include_bias)
for i, c in enumerate(combinations):
XP[:, i] = X[:, c].prod(1)
return XP
def normalize(X, norm='l2', axis=1, copy=True):
"""Scale input vectors individually to unit norm (vector length).
Read more in the :ref:`User Guide <preprocessing_normalization>`.
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to normalize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
norm : 'l1', 'l2', or 'max', optional ('l2' by default)
The norm to use to normalize each non zero sample (or each non-zero
feature if axis is 0).
axis : 0 or 1, optional (1 by default)
axis used to normalize the data along. If 1, independently normalize
each sample, otherwise (if 0) normalize each feature.
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
See also
--------
:class:`sklearn.preprocessing.Normalizer` to perform normalization
using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
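    Examples
    --------
    A small sketch of the 'l1' and 'max' norms (illustrative values, expected
    results as comments)::
        >>> import numpy as np
        >>> X = np.array([[1., 3.], [2., 2.]])
        >>> X_l1 = normalize(X, norm='l1')
        >>> # X_l1 is [[0.25, 0.75], [0.5, 0.5]]: each row now sums to one.
        >>> X_max = normalize(X, norm='max')
        >>> # X_max is [[1/3, 1.], [1., 1.]]: each row is divided by its
        >>> # largest value.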
"""
if norm not in ('l1', 'l2', 'max'):
raise ValueError("'%s' is not a supported norm" % norm)
if axis == 0:
sparse_format = 'csc'
elif axis == 1:
sparse_format = 'csr'
else:
raise ValueError("'%d' is not a supported axis" % axis)
X = check_array(X, sparse_format, copy=copy, warn_on_dtype=True,
estimator='the normalize function', dtype=FLOAT_DTYPES)
if axis == 0:
X = X.T
if sparse.issparse(X):
if norm == 'l1':
inplace_csr_row_normalize_l1(X)
elif norm == 'l2':
inplace_csr_row_normalize_l2(X)
elif norm == 'max':
_, norms = min_max_axis(X, 1)
norms = norms.repeat(np.diff(X.indptr))
mask = norms != 0
X.data[mask] /= norms[mask]
else:
if norm == 'l1':
norms = np.abs(X).sum(axis=1)
elif norm == 'l2':
norms = row_norms(X)
elif norm == 'max':
norms = np.max(X, axis=1)
norms = _handle_zeros_in_scale(norms)
X /= norms[:, np.newaxis]
if axis == 0:
X = X.T
return X
class Normalizer(BaseEstimator, TransformerMixin):
"""Normalize samples individually to unit norm.
Each sample (i.e. each row of the data matrix) with at least one
non zero component is rescaled independently of other samples so
that its norm (l1 or l2) equals one.
This transformer is able to work both with dense numpy arrays and
scipy.sparse matrix (use CSR format if you want to avoid the burden of
a copy / conversion).
    Scaling inputs to unit norms is a common operation for text
    classification or clustering, for instance. The dot
    product of two l2-normalized TF-IDF vectors is the cosine similarity
of the vectors and is the base similarity metric for the Vector
Space Model commonly used by the Information Retrieval community.
Read more in the :ref:`User Guide <preprocessing_normalization>`.
Parameters
----------
norm : 'l1', 'l2', or 'max', optional ('l2' by default)
The norm to use to normalize each non zero sample.
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix).
Notes
-----
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
See also
--------
:func:`sklearn.preprocessing.normalize` equivalent function
without the object oriented API
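    Examples
    --------
    A minimal sketch (illustrative values; the expected result is commented)::
        >>> import numpy as np
        >>> X = np.array([[3., 4.], [1., 0.]])
        >>> Xt = Normalizer(norm='l2').fit_transform(X)
        >>> # Xt is [[0.6, 0.8], [1., 0.]]: every row now has unit l2 norm,
        >>> # so dot products between rows are cosine similarities.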
"""
def __init__(self, norm='l2', copy=True):
self.norm = norm
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
X = check_array(X, accept_sparse='csr')
return self
def transform(self, X, y=None, copy=None):
"""Scale each non zero row of X to unit norm
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to normalize, row by row. scipy.sparse matrices should be
in CSR format to avoid an un-necessary copy.
"""
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr')
return normalize(X, norm=self.norm, axis=1, copy=copy)
def binarize(X, threshold=0.0, copy=True):
"""Boolean thresholding of array-like or scipy.sparse matrix
Read more in the :ref:`User Guide <preprocessing_binarization>`.
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR or CSC format to avoid an
un-necessary copy.
threshold : float, optional (0.0 by default)
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : boolean, optional, default True
        set to False to perform inplace binarization and avoid a copy
        (if the input is already a numpy array or a scipy.sparse CSR / CSC
        matrix).
See also
--------
:class:`sklearn.preprocessing.Binarizer` to perform binarization
using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
X = check_array(X, accept_sparse=['csr', 'csc'], copy=copy)
if sparse.issparse(X):
if threshold < 0:
raise ValueError('Cannot binarize a sparse matrix with threshold '
'< 0')
cond = X.data > threshold
not_cond = np.logical_not(cond)
X.data[cond] = 1
X.data[not_cond] = 0
X.eliminate_zeros()
else:
cond = X > threshold
not_cond = np.logical_not(cond)
X[cond] = 1
X[not_cond] = 0
return X
class Binarizer(BaseEstimator, TransformerMixin):
"""Binarize data (set feature values to 0 or 1) according to a threshold
Values greater than the threshold map to 1, while values less than
or equal to the threshold map to 0. With the default threshold of 0,
only positive values map to 1.
Binarization is a common operation on text count data where the
analyst can decide to only consider the presence or absence of a
feature rather than a quantified number of occurrences for instance.
It can also be used as a pre-processing step for estimators that
consider boolean random variables (e.g. modelled using the Bernoulli
distribution in a Bayesian setting).
Read more in the :ref:`User Guide <preprocessing_binarization>`.
Parameters
----------
threshold : float, optional (0.0 by default)
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : boolean, optional, default True
set to False to perform inplace binarization and avoid a copy (if
the input is already a numpy array or a scipy.sparse CSR matrix).
Notes
-----
If the input is a sparse matrix, only the non-zero values are subject
to update by the Binarizer class.
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
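    Examples
    --------
    A small sketch (illustrative values, expected result as a comment)::
        >>> import numpy as np
        >>> X = np.array([[0.4, -1.2], [2.5, 0.0]])
        >>> Xt = Binarizer(threshold=0.5).fit_transform(X)
        >>> # Xt is [[0., 0.], [1., 0.]]: values strictly above 0.5 become 1,
        >>> # everything else becomes 0.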
"""
def __init__(self, threshold=0.0, copy=True):
self.threshold = threshold
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
check_array(X, accept_sparse='csr')
return self
def transform(self, X, y=None, copy=None):
"""Binarize each element of X
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
"""
copy = copy if copy is not None else self.copy
return binarize(X, threshold=self.threshold, copy=copy)
class KernelCenterer(BaseEstimator, TransformerMixin):
"""Center a kernel matrix
Let K(x, z) be a kernel defined by phi(x)^T phi(z), where phi is a
function mapping x to a Hilbert space. KernelCenterer centers (i.e.,
    normalizes to have zero mean) the data without explicitly computing phi(x).
It is equivalent to centering phi(x) with
sklearn.preprocessing.StandardScaler(with_std=False).
Read more in the :ref:`User Guide <kernel_centering>`.
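    Examples
    --------
    For intuition only, centering a kernel matrix computed on the training
    data is equivalent to the dense double-centering sketched below::
        >>> import numpy as np
        >>> K = np.array([[2., 1.], [1., 3.]])
        >>> n = K.shape[0]
        >>> H = np.eye(n) - np.ones((n, n)) / n
        >>> K_centered = H.dot(K).dot(H)
        >>> # KernelCenterer().fit(K).transform(K) returns the same matrix
        >>> # without forming H explicitly.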
"""
def fit(self, K, y=None):
"""Fit KernelCenterer
Parameters
----------
K : numpy array of shape [n_samples, n_samples]
Kernel matrix.
Returns
-------
self : returns an instance of self.
"""
K = check_array(K)
n_samples = K.shape[0]
self.K_fit_rows_ = np.sum(K, axis=0) / n_samples
self.K_fit_all_ = self.K_fit_rows_.sum() / n_samples
return self
def transform(self, K, y=None, copy=True):
"""Center kernel matrix.
Parameters
----------
K : numpy array of shape [n_samples1, n_samples2]
Kernel matrix.
copy : boolean, optional, default True
Set to False to perform inplace computation.
Returns
-------
K_new : numpy array of shape [n_samples1, n_samples2]
"""
check_is_fitted(self, 'K_fit_all_')
K = check_array(K)
if copy:
K = K.copy()
K_pred_cols = (np.sum(K, axis=1) /
self.K_fit_rows_.shape[0])[:, np.newaxis]
K -= self.K_fit_rows_
K -= K_pred_cols
K += self.K_fit_all_
return K
def add_dummy_feature(X, value=1.0):
"""Augment dataset with an additional dummy feature.
This is useful for fitting an intercept term with implementations which
cannot otherwise fit it directly.
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
Data.
value : float
Value to use for the dummy feature.
Returns
-------
X : array or scipy.sparse matrix with shape [n_samples, n_features + 1]
Same data with dummy feature added as first column.
Examples
--------
>>> from sklearn.preprocessing import add_dummy_feature
>>> add_dummy_feature([[0, 1], [1, 0]])
array([[ 1., 0., 1.],
[ 1., 1., 0.]])
"""
X = check_array(X, accept_sparse=['csc', 'csr', 'coo'])
n_samples, n_features = X.shape
shape = (n_samples, n_features + 1)
if sparse.issparse(X):
if sparse.isspmatrix_coo(X):
# Shift columns to the right.
col = X.col + 1
# Column indices of dummy feature are 0 everywhere.
col = np.concatenate((np.zeros(n_samples), col))
# Row indices of dummy feature are 0, ..., n_samples-1.
row = np.concatenate((np.arange(n_samples), X.row))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.ones(n_samples) * value, X.data))
return sparse.coo_matrix((data, (row, col)), shape)
elif sparse.isspmatrix_csc(X):
# Shift index pointers since we need to add n_samples elements.
indptr = X.indptr + n_samples
# indptr[0] must be 0.
indptr = np.concatenate((np.array([0]), indptr))
# Row indices of dummy feature are 0, ..., n_samples-1.
indices = np.concatenate((np.arange(n_samples), X.indices))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.ones(n_samples) * value, X.data))
return sparse.csc_matrix((data, indices, indptr), shape)
else:
klass = X.__class__
return klass(add_dummy_feature(X.tocoo(), value))
else:
return np.hstack((np.ones((n_samples, 1)) * value, X))
def _transform_selected(X, transform, selected="all", copy=True):
"""Apply a transform function to portion of selected features
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Dense array or sparse matrix.
transform : callable
A callable transform(X) -> X_transformed
copy : boolean, optional
Copy X even if it could be avoided.
selected: "all" or array of indices or mask
Specify which features to apply the transform to.
Returns
-------
X : array or sparse matrix, shape=(n_samples, n_features_new)
"""
if selected == "all":
return transform(X)
X = check_array(X, accept_sparse='csc', copy=copy)
if len(selected) == 0:
return X
n_features = X.shape[1]
ind = np.arange(n_features)
sel = np.zeros(n_features, dtype=bool)
sel[np.asarray(selected)] = True
not_sel = np.logical_not(sel)
n_selected = np.sum(sel)
if n_selected == 0:
# No features selected.
return X
elif n_selected == n_features:
# All features selected.
return transform(X)
else:
X_sel = transform(X[:, ind[sel]])
X_not_sel = X[:, ind[not_sel]]
if sparse.issparse(X_sel) or sparse.issparse(X_not_sel):
return sparse.hstack((X_sel, X_not_sel))
else:
return np.hstack((X_sel, X_not_sel))
class OneHotEncoder(BaseEstimator, TransformerMixin):
"""Encode categorical integer features using a one-hot aka one-of-K scheme.
The input to this transformer should be a matrix of integers, denoting
the values taken on by categorical (discrete) features. The output will be
a sparse matrix where each column corresponds to one possible value of one
feature. It is assumed that input features take on values in the range
[0, n_values).
This encoding is needed for feeding categorical data to many scikit-learn
estimators, notably linear models and SVMs with the standard kernels.
Read more in the :ref:`User Guide <preprocessing_categorical_features>`.
Parameters
----------
n_values : 'auto', int or array of ints
Number of values per feature.
- 'auto' : determine value range from training data.
- int : maximum value for all features.
- array : maximum value per feature.
categorical_features: "all" or array of indices or mask
Specify what features are treated as categorical.
- 'all' (default): All features are treated as categorical.
- array of indices: Array of categorical feature indices.
- mask: Array of length n_features and with dtype=bool.
Non-categorical features are always stacked to the right of the matrix.
dtype : number type, default=np.float
Desired dtype of output.
sparse : boolean, default=True
Will return sparse matrix if set True else will return an array.
handle_unknown : str, 'error' or 'ignore'
        Whether to raise an error or ignore if an unknown categorical feature is
present during transform.
Attributes
----------
active_features_ : array
Indices for active features, meaning values that actually occur
in the training set. Only available when n_values is ``'auto'``.
feature_indices_ : array of shape (n_features,)
Indices to feature ranges.
Feature ``i`` in the original data is mapped to features
from ``feature_indices_[i]`` to ``feature_indices_[i+1]``
(and then potentially masked by `active_features_` afterwards)
n_values_ : array of shape (n_features,)
Maximum number of values per feature.
Examples
--------
Given a dataset with three features and two samples, we let the encoder
find the maximum value per feature and transform the data to a binary
one-hot encoding.
>>> from sklearn.preprocessing import OneHotEncoder
>>> enc = OneHotEncoder()
>>> enc.fit([[0, 0, 3], [1, 1, 0], [0, 2, 1], \
[1, 0, 2]]) # doctest: +ELLIPSIS
OneHotEncoder(categorical_features='all', dtype=<... 'float'>,
handle_unknown='error', n_values='auto', sparse=True)
>>> enc.n_values_
array([2, 3, 4])
>>> enc.feature_indices_
array([0, 2, 5, 9])
>>> enc.transform([[0, 1, 1]]).toarray()
array([[ 1., 0., 0., 1., 0., 0., 1., 0., 0.]])
See also
--------
sklearn.feature_extraction.DictVectorizer : performs a one-hot encoding of
dictionary items (also handles string-valued features).
sklearn.feature_extraction.FeatureHasher : performs an approximate one-hot
encoding of dictionary items or strings.
"""
def __init__(self, n_values="auto", categorical_features="all",
dtype=np.float, sparse=True, handle_unknown='error'):
self.n_values = n_values
self.categorical_features = categorical_features
self.dtype = dtype
self.sparse = sparse
self.handle_unknown = handle_unknown
def fit(self, X, y=None):
"""Fit OneHotEncoder to X.
Parameters
----------
X : array-like, shape=(n_samples, n_feature)
Input array of type int.
Returns
-------
self
"""
self.fit_transform(X)
return self
def _fit_transform(self, X):
"""Assumes X contains only categorical features."""
X = check_array(X, dtype=np.int)
if np.any(X < 0):
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
if self.n_values == 'auto':
n_values = np.max(X, axis=0) + 1
elif isinstance(self.n_values, numbers.Integral):
if (np.max(X, axis=0) >= self.n_values).any():
raise ValueError("Feature out of bounds for n_values=%d"
% self.n_values)
n_values = np.empty(n_features, dtype=np.int)
n_values.fill(self.n_values)
else:
try:
n_values = np.asarray(self.n_values, dtype=int)
except (ValueError, TypeError):
raise TypeError("Wrong type for parameter `n_values`. Expected"
" 'auto', int or array of ints, got %r"
% type(X))
if n_values.ndim < 1 or n_values.shape[0] != X.shape[1]:
raise ValueError("Shape mismatch: if n_values is an array,"
" it has to be of shape (n_features,).")
self.n_values_ = n_values
n_values = np.hstack([[0], n_values])
indices = np.cumsum(n_values)
self.feature_indices_ = indices
column_indices = (X + indices[:-1]).ravel()
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)
data = np.ones(n_samples * n_features)
out = sparse.coo_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=self.dtype).tocsr()
if self.n_values == 'auto':
mask = np.array(out.sum(axis=0)).ravel() != 0
active_features = np.where(mask)[0]
out = out[:, active_features]
self.active_features_ = active_features
return out if self.sparse else out.toarray()
def fit_transform(self, X, y=None):
"""Fit OneHotEncoder to X, then transform X.
Equivalent to self.fit(X).transform(X), but more convenient and more
efficient. See fit for the parameters, transform for the return value.
"""
return _transform_selected(X, self._fit_transform,
self.categorical_features, copy=True)
def _transform(self, X):
"""Assumes X contains only categorical features."""
X = check_array(X, dtype=np.int)
if np.any(X < 0):
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
indices = self.feature_indices_
if n_features != indices.shape[0] - 1:
raise ValueError("X has different shape than during fitting."
" Expected %d, got %d."
% (indices.shape[0] - 1, n_features))
        # We use only those categorical features of X that are known using fit.
        # i.e. less than n_values_ using mask.
# This means, if self.handle_unknown is "ignore", the row_indices and
# col_indices corresponding to the unknown categorical feature are
# ignored.
mask = (X < self.n_values_).ravel()
if np.any(~mask):
if self.handle_unknown not in ['error', 'ignore']:
raise ValueError("handle_unknown should be either error or "
"unknown got %s" % self.handle_unknown)
if self.handle_unknown == 'error':
raise ValueError("unknown categorical feature present %s "
"during transform." % X[~mask])
column_indices = (X + indices[:-1]).ravel()[mask]
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)[mask]
data = np.ones(np.sum(mask))
out = sparse.coo_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=self.dtype).tocsr()
if self.n_values == 'auto':
out = out[:, self.active_features_]
return out if self.sparse else out.toarray()
def transform(self, X):
"""Transform X using one-hot encoding.
Parameters
----------
X : array-like, shape=(n_samples, n_features)
Input array of type int.
Returns
-------
X_out : sparse matrix if sparse=True else a 2-d array, dtype=int
Transformed input.
"""
return _transform_selected(X, self._transform,
self.categorical_features, copy=True)
| bsd-3-clause |
toobaz/pandas | pandas/tests/io/json/test_pandas.py | 2 | 57735 |
from collections import OrderedDict
from datetime import timedelta
from io import StringIO
import json
import os
import numpy as np
import pytest
from pandas.compat import is_platform_32bit
import pandas.util._test_decorators as td
import pandas as pd
from pandas import DataFrame, DatetimeIndex, Series, Timestamp, read_json
import pandas.util.testing as tm
from pandas.util.testing import (
assert_almost_equal,
assert_frame_equal,
assert_index_equal,
assert_series_equal,
ensure_clean,
network,
)
_seriesd = tm.getSeriesData()
_tsd = tm.getTimeSeriesData()
_frame = DataFrame(_seriesd)
_frame2 = DataFrame(_seriesd, columns=["D", "C", "B", "A"])
_intframe = DataFrame({k: v.astype(np.int64) for k, v in _seriesd.items()})
_tsframe = DataFrame(_tsd)
_cat_frame = _frame.copy()
cat = ["bah"] * 5 + ["bar"] * 5 + ["baz"] * 5 + ["foo"] * (len(_cat_frame) - 15)
_cat_frame.index = pd.CategoricalIndex(cat, name="E")
_cat_frame["E"] = list(reversed(cat))
_cat_frame["sort"] = np.arange(len(_cat_frame), dtype="int64")
_mixed_frame = _frame.copy()
class TestPandasContainer:
@pytest.fixture(scope="function", autouse=True)
def setup(self, datapath):
self.dirpath = datapath("io", "json", "data")
self.ts = tm.makeTimeSeries()
self.ts.name = "ts"
self.series = tm.makeStringSeries()
self.series.name = "series"
self.objSeries = tm.makeObjectSeries()
self.objSeries.name = "objects"
self.empty_series = Series([], index=[])
self.empty_frame = DataFrame()
self.frame = _frame.copy()
self.frame2 = _frame2.copy()
self.intframe = _intframe.copy()
self.tsframe = _tsframe.copy()
self.mixed_frame = _mixed_frame.copy()
self.categorical = _cat_frame.copy()
yield
del self.dirpath
del self.ts
del self.series
del self.objSeries
del self.empty_series
del self.empty_frame
del self.frame
del self.frame2
del self.intframe
del self.tsframe
del self.mixed_frame
def test_frame_double_encoded_labels(self):
df = DataFrame(
[["a", "b"], ["c", "d"]],
index=['index " 1', "index / 2"],
columns=["a \\ b", "y / z"],
)
assert_frame_equal(df, read_json(df.to_json(orient="split"), orient="split"))
assert_frame_equal(
df, read_json(df.to_json(orient="columns"), orient="columns")
)
assert_frame_equal(df, read_json(df.to_json(orient="index"), orient="index"))
df_unser = read_json(df.to_json(orient="records"), orient="records")
assert_index_equal(df.columns, df_unser.columns)
tm.assert_numpy_array_equal(df.values, df_unser.values)
def test_frame_non_unique_index(self):
df = DataFrame([["a", "b"], ["c", "d"]], index=[1, 1], columns=["x", "y"])
msg = "DataFrame index must be unique for orient='index'"
with pytest.raises(ValueError, match=msg):
df.to_json(orient="index")
msg = "DataFrame index must be unique for orient='columns'"
with pytest.raises(ValueError, match=msg):
df.to_json(orient="columns")
assert_frame_equal(df, read_json(df.to_json(orient="split"), orient="split"))
unser = read_json(df.to_json(orient="records"), orient="records")
tm.assert_index_equal(df.columns, unser.columns)
tm.assert_almost_equal(df.values, unser.values)
unser = read_json(df.to_json(orient="values"), orient="values")
tm.assert_numpy_array_equal(df.values, unser.values)
def test_frame_non_unique_columns(self):
df = DataFrame([["a", "b"], ["c", "d"]], index=[1, 2], columns=["x", "x"])
msg = "DataFrame columns must be unique for orient='index'"
with pytest.raises(ValueError, match=msg):
df.to_json(orient="index")
msg = "DataFrame columns must be unique for orient='columns'"
with pytest.raises(ValueError, match=msg):
df.to_json(orient="columns")
msg = "DataFrame columns must be unique for orient='records'"
with pytest.raises(ValueError, match=msg):
df.to_json(orient="records")
assert_frame_equal(
df, read_json(df.to_json(orient="split"), orient="split", dtype=False)
)
unser = read_json(df.to_json(orient="values"), orient="values")
tm.assert_numpy_array_equal(df.values, unser.values)
# GH4377; duplicate columns not processing correctly
df = DataFrame([["a", "b"], ["c", "d"]], index=[1, 2], columns=["x", "y"])
result = read_json(df.to_json(orient="split"), orient="split")
assert_frame_equal(result, df)
def _check(df):
result = read_json(
df.to_json(orient="split"), orient="split", convert_dates=["x"]
)
assert_frame_equal(result, df)
for o in [
[["a", "b"], ["c", "d"]],
[[1.5, 2.5], [3.5, 4.5]],
[[1, 2.5], [3, 4.5]],
[[Timestamp("20130101"), 3.5], [Timestamp("20130102"), 4.5]],
]:
_check(DataFrame(o, index=[1, 2], columns=["x", "x"]))
def test_frame_from_json_to_json(self):
def _check_orient(
df,
orient,
dtype=None,
numpy=False,
convert_axes=True,
check_dtype=True,
raise_ok=None,
sort=None,
check_index_type=True,
check_column_type=True,
check_numpy_dtype=False,
):
if sort is not None:
df = df.sort_values(sort)
else:
df = df.sort_index()
# if we are not unique, then check that we are raising ValueError
# for the appropriate orients
if not df.index.is_unique and orient in ["index", "columns"]:
msg = "DataFrame index must be unique for orient='{}'".format(orient)
with pytest.raises(ValueError, match=msg):
df.to_json(orient=orient)
return
if not df.columns.is_unique and orient in ["index", "columns", "records"]:
# TODO: not executed. fix this.
with pytest.raises(ValueError, match="ksjkajksfjksjfkjs"):
df.to_json(orient=orient)
return
dfjson = df.to_json(orient=orient)
try:
unser = read_json(
dfjson,
orient=orient,
dtype=dtype,
numpy=numpy,
convert_axes=convert_axes,
)
except Exception as detail:
if raise_ok is not None:
if isinstance(detail, raise_ok):
return
raise
if sort is not None and sort in unser.columns:
unser = unser.sort_values(sort)
else:
unser = unser.sort_index()
if not dtype:
check_dtype = False
if not convert_axes and df.index.dtype.type == np.datetime64:
unser.index = DatetimeIndex(unser.index.values.astype("i8") * 1e6)
if orient == "records":
# index is not captured in this orientation
tm.assert_almost_equal(
df.values, unser.values, check_dtype=check_numpy_dtype
)
tm.assert_index_equal(
df.columns, unser.columns, exact=check_column_type
)
elif orient == "values":
# index and cols are not captured in this orientation
if numpy is True and df.shape == (0, 0):
assert unser.shape[0] == 0
else:
tm.assert_almost_equal(
df.values, unser.values, check_dtype=check_numpy_dtype
)
elif orient == "split":
# index and col labels might not be strings
unser.index = [str(i) for i in unser.index]
unser.columns = [str(i) for i in unser.columns]
if sort is None:
unser = unser.sort_index()
tm.assert_almost_equal(
df.values, unser.values, check_dtype=check_numpy_dtype
)
else:
if convert_axes:
tm.assert_frame_equal(
df,
unser,
check_dtype=check_dtype,
check_index_type=check_index_type,
check_column_type=check_column_type,
)
else:
tm.assert_frame_equal(
df, unser, check_less_precise=False, check_dtype=check_dtype
)
def _check_all_orients(
df,
dtype=None,
convert_axes=True,
raise_ok=None,
sort=None,
check_index_type=True,
check_column_type=True,
):
# numpy=False
if convert_axes:
_check_orient(
df,
"columns",
dtype=dtype,
sort=sort,
check_index_type=False,
check_column_type=False,
)
_check_orient(
df,
"records",
dtype=dtype,
sort=sort,
check_index_type=False,
check_column_type=False,
)
_check_orient(
df,
"split",
dtype=dtype,
sort=sort,
check_index_type=False,
check_column_type=False,
)
_check_orient(
df,
"index",
dtype=dtype,
sort=sort,
check_index_type=False,
check_column_type=False,
)
_check_orient(
df,
"values",
dtype=dtype,
sort=sort,
check_index_type=False,
check_column_type=False,
)
_check_orient(df, "columns", dtype=dtype, convert_axes=False, sort=sort)
_check_orient(df, "records", dtype=dtype, convert_axes=False, sort=sort)
_check_orient(df, "split", dtype=dtype, convert_axes=False, sort=sort)
_check_orient(df, "index", dtype=dtype, convert_axes=False, sort=sort)
_check_orient(df, "values", dtype=dtype, convert_axes=False, sort=sort)
# numpy=True and raise_ok might be not None, so ignore the error
if convert_axes:
_check_orient(
df,
"columns",
dtype=dtype,
numpy=True,
raise_ok=raise_ok,
sort=sort,
check_index_type=False,
check_column_type=False,
)
_check_orient(
df,
"records",
dtype=dtype,
numpy=True,
raise_ok=raise_ok,
sort=sort,
check_index_type=False,
check_column_type=False,
)
_check_orient(
df,
"split",
dtype=dtype,
numpy=True,
raise_ok=raise_ok,
sort=sort,
check_index_type=False,
check_column_type=False,
)
_check_orient(
df,
"index",
dtype=dtype,
numpy=True,
raise_ok=raise_ok,
sort=sort,
check_index_type=False,
check_column_type=False,
)
_check_orient(
df,
"values",
dtype=dtype,
numpy=True,
raise_ok=raise_ok,
sort=sort,
check_index_type=False,
check_column_type=False,
)
_check_orient(
df,
"columns",
dtype=dtype,
numpy=True,
convert_axes=False,
raise_ok=raise_ok,
sort=sort,
)
_check_orient(
df,
"records",
dtype=dtype,
numpy=True,
convert_axes=False,
raise_ok=raise_ok,
sort=sort,
)
_check_orient(
df,
"split",
dtype=dtype,
numpy=True,
convert_axes=False,
raise_ok=raise_ok,
sort=sort,
)
_check_orient(
df,
"index",
dtype=dtype,
numpy=True,
convert_axes=False,
raise_ok=raise_ok,
sort=sort,
)
_check_orient(
df,
"values",
dtype=dtype,
numpy=True,
convert_axes=False,
raise_ok=raise_ok,
sort=sort,
)
# basic
_check_all_orients(self.frame)
assert self.frame.to_json() == self.frame.to_json(orient="columns")
_check_all_orients(self.intframe, dtype=self.intframe.values.dtype)
_check_all_orients(self.intframe, dtype=False)
# big one
# index and columns are strings as all unserialised JSON object keys
# are assumed to be strings
biggie = DataFrame(
np.zeros((200, 4)),
columns=[str(i) for i in range(4)],
index=[str(i) for i in range(200)],
)
_check_all_orients(biggie, dtype=False, convert_axes=False)
# dtypes
_check_all_orients(
DataFrame(biggie, dtype=np.float64), dtype=np.float64, convert_axes=False
)
_check_all_orients(
DataFrame(biggie, dtype=np.int), dtype=np.int, convert_axes=False
)
_check_all_orients(
DataFrame(biggie, dtype="U3"),
dtype="U3",
convert_axes=False,
raise_ok=ValueError,
)
# categorical
_check_all_orients(self.categorical, sort="sort", raise_ok=ValueError)
# empty
_check_all_orients(
self.empty_frame, check_index_type=False, check_column_type=False
)
# time series data
_check_all_orients(self.tsframe)
# mixed data
index = pd.Index(["a", "b", "c", "d", "e"])
data = {
"A": [0.0, 1.0, 2.0, 3.0, 4.0],
"B": [0.0, 1.0, 0.0, 1.0, 0.0],
"C": ["foo1", "foo2", "foo3", "foo4", "foo5"],
"D": [True, False, True, False, True],
}
df = DataFrame(data=data, index=index)
_check_orient(df, "split", check_dtype=False)
_check_orient(df, "records", check_dtype=False)
_check_orient(df, "values", check_dtype=False)
_check_orient(df, "columns", check_dtype=False)
# index oriented is problematic as it is read back in a transposed
# state, so the columns are interpreted as having mixed data and
# given object dtypes.
# force everything to have object dtype beforehand
_check_orient(df.transpose().transpose(), "index", dtype=False)
def test_frame_from_json_bad_data(self):
with pytest.raises(ValueError, match="Expected object or value"):
read_json(StringIO('{"key":b:a:d}'))
# too few indices
json = StringIO(
'{"columns":["A","B"],'
'"index":["2","3"],'
'"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}'
)
msg = r"Shape of passed values is \(3, 2\), indices imply \(2, 2\)"
with pytest.raises(ValueError, match=msg):
read_json(json, orient="split")
# too many columns
json = StringIO(
'{"columns":["A","B","C"],'
'"index":["1","2","3"],'
'"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}'
)
msg = "3 columns passed, passed data had 2 columns"
with pytest.raises(ValueError, match=msg):
read_json(json, orient="split")
# bad key
json = StringIO(
'{"badkey":["A","B"],'
'"index":["2","3"],'
'"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}'
)
with pytest.raises(ValueError, match=r"unexpected key\(s\): badkey"):
read_json(json, orient="split")
def test_frame_from_json_nones(self):
df = DataFrame([[1, 2], [4, 5, 6]])
unser = read_json(df.to_json())
assert np.isnan(unser[2][0])
df = DataFrame([["1", "2"], ["4", "5", "6"]])
unser = read_json(df.to_json())
assert np.isnan(unser[2][0])
unser = read_json(df.to_json(), dtype=False)
assert unser[2][0] is None
unser = read_json(df.to_json(), convert_axes=False, dtype=False)
assert unser["2"]["0"] is None
unser = read_json(df.to_json(), numpy=False)
assert np.isnan(unser[2][0])
unser = read_json(df.to_json(), numpy=False, dtype=False)
assert unser[2][0] is None
unser = read_json(df.to_json(), numpy=False, convert_axes=False, dtype=False)
assert unser["2"]["0"] is None
# infinities get mapped to nulls which get mapped to NaNs during
# deserialisation
df = DataFrame([[1, 2], [4, 5, 6]])
df.loc[0, 2] = np.inf
unser = read_json(df.to_json())
assert np.isnan(unser[2][0])
unser = read_json(df.to_json(), dtype=False)
assert np.isnan(unser[2][0])
df.loc[0, 2] = np.NINF
unser = read_json(df.to_json())
assert np.isnan(unser[2][0])
unser = read_json(df.to_json(), dtype=False)
assert np.isnan(unser[2][0])
@pytest.mark.skipif(
is_platform_32bit(), reason="not compliant on 32-bit, xref #15865"
)
def test_frame_to_json_float_precision(self):
df = pd.DataFrame([dict(a_float=0.95)])
encoded = df.to_json(double_precision=1)
assert encoded == '{"a_float":{"0":1.0}}'
df = pd.DataFrame([dict(a_float=1.95)])
encoded = df.to_json(double_precision=1)
assert encoded == '{"a_float":{"0":2.0}}'
df = pd.DataFrame([dict(a_float=-1.95)])
encoded = df.to_json(double_precision=1)
assert encoded == '{"a_float":{"0":-2.0}}'
df = pd.DataFrame([dict(a_float=0.995)])
encoded = df.to_json(double_precision=2)
assert encoded == '{"a_float":{"0":1.0}}'
df = pd.DataFrame([dict(a_float=0.9995)])
encoded = df.to_json(double_precision=3)
assert encoded == '{"a_float":{"0":1.0}}'
df = pd.DataFrame([dict(a_float=0.99999999999999944)])
encoded = df.to_json(double_precision=15)
assert encoded == '{"a_float":{"0":1.0}}'
def test_frame_to_json_except(self):
df = DataFrame([1, 2, 3])
msg = "Invalid value 'garbage' for option 'orient'"
with pytest.raises(ValueError, match=msg):
df.to_json(orient="garbage")
def test_frame_empty(self):
df = DataFrame(columns=["jim", "joe"])
assert not df._is_mixed_type
assert_frame_equal(
read_json(df.to_json(), dtype=dict(df.dtypes)), df, check_index_type=False
)
# GH 7445
result = pd.DataFrame({"test": []}, index=[]).to_json(orient="columns")
expected = '{"test":{}}'
assert result == expected
def test_frame_empty_mixedtype(self):
# mixed type
df = DataFrame(columns=["jim", "joe"])
df["joe"] = df["joe"].astype("i8")
assert df._is_mixed_type
assert_frame_equal(
read_json(df.to_json(), dtype=dict(df.dtypes)), df, check_index_type=False
)
def test_frame_mixedtype_orient(self): # GH10289
vals = [
[10, 1, "foo", 0.1, 0.01],
[20, 2, "bar", 0.2, 0.02],
[30, 3, "baz", 0.3, 0.03],
[40, 4, "qux", 0.4, 0.04],
]
df = DataFrame(
vals, index=list("abcd"), columns=["1st", "2nd", "3rd", "4th", "5th"]
)
assert df._is_mixed_type
right = df.copy()
for orient in ["split", "index", "columns"]:
inp = df.to_json(orient=orient)
left = read_json(inp, orient=orient, convert_axes=False)
assert_frame_equal(left, right)
right.index = np.arange(len(df))
inp = df.to_json(orient="records")
left = read_json(inp, orient="records", convert_axes=False)
assert_frame_equal(left, right)
right.columns = np.arange(df.shape[1])
inp = df.to_json(orient="values")
left = read_json(inp, orient="values", convert_axes=False)
assert_frame_equal(left, right)
def test_v12_compat(self):
df = DataFrame(
[
[1.56808523, 0.65727391, 1.81021139, -0.17251653],
[-0.2550111, -0.08072427, -0.03202878, -0.17581665],
[1.51493992, 0.11805825, 1.629455, -1.31506612],
[-0.02765498, 0.44679743, 0.33192641, -0.27885413],
[0.05951614, -2.69652057, 1.28163262, 0.34703478],
],
columns=["A", "B", "C", "D"],
index=pd.date_range("2000-01-03", "2000-01-07"),
)
df["date"] = pd.Timestamp("19920106 18:21:32.12")
df.iloc[3, df.columns.get_loc("date")] = pd.Timestamp("20130101")
df["modified"] = df["date"]
df.iloc[1, df.columns.get_loc("modified")] = pd.NaT
v12_json = os.path.join(self.dirpath, "tsframe_v012.json")
df_unser = pd.read_json(v12_json)
assert_frame_equal(df, df_unser)
df_iso = df.drop(["modified"], axis=1)
v12_iso_json = os.path.join(self.dirpath, "tsframe_iso_v012.json")
df_unser_iso = pd.read_json(v12_iso_json)
assert_frame_equal(df_iso, df_unser_iso)
def test_blocks_compat_GH9037(self):
index = pd.date_range("20000101", periods=10, freq="H")
df_mixed = DataFrame(
OrderedDict(
float_1=[
-0.92077639,
0.77434435,
1.25234727,
0.61485564,
-0.60316077,
0.24653374,
0.28668979,
-2.51969012,
0.95748401,
-1.02970536,
],
int_1=[
19680418,
75337055,
99973684,
65103179,
79373900,
40314334,
21290235,
4991321,
41903419,
16008365,
],
str_1=[
"78c608f1",
"64a99743",
"13d2ff52",
"ca7f4af2",
"97236474",
"bde7e214",
"1a6bde47",
"b1190be5",
"7a669144",
"8d64d068",
],
float_2=[
-0.0428278,
-1.80872357,
3.36042349,
-0.7573685,
-0.48217572,
0.86229683,
1.08935819,
0.93898739,
-0.03030452,
1.43366348,
],
str_2=[
"14f04af9",
"d085da90",
"4bcfac83",
"81504caf",
"2ffef4a9",
"08e2f5c4",
"07e1af03",
"addbd4a7",
"1f6a09ba",
"4bfc4d87",
],
int_2=[
86967717,
98098830,
51927505,
20372254,
12601730,
20884027,
34193846,
10561746,
24867120,
76131025,
],
),
index=index,
)
# JSON deserialisation always creates unicode strings
df_mixed.columns = df_mixed.columns.astype("unicode")
df_roundtrip = pd.read_json(df_mixed.to_json(orient="split"), orient="split")
assert_frame_equal(
df_mixed,
df_roundtrip,
check_index_type=True,
check_column_type=True,
check_frame_type=True,
by_blocks=True,
check_exact=True,
)
def test_frame_nonprintable_bytes(self):
# GH14256: failing column caused segfaults, if it is not the last one
class BinaryThing:
def __init__(self, hexed):
self.hexed = hexed
self.binary = bytes.fromhex(hexed)
def __str__(self):
return self.hexed
hexed = "574b4454ba8c5eb4f98a8f45"
binthing = BinaryThing(hexed)
# verify the proper conversion of printable content
df_printable = DataFrame({"A": [binthing.hexed]})
assert df_printable.to_json() == '{{"A":{{"0":"{hex}"}}}}'.format(hex=hexed)
# check if non-printable content throws appropriate Exception
df_nonprintable = DataFrame({"A": [binthing]})
msg = "Unsupported UTF-8 sequence length when encoding string"
with pytest.raises(OverflowError, match=msg):
df_nonprintable.to_json()
# the same with multiple columns threw segfaults
df_mixed = DataFrame({"A": [binthing], "B": [1]}, columns=["A", "B"])
with pytest.raises(OverflowError):
df_mixed.to_json()
# default_handler should resolve exceptions for non-string types
assert df_nonprintable.to_json(
default_handler=str
) == '{{"A":{{"0":"{hex}"}}}}'.format(hex=hexed)
assert df_mixed.to_json(
default_handler=str
) == '{{"A":{{"0":"{hex}"}},"B":{{"0":1}}}}'.format(hex=hexed)
def test_label_overflow(self):
# GH14256: buffer length not checked when writing label
df = pd.DataFrame({"bar" * 100000: [1], "foo": [1337]})
assert df.to_json() == '{{"{bar}":{{"0":1}},"foo":{{"0":1337}}}}'.format(
bar=("bar" * 100000)
)
def test_series_non_unique_index(self):
s = Series(["a", "b"], index=[1, 1])
msg = "Series index must be unique for orient='index'"
with pytest.raises(ValueError, match=msg):
s.to_json(orient="index")
assert_series_equal(
s, read_json(s.to_json(orient="split"), orient="split", typ="series")
)
unser = read_json(s.to_json(orient="records"), orient="records", typ="series")
tm.assert_numpy_array_equal(s.values, unser.values)
def test_series_from_json_to_json(self):
def _check_orient(
series, orient, dtype=None, numpy=False, check_index_type=True
):
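# Round-trip the Series through to_json/read_json with the given orient and compare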
series = series.sort_index()
unser = read_json(
series.to_json(orient=orient),
typ="series",
orient=orient,
numpy=numpy,
dtype=dtype,
)
unser = unser.sort_index()
if orient == "records" or orient == "values":
assert_almost_equal(series.values, unser.values)
else:
if orient == "split":
assert_series_equal(
series, unser, check_index_type=check_index_type
)
else:
assert_series_equal(
series,
unser,
check_names=False,
check_index_type=check_index_type,
)
def _check_all_orients(series, dtype=None, check_index_type=True):
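# Exercise every supported orient, both with and without the numpy fast path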
_check_orient(
series, "columns", dtype=dtype, check_index_type=check_index_type
)
_check_orient(
series, "records", dtype=dtype, check_index_type=check_index_type
)
_check_orient(
series, "split", dtype=dtype, check_index_type=check_index_type
)
_check_orient(
series, "index", dtype=dtype, check_index_type=check_index_type
)
_check_orient(series, "values", dtype=dtype)
_check_orient(
series,
"columns",
dtype=dtype,
numpy=True,
check_index_type=check_index_type,
)
_check_orient(
series,
"records",
dtype=dtype,
numpy=True,
check_index_type=check_index_type,
)
_check_orient(
series,
"split",
dtype=dtype,
numpy=True,
check_index_type=check_index_type,
)
_check_orient(
series,
"index",
dtype=dtype,
numpy=True,
check_index_type=check_index_type,
)
_check_orient(
series,
"values",
dtype=dtype,
numpy=True,
check_index_type=check_index_type,
)
# basic
_check_all_orients(self.series)
assert self.series.to_json() == self.series.to_json(orient="index")
objSeries = Series(
[str(d) for d in self.objSeries],
index=self.objSeries.index,
name=self.objSeries.name,
)
_check_all_orients(objSeries, dtype=False)
# empty_series has empty index with object dtype
# which cannot be reverted
assert self.empty_series.index.dtype == np.object_
_check_all_orients(self.empty_series, check_index_type=False)
_check_all_orients(self.ts)
# dtype
s = Series(range(6), index=["a", "b", "c", "d", "e", "f"])
_check_all_orients(Series(s, dtype=np.float64), dtype=np.float64)
_check_all_orients(Series(s, dtype=np.int), dtype=np.int)
def test_series_to_json_except(self):
s = Series([1, 2, 3])
msg = "Invalid value 'garbage' for option 'orient'"
with pytest.raises(ValueError, match=msg):
s.to_json(orient="garbage")
def test_series_from_json_precise_float(self):
s = Series([4.56, 4.56, 4.56])
result = read_json(s.to_json(), typ="series", precise_float=True)
assert_series_equal(result, s, check_index_type=False)
def test_series_with_dtype(self):
# GH 21986
s = Series([4.56, 4.56, 4.56])
result = read_json(s.to_json(), typ="series", dtype=np.int64)
expected = Series([4] * 3)
assert_series_equal(result, expected)
def test_frame_from_json_precise_float(self):
df = DataFrame([[4.56, 4.56, 4.56], [4.56, 4.56, 4.56]])
result = read_json(df.to_json(), precise_float=True)
assert_frame_equal(result, df, check_index_type=False, check_column_type=False)
def test_typ(self):
s = Series(range(6), index=["a", "b", "c", "d", "e", "f"], dtype="int64")
result = read_json(s.to_json(), typ=None)
assert_series_equal(result, s)
def test_reconstruction_index(self):
df = DataFrame([[1, 2, 3], [4, 5, 6]])
result = read_json(df.to_json())
assert_frame_equal(result, df)
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}, index=["A", "B", "C"])
result = read_json(df.to_json())
assert_frame_equal(result, df)
def test_path(self):
with ensure_clean("test.json") as path:
for df in [
self.frame,
self.frame2,
self.intframe,
self.tsframe,
self.mixed_frame,
]:
df.to_json(path)
read_json(path)
def test_axis_dates(self):
# frame
json = self.tsframe.to_json()
result = read_json(json)
assert_frame_equal(result, self.tsframe)
# series
json = self.ts.to_json()
result = read_json(json, typ="series")
assert_series_equal(result, self.ts, check_names=False)
assert result.name is None
def test_convert_dates(self):
# frame
df = self.tsframe.copy()
df["date"] = Timestamp("20130101")
json = df.to_json()
result = read_json(json)
assert_frame_equal(result, df)
df["foo"] = 1.0
json = df.to_json(date_unit="ns")
result = read_json(json, convert_dates=False)
expected = df.copy()
expected["date"] = expected["date"].values.view("i8")
expected["foo"] = expected["foo"].astype("int64")
assert_frame_equal(result, expected)
# series
ts = Series(Timestamp("20130101"), index=self.ts.index)
json = ts.to_json()
result = read_json(json, typ="series")
assert_series_equal(result, ts)
def test_convert_dates_infer(self):
# GH10747
from pandas.io.json import dumps
infer_words = [
"trade_time",
"date",
"datetime",
"sold_at",
"modified",
"timestamp",
"timestamps",
]
for infer_word in infer_words:
data = [{"id": 1, infer_word: 1036713600000}, {"id": 2}]
expected = DataFrame(
[[1, Timestamp("2002-11-08")], [2, pd.NaT]], columns=["id", infer_word]
)
result = read_json(dumps(data))[["id", infer_word]]
assert_frame_equal(result, expected)
def test_date_format_frame(self):
df = self.tsframe.copy()
def test_w_date(date, date_unit=None):
df["date"] = Timestamp(date)
df.iloc[1, df.columns.get_loc("date")] = pd.NaT
df.iloc[5, df.columns.get_loc("date")] = pd.NaT
if date_unit:
json = df.to_json(date_format="iso", date_unit=date_unit)
else:
json = df.to_json(date_format="iso")
result = read_json(json)
expected = df.copy()
expected.index = expected.index.tz_localize("UTC")
expected["date"] = expected["date"].dt.tz_localize("UTC")
assert_frame_equal(result, expected)
test_w_date("20130101 20:43:42.123")
test_w_date("20130101 20:43:42", date_unit="s")
test_w_date("20130101 20:43:42.123", date_unit="ms")
test_w_date("20130101 20:43:42.123456", date_unit="us")
test_w_date("20130101 20:43:42.123456789", date_unit="ns")
msg = "Invalid value 'foo' for option 'date_unit'"
with pytest.raises(ValueError, match=msg):
df.to_json(date_format="iso", date_unit="foo")
def test_date_format_series(self):
def test_w_date(date, date_unit=None):
ts = Series(Timestamp(date), index=self.ts.index)
ts.iloc[1] = pd.NaT
ts.iloc[5] = pd.NaT
if date_unit:
json = ts.to_json(date_format="iso", date_unit=date_unit)
else:
json = ts.to_json(date_format="iso")
result = read_json(json, typ="series")
expected = ts.copy()
expected.index = expected.index.tz_localize("UTC")
expected = expected.dt.tz_localize("UTC")
assert_series_equal(result, expected)
test_w_date("20130101 20:43:42.123")
test_w_date("20130101 20:43:42", date_unit="s")
test_w_date("20130101 20:43:42.123", date_unit="ms")
test_w_date("20130101 20:43:42.123456", date_unit="us")
test_w_date("20130101 20:43:42.123456789", date_unit="ns")
ts = Series(Timestamp("20130101 20:43:42.123"), index=self.ts.index)
msg = "Invalid value 'foo' for option 'date_unit'"
with pytest.raises(ValueError, match=msg):
ts.to_json(date_format="iso", date_unit="foo")
def test_date_unit(self):
df = self.tsframe.copy()
df["date"] = Timestamp("20130101 20:43:42")
dl = df.columns.get_loc("date")
df.iloc[1, dl] = Timestamp("19710101 20:43:42")
df.iloc[2, dl] = Timestamp("21460101 20:43:42")
df.iloc[4, dl] = pd.NaT
for unit in ("s", "ms", "us", "ns"):
json = df.to_json(date_format="epoch", date_unit=unit)
# force date unit
result = read_json(json, date_unit=unit)
assert_frame_equal(result, df)
# detect date unit
result = read_json(json, date_unit=None)
assert_frame_equal(result, df)
def test_weird_nested_json(self):
# this used to core dump the parser
s = r"""{
"status": "success",
"data": {
"posts": [
{
"id": 1,
"title": "A blog post",
"body": "Some useful content"
},
{
"id": 2,
"title": "Another blog post",
"body": "More content"
}
]
}
}"""
read_json(s)
def test_doc_example(self):
dfj2 = DataFrame(np.random.randn(5, 2), columns=list("AB"))
dfj2["date"] = Timestamp("20130101")
dfj2["ints"] = range(5)
dfj2["bools"] = True
dfj2.index = pd.date_range("20130101", periods=5)
json = dfj2.to_json()
result = read_json(json, dtype={"ints": np.int64, "bools": np.bool_})
assert_frame_equal(result, result)
def test_misc_example(self):
# parsing unordered input fails
result = read_json('[{"a": 1, "b": 2}, {"b":2, "a" :1}]', numpy=True)
expected = DataFrame([[1, 2], [1, 2]], columns=["a", "b"])
error_msg = """DataFrame\\.index are different
DataFrame\\.index values are different \\(100\\.0 %\\)
\\[left\\]: Index\\(\\['a', 'b'\\], dtype='object'\\)
\\[right\\]: RangeIndex\\(start=0, stop=2, step=1\\)"""
with pytest.raises(AssertionError, match=error_msg):
assert_frame_equal(result, expected, check_index_type=False)
result = read_json('[{"a": 1, "b": 2}, {"b":2, "a" :1}]')
expected = DataFrame([[1, 2], [1, 2]], columns=["a", "b"])
assert_frame_equal(result, expected)
@network
@pytest.mark.single
def test_round_trip_exception_(self):
# GH 3867
csv = "https://raw.github.com/hayd/lahman2012/master/csvs/Teams.csv"
df = pd.read_csv(csv)
s = df.to_json()
result = pd.read_json(s)
assert_frame_equal(result.reindex(index=df.index, columns=df.columns), df)
@network
@pytest.mark.single
@pytest.mark.parametrize(
"field,dtype",
[
["created_at", pd.DatetimeTZDtype(tz="UTC")],
["closed_at", "datetime64[ns]"],
["updated_at", pd.DatetimeTZDtype(tz="UTC")],
],
)
def test_url(self, field, dtype):
url = "https://api.github.com/repos/pandas-dev/pandas/issues?per_page=5" # noqa
result = read_json(url, convert_dates=True)
assert result[field].dtype == dtype
def test_timedelta(self):
converter = lambda x: pd.to_timedelta(x, unit="ms")
s = Series([timedelta(23), timedelta(seconds=5)])
assert s.dtype == "timedelta64[ns]"
result = pd.read_json(s.to_json(), typ="series").apply(converter)
assert_series_equal(result, s)
s = Series([timedelta(23), timedelta(seconds=5)], index=pd.Index([0, 1]))
assert s.dtype == "timedelta64[ns]"
result = pd.read_json(s.to_json(), typ="series").apply(converter)
assert_series_equal(result, s)
frame = DataFrame([timedelta(23), timedelta(seconds=5)])
assert frame[0].dtype == "timedelta64[ns]"
assert_frame_equal(frame, pd.read_json(frame.to_json()).apply(converter))
frame = DataFrame(
{
"a": [timedelta(days=23), timedelta(seconds=5)],
"b": [1, 2],
"c": pd.date_range(start="20130101", periods=2),
}
)
result = pd.read_json(frame.to_json(date_unit="ns"))
result["a"] = pd.to_timedelta(result.a, unit="ns")
result["c"] = pd.to_datetime(result.c)
assert_frame_equal(frame, result)
def test_mixed_timedelta_datetime(self):
frame = DataFrame(
{"a": [timedelta(23), pd.Timestamp("20130101")]}, dtype=object
)
expected = DataFrame(
{"a": [pd.Timedelta(frame.a[0]).value, pd.Timestamp(frame.a[1]).value]}
)
result = pd.read_json(frame.to_json(date_unit="ns"), dtype={"a": "int64"})
assert_frame_equal(result, expected, check_index_type=False)
def test_default_handler(self):
value = object()
frame = DataFrame({"a": [7, value]})
expected = DataFrame({"a": [7, str(value)]})
result = pd.read_json(frame.to_json(default_handler=str))
assert_frame_equal(expected, result, check_index_type=False)
def test_default_handler_indirect(self):
from pandas.io.json import dumps
def default(obj):
if isinstance(obj, complex):
return [("mathjs", "Complex"), ("re", obj.real), ("im", obj.imag)]
return str(obj)
df_list = [
9,
DataFrame(
{"a": [1, "STR", complex(4, -5)], "b": [float("nan"), None, "N/A"]},
columns=["a", "b"],
),
]
expected = (
'[9,[[1,null],["STR",null],[[["mathjs","Complex"],'
'["re",4.0],["im",-5.0]],"N\\/A"]]]'
)
assert dumps(df_list, default_handler=default, orient="values") == expected
def test_default_handler_numpy_unsupported_dtype(self):
# GH12554 to_json raises 'Unhandled numpy dtype 15'
df = DataFrame(
{"a": [1, 2.3, complex(4, -5)], "b": [float("nan"), None, complex(1.2, 0)]},
columns=["a", "b"],
)
expected = (
'[["(1+0j)","(nan+0j)"],'
'["(2.3+0j)","(nan+0j)"],'
'["(4-5j)","(1.2+0j)"]]'
)
assert df.to_json(default_handler=str, orient="values") == expected
def test_default_handler_raises(self):
msg = "raisin"
def my_handler_raises(obj):
raise TypeError(msg)
with pytest.raises(TypeError, match=msg):
DataFrame({"a": [1, 2, object()]}).to_json(
default_handler=my_handler_raises
)
with pytest.raises(TypeError, match=msg):
DataFrame({"a": [1, 2, complex(4, -5)]}).to_json(
default_handler=my_handler_raises
)
def test_categorical(self):
# GH4377 df.to_json segfaults with non-ndarray blocks
df = DataFrame({"A": ["a", "b", "c", "a", "b", "b", "a"]})
df["B"] = df["A"]
expected = df.to_json()
df["B"] = df["A"].astype("category")
assert expected == df.to_json()
s = df["A"]
sc = df["B"]
assert s.to_json() == sc.to_json()
def test_datetime_tz(self):
# GH4377 df.to_json segfaults with non-ndarray blocks
tz_range = pd.date_range("20130101", periods=3, tz="US/Eastern")
tz_naive = tz_range.tz_convert("utc").tz_localize(None)
df = DataFrame({"A": tz_range, "B": pd.date_range("20130101", periods=3)})
df_naive = df.copy()
df_naive["A"] = tz_naive
expected = df_naive.to_json()
assert expected == df.to_json()
stz = Series(tz_range)
s_naive = Series(tz_naive)
assert stz.to_json() == s_naive.to_json()
@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
@pytest.mark.filterwarnings("ignore:DataFrame.to_sparse:FutureWarning")
@pytest.mark.filterwarnings("ignore:Series.to_sparse:FutureWarning")
def test_sparse(self):
# GH4377 df.to_json segfaults with non-ndarray blocks
df = pd.DataFrame(np.random.randn(10, 4))
df.loc[:8] = np.nan
sdf = df.to_sparse()
expected = df.to_json()
assert expected == sdf.to_json()
s = pd.Series(np.random.randn(10))
s.loc[:8] = np.nan
ss = s.to_sparse()
expected = s.to_json()
assert expected == ss.to_json()
def test_tz_is_utc(self):
from pandas.io.json import dumps
exp = '"2013-01-10T05:00:00.000Z"'
ts = Timestamp("2013-01-10 05:00:00Z")
assert dumps(ts, iso_dates=True) == exp
dt = ts.to_pydatetime()
assert dumps(dt, iso_dates=True) == exp
ts = Timestamp("2013-01-10 00:00:00", tz="US/Eastern")
assert dumps(ts, iso_dates=True) == exp
dt = ts.to_pydatetime()
assert dumps(dt, iso_dates=True) == exp
ts = Timestamp("2013-01-10 00:00:00-0500")
assert dumps(ts, iso_dates=True) == exp
dt = ts.to_pydatetime()
assert dumps(dt, iso_dates=True) == exp
def test_tz_range_is_utc(self):
from pandas.io.json import dumps
exp = '["2013-01-01T05:00:00.000Z","2013-01-02T05:00:00.000Z"]'
dfexp = (
'{"DT":{'
'"0":"2013-01-01T05:00:00.000Z",'
'"1":"2013-01-02T05:00:00.000Z"}}'
)
tz_range = pd.date_range("2013-01-01 05:00:00Z", periods=2)
assert dumps(tz_range, iso_dates=True) == exp
dti = pd.DatetimeIndex(tz_range)
assert dumps(dti, iso_dates=True) == exp
df = DataFrame({"DT": dti})
result = dumps(df, iso_dates=True)
assert result == dfexp
tz_range = pd.date_range("2013-01-01 00:00:00", periods=2, tz="US/Eastern")
assert dumps(tz_range, iso_dates=True) == exp
dti = pd.DatetimeIndex(tz_range)
assert dumps(dti, iso_dates=True) == exp
df = DataFrame({"DT": dti})
assert dumps(df, iso_dates=True) == dfexp
tz_range = pd.date_range("2013-01-01 00:00:00-0500", periods=2)
assert dumps(tz_range, iso_dates=True) == exp
dti = pd.DatetimeIndex(tz_range)
assert dumps(dti, iso_dates=True) == exp
df = DataFrame({"DT": dti})
assert dumps(df, iso_dates=True) == dfexp
def test_read_inline_jsonl(self):
# GH9180
result = read_json('{"a": 1, "b": 2}\n{"b":2, "a" :1}\n', lines=True)
expected = DataFrame([[1, 2], [1, 2]], columns=["a", "b"])
assert_frame_equal(result, expected)
@td.skip_if_not_us_locale
def test_read_s3_jsonl(self, s3_resource):
# GH17200
result = read_json("s3n://pandas-test/items.jsonl", lines=True)
expected = DataFrame([[1, 2], [1, 2]], columns=["a", "b"])
assert_frame_equal(result, expected)
def test_read_local_jsonl(self):
# GH17200
with ensure_clean("tmp_items.json") as path:
with open(path, "w") as infile:
infile.write('{"a": 1, "b": 2}\n{"b":2, "a" :1}\n')
result = read_json(path, lines=True)
expected = DataFrame([[1, 2], [1, 2]], columns=["a", "b"])
assert_frame_equal(result, expected)
def test_read_jsonl_unicode_chars(self):
# GH15132: non-ascii unicode characters
# \u201d == RIGHT DOUBLE QUOTATION MARK
# simulate file handle
json = '{"a": "foo”", "b": "bar"}\n{"a": "foo", "b": "bar"}\n'
json = StringIO(json)
result = read_json(json, lines=True)
expected = DataFrame([["foo\u201d", "bar"], ["foo", "bar"]], columns=["a", "b"])
assert_frame_equal(result, expected)
# simulate string
json = '{"a": "foo”", "b": "bar"}\n{"a": "foo", "b": "bar"}\n'
result = read_json(json, lines=True)
expected = DataFrame([["foo\u201d", "bar"], ["foo", "bar"]], columns=["a", "b"])
assert_frame_equal(result, expected)
def test_read_json_large_numbers(self):
# GH18842
json = '{"articleId": "1404366058080022500245"}'
json = StringIO(json)
result = read_json(json, typ="series")
expected = Series(1.404366e21, index=["articleId"])
assert_series_equal(result, expected)
json = '{"0": {"articleId": "1404366058080022500245"}}'
json = StringIO(json)
result = read_json(json)
expected = DataFrame(1.404366e21, index=["articleId"], columns=[0])
assert_frame_equal(result, expected)
def test_to_jsonl(self):
# GH9180
df = DataFrame([[1, 2], [1, 2]], columns=["a", "b"])
result = df.to_json(orient="records", lines=True)
expected = '{"a":1,"b":2}\n{"a":1,"b":2}'
assert result == expected
df = DataFrame([["foo}", "bar"], ['foo"', "bar"]], columns=["a", "b"])
result = df.to_json(orient="records", lines=True)
expected = '{"a":"foo}","b":"bar"}\n{"a":"foo\\"","b":"bar"}'
assert result == expected
assert_frame_equal(pd.read_json(result, lines=True), df)
# GH15096: escaped characters in columns and data
df = DataFrame([["foo\\", "bar"], ['foo"', "bar"]], columns=["a\\", "b"])
result = df.to_json(orient="records", lines=True)
expected = '{"a\\\\":"foo\\\\","b":"bar"}\n' '{"a\\\\":"foo\\"","b":"bar"}'
assert result == expected
assert_frame_equal(pd.read_json(result, lines=True), df)
# TODO: there is a near-identical test for pytables; can we share?
def test_latin_encoding(self):
# GH 13774
pytest.skip("encoding not implemented in .to_json(), xref #13774")
values = [
[b"E\xc9, 17", b"", b"a", b"b", b"c"],
[b"E\xc9, 17", b"a", b"b", b"c"],
[b"EE, 17", b"", b"a", b"b", b"c"],
[b"E\xc9, 17", b"\xf8\xfc", b"a", b"b", b"c"],
[b"", b"a", b"b", b"c"],
[b"\xf8\xfc", b"a", b"b", b"c"],
[b"A\xf8\xfc", b"", b"a", b"b", b"c"],
[np.nan, b"", b"b", b"c"],
[b"A\xf8\xfc", np.nan, b"", b"b", b"c"],
]
def _try_decode(x, encoding="latin-1"):
try:
return x.decode(encoding)
except AttributeError:
return x
# not sure how to remove latin-1 from code in python 2 and 3
values = [[_try_decode(x) for x in y] for y in values]
examples = []
for dtype in ["category", object]:
for val in values:
examples.append(Series(val, dtype=dtype))
def roundtrip(s, encoding="latin-1"):
with ensure_clean("test.json") as path:
s.to_json(path, encoding=encoding)
retr = read_json(path, encoding=encoding)
assert_series_equal(s, retr, check_categorical=False)
for s in examples:
roundtrip(s)
def test_data_frame_size_after_to_json(self):
# GH15344
df = DataFrame({"a": [str(1)]})
size_before = df.memory_usage(index=True, deep=True).sum()
df.to_json()
size_after = df.memory_usage(index=True, deep=True).sum()
assert size_before == size_after
@pytest.mark.parametrize(
"index", [None, [1, 2], [1.0, 2.0], ["a", "b"], ["1", "2"], ["1.", "2."]]
)
@pytest.mark.parametrize("columns", [["a", "b"], ["1", "2"], ["1.", "2."]])
def test_from_json_to_json_table_index_and_columns(self, index, columns):
# GH25433 GH25435
expected = DataFrame([[1, 2], [3, 4]], index=index, columns=columns)
dfjson = expected.to_json(orient="table")
result = pd.read_json(dfjson, orient="table")
assert_frame_equal(result, expected)
def test_from_json_to_json_table_dtypes(self):
# GH21345
expected = pd.DataFrame({"a": [1, 2], "b": [3.0, 4.0], "c": ["5", "6"]})
dfjson = expected.to_json(orient="table")
result = pd.read_json(dfjson, orient="table")
assert_frame_equal(result, expected)
@pytest.mark.parametrize("dtype", [True, {"b": int, "c": int}])
def test_read_json_table_dtype_raises(self, dtype):
# GH21345
df = pd.DataFrame({"a": [1, 2], "b": [3.0, 4.0], "c": ["5", "6"]})
dfjson = df.to_json(orient="table")
msg = "cannot pass both dtype and orient='table'"
with pytest.raises(ValueError, match=msg):
pd.read_json(dfjson, orient="table", dtype=dtype)
def test_read_json_table_convert_axes_raises(self):
# GH25433 GH25435
df = DataFrame([[1, 2], [3, 4]], index=[1.0, 2.0], columns=["1.", "2."])
dfjson = df.to_json(orient="table")
msg = "cannot pass both convert_axes and orient='table'"
with pytest.raises(ValueError, match=msg):
pd.read_json(dfjson, orient="table", convert_axes=True)
@pytest.mark.parametrize(
"data, expected",
[
(
DataFrame([[1, 2], [4, 5]], columns=["a", "b"]),
{"columns": ["a", "b"], "data": [[1, 2], [4, 5]]},
),
(
DataFrame([[1, 2], [4, 5]], columns=["a", "b"]).rename_axis("foo"),
{"columns": ["a", "b"], "data": [[1, 2], [4, 5]]},
),
(
DataFrame(
[[1, 2], [4, 5]], columns=["a", "b"], index=[["a", "b"], ["c", "d"]]
),
{"columns": ["a", "b"], "data": [[1, 2], [4, 5]]},
),
(Series([1, 2, 3], name="A"), {"name": "A", "data": [1, 2, 3]}),
(
Series([1, 2, 3], name="A").rename_axis("foo"),
{"name": "A", "data": [1, 2, 3]},
),
(
Series([1, 2], name="A", index=[["a", "b"], ["c", "d"]]),
{"name": "A", "data": [1, 2]},
),
],
)
def test_index_false_to_json_split(self, data, expected):
# GH 17394
# Testing index=False in to_json with orient='split'
result = data.to_json(orient="split", index=False)
result = json.loads(result)
assert result == expected
@pytest.mark.parametrize(
"data",
[
(DataFrame([[1, 2], [4, 5]], columns=["a", "b"])),
(DataFrame([[1, 2], [4, 5]], columns=["a", "b"]).rename_axis("foo")),
(
DataFrame(
[[1, 2], [4, 5]], columns=["a", "b"], index=[["a", "b"], ["c", "d"]]
)
),
(Series([1, 2, 3], name="A")),
(Series([1, 2, 3], name="A").rename_axis("foo")),
(Series([1, 2], name="A", index=[["a", "b"], ["c", "d"]])),
],
)
def test_index_false_to_json_table(self, data):
# GH 17394
# Testing index=False in to_json with orient='table'
result = data.to_json(orient="table", index=False)
result = json.loads(result)
expected = {
"schema": pd.io.json.build_table_schema(data, index=False),
"data": DataFrame(data).to_dict(orient="records"),
}
assert result == expected
@pytest.mark.parametrize("orient", ["records", "index", "columns", "values"])
def test_index_false_error_to_json(self, orient):
# GH 17394
# Testing error message from to_json with index=False
df = pd.DataFrame([[1, 2], [4, 5]], columns=["a", "b"])
msg = "'index=False' is only valid when 'orient' is 'split' or 'table'"
with pytest.raises(ValueError, match=msg):
df.to_json(orient=orient, index=False)
@pytest.mark.parametrize("orient", ["split", "table"])
@pytest.mark.parametrize("index", [True, False])
def test_index_false_from_json_to_json(self, orient, index):
# GH25170
# Test index=False in from_json to_json
expected = DataFrame({"a": [1, 2], "b": [3, 4]})
dfjson = expected.to_json(orient=orient, index=index)
result = read_json(dfjson, orient=orient)
assert_frame_equal(result, expected)
def test_read_timezone_information(self):
# GH 25546
result = read_json(
'{"2019-01-01T11:00:00.000Z":88}', typ="series", orient="index"
)
expected = Series([88], index=DatetimeIndex(["2019-01-01 11:00:00"], tz="UTC"))
assert_series_equal(result, expected)
| bsd-3-clause |
p201-sp2016/infer_structcol | infer_structcol/tests/test_model.py | 1 | 4720 | '''
This file tests functions from model.py.
'''
import structcol as sc
from infer_structcol.model import *
from infer_structcol.main import Spectrum, Sample, find_close_indices
import numpy as np
from numpy.testing import assert_equal, assert_approx_equal
from pandas.util.testing import assert_frame_equal
from .test_inference import (ntrajectories, nevents, wavelength_sigma, sigma)
def test_calc_model_spect():
wavelength = [500]
sample = Sample(wavelength, 1.5, 1)
theta = (0.5, 100, 200, 0, 0, 0, 0)
wavelength_ind = find_close_indices(wavelength_sigma, sc.Quantity(wavelength,'nm'))
sigma_test = sigma[np.array(wavelength_ind)]
assert_frame_equal(calc_model_spect(sample, theta, (sigma_test, sigma_test), ntrajectories, nevents, 2),
Spectrum(500, reflectance = 0.828595524325, sigma_r = 0.0193369922424,
transmittance =0.171404475675, sigma_t = 0.0193369922424))
def test_calc_resid_spect():
spect1=Spectrum(500, reflectance = 0.5, transmittance = 0.5, sigma_r = 0.1,
sigma_t = 0.1)
spect2=Spectrum(500, reflectance = 0.7, sigma_r = 0)
expected_output = Spectrum(500, reflectance = 0.2, sigma_r = 0.1,
transmittance = np.nan, sigma_t = np.nan)
assert_frame_equal(calc_resid_spect(spect2, spect1), expected_output)
def test_log_prior():
theta_range = {'min_phi':0.35, 'max_phi':0.74, 'min_radius':70, 'max_radius': 160,
'min_thickness':1, 'max_thickness':1000}
# Test different conditions with only reflectance or transmittance
assert_approx_equal(calc_log_prior((0.5, 100, 100, 0, 1), theta_range), 0)
assert_approx_equal(calc_log_prior((0.5, 100, 100, 1,-1), theta_range), 0)
assert_equal(calc_log_prior((0.5, 100, 100, -0.5,1), theta_range), -np.inf)
assert_equal(calc_log_prior((0.5, 100, 100, 0,1.5), theta_range), -np.inf)
assert_equal(calc_log_prior((1.0, 100, 100, 1, 1), theta_range), -np.inf)
assert_equal(calc_log_prior((0.5, 10, 100, 0, 1), theta_range), -np.inf)
assert_equal(calc_log_prior((0.5, 300, 100, 0, 1), theta_range), -np.inf)
assert_equal(calc_log_prior((0.5, 100, 100, 0, 1), theta_range), calc_log_prior((0.6, 100, 100, 0, 1), theta_range))
assert_equal(calc_log_prior((0.5, 100, 1001, 0.5, 0.1), theta_range), -np.inf)
# Tests for when there is both reflectance and transmittance
assert_approx_equal(calc_log_prior((0.5, 100, 100, 0, 0, 0, 1), theta_range), 0)
assert_approx_equal(calc_log_prior((0.5, 100, 100, 0, 0, 1, -1), theta_range), 0)
assert_equal(calc_log_prior((0.5, 100, 100, -0.5, 1, -0.5, 1), theta_range), -np.inf)
assert_equal(calc_log_prior((0.5, 100, 100, 0, 1.5, 0, 1.5), theta_range), -np.inf)
assert_equal(calc_log_prior((1.0, 100, 100, 1, 1, 1, 1), theta_range), -np.inf)
assert_approx_equal(calc_log_prior((0.5, 10, 100, 0, 0, 0, 1), theta_range), -np.inf)
assert_approx_equal(calc_log_prior((0.5, 300, 100, 0, 0, 1, -1), theta_range), -np.inf)
assert_equal(calc_log_prior((0.5, 100, 100, 0, 1, 0, 1), theta_range), calc_log_prior((0.6, 100, 100, 0, 1, 0, 1), theta_range))
assert_equal(calc_log_prior((0.5, 100, 1001, 0.5, 0.1, 0.1, 0.1), theta_range), -np.inf)
def test_likelihood():
spect1=Spectrum(500, reflectance = 0.5, transmittance = 0.5, sigma_r = 0.1, sigma_t = 0.)
spect2=Spectrum(500, reflectance = 0.7, transmittance = 0.3, sigma_r = 0., sigma_t = 0.1)
expected_output = (1 / np.sqrt(2*np.pi*0.01) * np.exp(-2))**2
assert_approx_equal(calc_likelihood(spect1, spect2), expected_output)
def test_log_posterior():
wavelength = [500]
spectrum=Spectrum(wavelength, reflectance = 0.5, sigma_r = 0.1)
sample = Sample(wavelength, 1.5, 1)
theta_range = {'min_phi':0.35, 'max_phi':0.74, 'min_radius':70, 'max_radius': 201,
'min_thickness':1, 'max_thickness':1000}
wavelength_ind = find_close_indices(wavelength_sigma, sc.Quantity(wavelength,'nm'))
sigma_test = sigma[np.array(wavelength_ind)]
# When parameters are within prior range
theta1 = (0.5, 200, 200, 0, 0)
post1 = log_posterior(theta1, spectrum, sample, theta_range=theta_range,
sigma=(sigma_test, sigma_test), ntrajectories=ntrajectories, nevents=nevents, seed=2)
assert_approx_equal(post1, -6.0413752765269875)
# When parameters are not within prior range
theta2 = (0.3, 200, 200, 0, 0)
post2 = log_posterior(theta2, spectrum, sample, theta_range=theta_range,
sigma=(sigma_test, sigma_test), ntrajectories=ntrajectories, nevents=nevents, seed=2)
assert_approx_equal(post2, -1e100)
| gpl-3.0 |
guschmue/tensorflow | tensorflow/examples/learn/hdf5_classification.py | 75 | 2899 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of DNNClassifier for Iris plant dataset, hdf5 format."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from sklearn import datasets
from sklearn import metrics
from sklearn import model_selection
import tensorflow as tf
import h5py # pylint: disable=g-bad-import-order
X_FEATURE = 'x' # Name of the input feature.
def main(unused_argv):
# Load dataset.
iris = datasets.load_iris()
x_train, x_test, y_train, y_test = model_selection.train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
# Note that we are saving and loading iris data in h5 format as a simple
# demonstration here.
h5f = h5py.File('/tmp/test_hdf5.h5', 'w')
h5f.create_dataset('X_train', data=x_train)
h5f.create_dataset('X_test', data=x_test)
h5f.create_dataset('y_train', data=y_train)
h5f.create_dataset('y_test', data=y_test)
h5f.close()
h5f = h5py.File('/tmp/test_hdf5.h5', 'r')
x_train = np.array(h5f['X_train'])
x_test = np.array(h5f['X_test'])
y_train = np.array(h5f['y_train'])
y_test = np.array(h5f['y_test'])
# Build 3 layer DNN with 10, 20, 10 units respectively.
feature_columns = [
tf.feature_column.numeric_column(
X_FEATURE, shape=np.array(x_train).shape[1:])]
classifier = tf.estimator.DNNClassifier(
feature_columns=feature_columns, hidden_units=[10, 20, 10], n_classes=3)
# Train.
train_input_fn = tf.estimator.inputs.numpy_input_fn(
x={X_FEATURE: x_train}, y=y_train, num_epochs=None, shuffle=True)
classifier.train(input_fn=train_input_fn, steps=200)
# Predict.
test_input_fn = tf.estimator.inputs.numpy_input_fn(
x={X_FEATURE: x_test}, y=y_test, num_epochs=1, shuffle=False)
predictions = classifier.predict(input_fn=test_input_fn)
y_predicted = np.array(list(p['class_ids'] for p in predictions))
y_predicted = y_predicted.reshape(np.array(y_test).shape)
# Score with sklearn.
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy (sklearn): {0:f}'.format(score))
# Score with tensorflow.
scores = classifier.evaluate(input_fn=test_input_fn)
print('Accuracy (tensorflow): {0:f}'.format(scores['accuracy']))
if __name__ == '__main__':
tf.app.run()
| apache-2.0 |
bbfamily/abu | abupy/MarketBu/ABuDataFeed.py | 1 | 18382 | # coding=utf-8
"""
Built-in data source example implementation module:
all data interfaces here are for learning and basic testing only; please purchase a proper data feed for further use
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import os
import random
import math
import sqlite3 as sqlite
import pandas as pd
from ..CoreBu.ABuEnv import EMarketTargetType, EMarketSubType
from ..CoreBu import ABuEnv
from ..MarketBu import ABuNetWork
from ..MarketBu.ABuDataBase import StockBaseMarket, SupportMixin, FuturesBaseMarket, TCBaseMarket
from ..MarketBu.ABuDataParser import BDParser, TXParser, NTParser, SNUSParser
from ..MarketBu.ABuDataParser import SNFuturesParser, SNFuturesGBParser, HBTCParser
from ..UtilBu import ABuStrUtil, ABuDateUtil, ABuMd5
from ..UtilBu.ABuDTUtil import catch_error
from ..CoreBu.ABuDeprecated import AbuDeprecated
# noinspection PyUnresolvedReferences
from ..CoreBu.ABuFixes import xrange, range, filter
"""网络请求(连接10秒,接收60秒)超时时间"""
K_TIME_OUT = (10, 60)
def random_from_list(array):
"""从参数array中随机取一个元素"""
# 在array长度短的情况下,测试比np.random.choice效率要高
return array[random.randrange(0, len(array))]
@AbuDeprecated('only read old symbol db, miss update!!!')
def query_symbol_sub_market(symbol):
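# Look up the sub-market suffix (e.g. '.N', '.OQ') for a symbol in the bundled legacy symbol db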
path = TXApi.K_SYMBOLS_DB
conn = sqlite.connect(path)
cur = conn.cursor()
symbol = symbol.lower()
query = "select {} from {} where {} like \'{}.%\'".format(TXApi.K_DB_TABLE_SN, TXApi.K_DB_TABLE_NAME,
TXApi.K_DB_TABLE_SN, symbol)
cur.execute(query)
results = cur.fetchall()
conn.close()
sub_market = ''
if results is not None and len(results) > 0:
try:
if results[0][0].find('.') > 0:
sub_market = '.' + results[0][0].split('.')[1].upper()
except:
logging.info(results)
return sub_market
@catch_error(return_val=None, log=False)
def query_symbol_from_pinyin(pinyin):
"""通过拼音对symbol进行模糊查询"""
path = TXApi.K_SYMBOLS_DB
conn = sqlite.connect(path)
cur = conn.cursor()
pinyin = pinyin.lower()
query = "select stockCode from {} where pinyin=\'{}\'".format(TXApi.K_DB_TABLE_NAME, pinyin)
cur.execute(query)
results = cur.fetchall()
conn.close()
if len(results) > 0:
code = results[0][0]
# the stock code found, e.g. sh111111, usabcd.n
start = 2
end = len(code)
if '.' in code:
# for US stocks, cut the code at the '.'
end = code.find('.')
return code[start:end]
class BDApi(StockBaseMarket, SupportMixin):
"""bd数据源,支持港股,美股,a股"""
K_NET_CONNECT_START = '&start='
K_NET_DAY = 'http://gp.baidu.com:80/stocks/stockkline?from=android&os_ver=21&format=json&vv=3.3.0' \
'&uid=&BDUSS=&cuid=%s&channel=default_channel&device=%s&logid=%s&actionid=%s&device_net_type' \
'=wifi&period=day&stock_code=%s&fq_type=front'
MINUTE_NET_5D = 'http://gp.baidu.com:80/stocks/stocktimelinefive?from=android&os_ver=21&format=json' \
'&vv=3.3&uid=&BDUSS=&cuid=%s&channel=default_channel&device=%s&logid=%s&actionid=%s' \
'&device_net_type=wifi&stock_code=%s&step=10'
def __init__(self, symbol):
"""
:param symbol: a Symbol instance
"""
super(BDApi, self).__init__(symbol)
self._action_id = int(ABuDateUtil.time_seconds())
self._version2_log_cnt = 0
self.data_parser_cls = BDParser
def kline(self, n_folds=2, start=None, end=None):
"""日k线接口"""
self._version2_log_cnt += 1
log_id = self._action_id + self._version2_log_cnt * 66
cuid = ABuStrUtil.create_random_with_num_low(40)
device = random_from_list(StockBaseMarket.K_DEV_MODE_LIST)
url = BDApi.K_NET_DAY % (cuid, device, str(log_id), str(self._action_id), self._symbol.value)
# logging.info(url)
next_start = None
kl_df = None
if start:
# need to recompute n_folds from the requested start date
days = ABuDateUtil.diff(start, ABuDateUtil.current_str_date(), check_order=False)
# each request returns 300 rows of data
n_folds = int(days / 300.0)
for _ in xrange(0, n_folds):
if next_start:
url = url + BDApi.K_NET_CONNECT_START + str(next_start)
# logging.info(url)
data = ABuNetWork.get(url=url, timeout=K_TIME_OUT)
temp_df = None
if data is not None:
temp_df = self.data_parser_cls(self._symbol, data.json()).df
if temp_df is not None:
next_start = int(temp_df.loc[temp_df.index[0], ['date']].values[0])
kl_df = temp_df if kl_df is None else pd.concat([temp_df, kl_df])
# requests are made sequentially and are independent of time, so we can break out of the for loop directly
if kl_df is None:
return None
"""由于每次放回300条>1年的数据,所以超出总数就不再请求下一组"""
if kl_df.shape[0] > ABuEnv.g_market_trade_year * n_folds:
break
return StockBaseMarket._fix_kline_pd(kl_df, n_folds, start, end)
def minute(self, n_folds=5, *args, **kwargs):
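# Minute-level (5-day intraday) k-line interface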
self._version2_log_cnt += 1
cuid = ABuStrUtil.create_random_with_num_low(40)
log_id = self._action_id + self._version2_log_cnt * 66
device = random_from_list(StockBaseMarket.K_DEV_MODE_LIST)
url = BDApi.MINUTE_NET_5D % (cuid, device, str(log_id), str(self._action_id), self._symbol.value)
return ABuNetWork.get(url=url, timeout=K_TIME_OUT).json()
class TXApi(StockBaseMarket, SupportMixin):
"""tx数据源,支持港股,美股,a股"""
K_NET_BASE = "http://ifzq.gtimg.cn/appstock/app/%sfqkline/get?p=1¶m=%s,day,,,%d," \
"qfq&_appName=android&_dev=%s&_devId=%s&_mid=%s&_md5mid=%s&_appver=4.2.2&_ifChId=303&_screenW=%d" \
"&_screenH=%d&_osVer=%s&_uin=10000&_wxuin=20000&__random_suffix=%d"
K_NET_HK_MNY = 'http://proxy.finance.qq.com/ifzqgtimg/stock/corp/hkmoney/sumary?' \
'symbol=%s&type=sum&jianjie=1&_appName=android' \
'&_dev=%s&_devId=%s&_mid=%s&_md5mid=%s&_appver=5.5.0&_ifChId=277' \
'&_screenW=%d&_screenH=%d&_osVer=%s&_uin=10000&_wxuin=20000&_net=WIFI&__random_suffix=%d'
K_DB_TABLE_NAME = "values_table"
K_DB_TABLE_SN = "stockCode"
p_dir = os.path.abspath(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.path.pardir))
K_SYMBOLS_DB = os.path.join(p_dir, 'RomDataBu/symbols_db.db')
def __init__(self, symbol):
"""
:param symbol: a Symbol instance
"""
super(TXApi, self).__init__(symbol)
# set the data source parser class
self.data_parser_cls = TXParser
def kline(self, n_folds=2, start=None, end=None):
"""日k线接口"""
cuid = ABuStrUtil.create_random_with_num_low(40)
cuid_md5 = ABuMd5.md5_from_binary(cuid)
random_suffix = ABuStrUtil.create_random_with_num(5)
dev_mod = random_from_list(StockBaseMarket.K_DEV_MODE_LIST)
os_ver = random_from_list(StockBaseMarket.K_OS_VERSION_LIST)
screen = random_from_list(StockBaseMarket.K_PHONE_SCREEN)
days = ABuEnv.g_market_trade_year * n_folds + 1
# when start is given, compute the number of days to fetch; the data fetched is always more than needed, because trading days never exceed calendar days in the same span
if start:
temp_end = ABuDateUtil.current_str_date()
days = ABuDateUtil.diff(start, temp_end, check_order=False)
sub_market = None
if self._symbol.market == EMarketTargetType.E_MARKET_TARGET_US:
# sub_market = self.query_symbol_sub_market(self._symbol.value)
market = self._symbol.market.value
if '.' in self._symbol.value:
# a '.' already in the symbol means a market index, which needs no sub-market, e.g. us.IXIC
sub_market = ''
else:
# the tx source does not support US_PINK, US_OTC, US_PREIPO here
sub_market_map = {EMarketSubType.US_N.value: 'n',
EMarketSubType.US_PINK.value: 'n',
EMarketSubType.US_OTC.value: 'n',
EMarketSubType.US_PREIPO.value: 'n',
EMarketSubType.US_AMEX.value: 'n',
EMarketSubType.US_OQ.value: 'oq'}
sub_market = '.{}'.format(sub_market_map[self._symbol.sub_market.value])
url = TXApi.K_NET_BASE % (
market, self._symbol.value + sub_market, days,
dev_mod, cuid, cuid, cuid_md5, screen[0], screen[1], os_ver, int(random_suffix, 10))
elif self._symbol.market == EMarketTargetType.E_MARKET_TARGET_HK:
market = self._symbol.market.value
url = TXApi.K_NET_BASE % (
market, self._symbol.value, days,
dev_mod, cuid, cuid, cuid_md5, screen[0], screen[1], os_ver, int(random_suffix, 10))
else:
market = ''
url = TXApi.K_NET_BASE % (
market, self._symbol.value, days,
dev_mod, cuid, cuid, cuid_md5, screen[0], screen[1], os_ver, int(random_suffix, 10))
data = ABuNetWork.get(url, timeout=K_TIME_OUT)
if data is not None:
kl_pd = self.data_parser_cls(self._symbol, sub_market, data.json()).df
else:
return None
return StockBaseMarket._fix_kline_pd(kl_pd, n_folds, start, end)
def hkmoney(self):
"""港股概要信息接口"""
if self._symbol.market != EMarketTargetType.E_MARKET_TARGET_HK:
raise TypeError('hkmoney only support hk!!')
cuid = ABuStrUtil.create_random_with_num_low(40)
cuid_md5 = ABuMd5.md5_from_binary(cuid)
random_suffix = ABuStrUtil.create_random_with_num(5)
dev_mod = random_from_list(StockBaseMarket.K_DEV_MODE_LIST)
os_ver = random_from_list(StockBaseMarket.K_OS_VERSION_LIST)
screen = random_from_list(StockBaseMarket.K_PHONE_SCREEN)
url = TXApi.K_NET_HK_MNY % (self._symbol.value, dev_mod, cuid, cuid, cuid_md5, screen[0], screen[1], os_ver,
int(random_suffix, 10))
return ABuNetWork.get(url, timeout=K_TIME_OUT)
def minute(self, n_fold=5, *args, **kwargs):
"""分钟k线接口"""
raise NotImplementedError('TXApi minute NotImplementedError!')
class NTApi(StockBaseMarket, SupportMixin):
"""nt数据源,支持港股,美股,a股"""
K_NET_BASE = "http://img1.money.126.net/data/%s/kline/day/history/%d/%s.json"
def __init__(self, symbol):
"""
:param symbol: a Symbol instance
"""
super(NTApi, self).__init__(symbol)
# set the data source parser class
self.data_parser_cls = NTParser
def kline(self, n_folds=2, start=None, end=None):
"""日k线接口"""
kl_df = None
if start is None or end is None:
end_year = int(ABuDateUtil.current_str_date()[:4])
start_year = end_year - n_folds + 1
else:
start_year = int(start[:4])
end_year = int(end[:4])
req_year = list(range(start_year, end_year + 1))
if self._symbol.market == EMarketTargetType.E_MARKET_TARGET_US:
market = self._symbol.market.value
symbol = self._symbol.symbol_code.upper()
if self._symbol.is_us_index():
# ntes needs a mapping to match market index symbols
index_dict = {'.DJI': 'DOWJONES', '.IXIC': 'NASDAQ', '.INX': 'SP500'}
symbol = index_dict[symbol]
elif self._symbol.market == EMarketTargetType.E_MARKET_TARGET_HK:
market = self._symbol.market.value
symbol = self._symbol.symbol_code.upper()
elif self._symbol.market == EMarketTargetType.E_MARKET_TARGET_CN:
market = self._symbol.market.value
symbol = self._symbol.symbol_code
if self._symbol.is_sz_stock():
symbol = '1{}'.format(symbol)
else:
symbol = '0{}'.format(symbol)
else:
raise TypeError('NTApi dt support {}'.format(self._symbol.market))
for year in req_year:
url = NTApi.K_NET_BASE % (market, year, symbol)
data = ABuNetWork.get(url=url, retry=1, timeout=K_TIME_OUT)
temp_df = None
if data is not None:
temp_df = self.data_parser_cls(self._symbol, data.json()).df
if temp_df is not None:
kl_df = temp_df if kl_df is None else kl_df.append(temp_df)
if kl_df is None:
return None
return StockBaseMarket._fix_kline_pd(kl_df, n_folds, start, end)
def minute(self, n_fold=5, *args, **kwargs):
"""分钟k线接口"""
raise NotImplementedError('NTApi minute NotImplementedError!')
class SNUSApi(StockBaseMarket, SupportMixin):
"""snus数据源,支持美股"""
K_NET_BASE = "http://stock.finance.sina.com.cn/usstock/api/json_v2.php/US_MinKService.getDailyK?" \
"symbol=%s&___qn=3n"
def __init__(self, symbol):
"""
:param symbol: a Symbol instance
"""
super(SNUSApi, self).__init__(symbol)
# set the data source parser class
self.data_parser_cls = SNUSParser
def _support_market(self):
"""声明数据源支持美股"""
return [EMarketTargetType.E_MARKET_TARGET_US]
def kline(self, n_folds=2, start=None, end=None):
"""日k线接口"""
url = SNUSApi.K_NET_BASE % self._symbol.symbol_code
data = ABuNetWork.get(url=url, timeout=K_TIME_OUT).json()
kl_df = self.data_parser_cls(self._symbol, data).df
if kl_df is None:
return None
return StockBaseMarket._fix_kline_pd(kl_df, n_folds, start, end)
def minute(self, n_fold=5, *args, **kwargs):
"""分钟k线接口"""
raise NotImplementedError('SNUSApi minute NotImplementedError!')
class SNFuturesApi(FuturesBaseMarket, SupportMixin):
"""sn futures数据源,支持国内期货"""
K_NET_BASE = "http://stock.finance.sina.com.cn/futures/api/json_v2.php/" \
"IndexService.getInnerFuturesDailyKLine?symbol=%s"
def __init__(self, symbol):
"""
:param symbol: a Symbol instance
"""
super(SNFuturesApi, self).__init__(symbol)
# set the data source parser class
self.data_parser_cls = SNFuturesParser
def _support_market(self):
"""声明数据源支持期货数据"""
return [EMarketTargetType.E_MARKET_TARGET_FUTURES_CN]
def kline(self, n_folds=2, start=None, end=None):
"""日k线接口"""
url = SNFuturesApi.K_NET_BASE % self._symbol.symbol_code
data = ABuNetWork.get(url=url, timeout=K_TIME_OUT).json()
kl_df = self.data_parser_cls(self._symbol, data).df
if kl_df is None:
return None
return FuturesBaseMarket._fix_kline_pd(kl_df, n_folds, start, end)
class SNFuturesGBApi(FuturesBaseMarket, SupportMixin):
"""sn futures数据源,支持国际期货"""
K_NET_BASE = "http://stock2.finance.sina.com.cn/futures/api/jsonp.php/" \
"var %s%s=/GlobalFuturesService.getGlobalFuturesDailyKLine?symbol=%s&_=%s"
def __init__(self, symbol):
"""
:param symbol: a Symbol instance
"""
super(SNFuturesGBApi, self).__init__(symbol)
# set the data source parser class
self.data_parser_cls = SNFuturesGBParser
def _support_market(self):
"""声明数据源支持期货数据, 支持国际期货市场"""
return [EMarketTargetType.E_MARKET_TARGET_FUTURES_GLOBAL]
def kline(self, n_folds=2, start=None, end=None):
"""日k线接口"""
today = ABuDateUtil.current_str_date().replace('-', '_')
url = SNFuturesGBApi.K_NET_BASE % (self._symbol.symbol_code, today, self._symbol.symbol_code, today)
data = ABuNetWork.get(url=url, timeout=(10, 60))
text = data.text
# the response is a JavaScript string; parse the embedded dict out of it
js_dict = ABuNetWork.parse_js(text[text.find('=(') + 2:text.rfind(')')])
kl_df = self.data_parser_cls(self._symbol, js_dict).df
if kl_df is None:
return None
return FuturesBaseMarket._fix_kline_pd(kl_df, n_folds, start, end)
class HBApi(TCBaseMarket, SupportMixin):
"""hb数据源,支持币类,比特币,莱特币"""
K_NET_BASE = 'https://www.huobi.com/qt/staticmarket/%s_kline_100_json.js?length=%d'
def __init__(self, symbol):
"""
:param symbol: a Symbol instance
"""
super(HBApi, self).__init__(symbol)
# set the data source parser class
self.data_parser_cls = HBTCParser
def _support_market(self):
"""只支持币类市场"""
return [EMarketTargetType.E_MARKET_TARGET_TC]
def kline(self, n_folds=2, start=None, end=None):
"""日k线接口"""
req_cnt = n_folds * ABuEnv.g_market_trade_year
if start is not None and end is not None:
# round up; _fix_kline_pd trims the result again below. Must use current_str_date here, not end
folds = math.ceil(ABuDateUtil.diff(ABuDateUtil.date_str_to_int(start),
ABuDateUtil.current_str_date()) / 365)
req_cnt = folds * ABuEnv.g_market_trade_year
url = HBApi.K_NET_BASE % (self._symbol.symbol_code, req_cnt)
data = ABuNetWork.get(url=url, timeout=K_TIME_OUT).json()
kl_df = self.data_parser_cls(self._symbol, data).df
if kl_df is None:
return None
return TCBaseMarket._fix_kline_pd(kl_df, n_folds, start, end)
def minute(self, *args, **kwargs):
"""分钟k线接口"""
raise NotImplementedError('HBApi minute NotImplementedError!')
| gpl-3.0 |
wellflat/cat-fancier | classifier/bin/classifier.py | 1 | 2726 | #!/usr/local/bin/python
# -*- coding: utf-8 -*-
import csv
import os
import re
import caffe
import numpy as np
from sklearn import preprocessing
from sklearn.externals import joblib
from pprint import pprint
def classify(imagelist, labels, protofilename, pretrainedname,
meanfilename, modelfilename):
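# Load the ImageNet mean, build the Caffe net used as a fixed feature extractor,
# and restore the fitted sklearn model (best estimator of a saved search object).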
mean = np.load(meanfilename)
net = caffe.Classifier(protofilename, pretrainedname, mean=mean,
channel_swap=(2,1,0), image_dims=(256,256), raw_scale=255)
print('# ----- Target labels -----')
print(labels)
estimator = joblib.load(modelfilename)
clf = estimator.best_estimator_
print(clf)
features = []
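# For each image: oversample crops, run a forward pass, take the fc6 activations
# and standardize them to zero mean / unit variance before classification.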
for imagefilename in imagelist:
image = caffe.io.load_image(imagefilename)
oversampled = caffe.io.oversample([caffe.io.resize_image(image, net.image_dims)],
net.crop_dims)
inputdata = np.asarray([net.preprocess('data', in_) for in_ in oversampled])
net.forward(data=inputdata)
feature = net.blobs['fc6i'].data[4]
flattenfeature = feature.flatten().tolist()
scaledfeature = preprocessing.scale(flattenfeature)
features.append(scaledfeature)
#predlabels = clf.predict(features)
predprobas = clf.predict_proba(features)
predictions = []
#print(predprobas)
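# Keep the top-5 labels by predicted probability for each image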
for predproba in predprobas:
topk = predproba.argsort()[-1:-6:-1]
predictions.append(zip(labels[topk], predproba[topk]))
pprint(zip(imagelist, predictions))
def getlabels(labelfilename):
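# Read label names from the tab-separated label file, one label per row (first column)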
labels = []
reader = csv.reader(open(labelfilename, 'r'), delimiter='\t', lineterminator='\n')
for line in reader:
labels.append(line[0])
return np.array(labels)
def createimagelist(imagefileordir):
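# Accept either a single image file or a directory and return a list of image paths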
if os.path.isdir(imagefileordir):
pattern = re.compile('.*[.](jpg|jpeg|png|bmp|gif)$')
images = [imagefileordir + '/' + image for image
in os.listdir(imagefileordir) if re.match(pattern, image)]
elif os.path.isfile(imagefileordir):
images = [imagefileordir]
return images
if __name__ == '__main__':
os.chdir(os.path.dirname(__file__))
IMAGE_FILE = '../data/cat_images/Russian_Blue_212.jpg'
IMAGE_DIR = '../../cat_test_images'
LABEL_FILE = '../data/cat_label.tsv'
PROTO_FILE = '../data/imagenet_feature.prototxt'
PRETRAINED = '../data/caffe_reference_imagenet_model'
MEAN_FILE = '../data/ilsvrc_2012_mean.npy'
MODEL_FILE = '../data/models/cat_model_lr.pkl'
imagelist = createimagelist(IMAGE_FILE)
labels = getlabels(LABEL_FILE)
classify(imagelist, labels, PROTO_FILE, PRETRAINED, MEAN_FILE, MODEL_FILE)
| mit |
davidsamu/seal | seal/roc/roccore.py | 1 | 9590 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Functions for performing and processing ROC analyses.
@author: David Samu
"""
import numpy as np
import pandas as pd
from sklearn.metrics import roc_auc_score
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import permutation_test_score
from seal.util import util
from seal.roc import rocutil, rocpost
# Some analysis constants.
min_sample_size = 10
n_folds = 5
n_jobs = 1 # or util.get_n_cores() - 1
# DataFrame containing analysis params per feature to analyze.
feat_pars = {'DS': ('pref_anti_dirs', 'pref and anti dirs', None, None),
# 'CE': ('S_D_trials', 'S and D trials', 1900*ms, None)}
'CE': ('S_D_trials', 'S and D trials', None, None)}
feat_pars = pd.DataFrame(feat_pars, index=('prefix', 'btw_str', 't1', 't2')).T
# %% Core ROC analysis functions.
def calc_auc(clf, x, y):
"""
Calculate area under the curve of ROC analysis.
y values have to be 0 and 1!
"""
# Format x into array of arrays.
if len(x.shape) < 2:
x = np.array(x, ndmin=2).T
# Fit model to data.
clf.fit(x, y)
# Get prediction probability of classes.
preds = clf.predict_proba(x)
# Select class of higher mean to be taken as one to be predicted.
# idx = pd.Series(x, index=y).groupby(y).mean().idxmax() # much slower :(
idx = int(np.mean(x[y == 0]) < np.mean(x[y == 1]))
y_pred = preds[:, idx]
# Calculate area under the curve (AUC) using true and predicted y values.
auc = roc_auc_score(y, y_pred)
return auc
def ROC(x, y, n_perm=None, clf=None):
"""
Perform ROC analysis with optional permutation test.
y values have to be 0 and 1 for calc_auc()!
"""
# Remove NaN values.
idx = np.logical_and(~np.isnan(x), ~np.isnan(y))
x, y = np.array(x[idx]), np.array(y[idx])
# Insufficient sample size or not exactly two values to classify.
n_yvals = len(np.unique(y))
if (min(len(x), len(y)) < min_sample_size) or (n_yvals != 2):
if n_yvals > 2:
print('More than two values to classify:' + str(np.unique(y)))
return np.nan, None
# Format x into array of arrays.
x = np.array(x, ndmin=2).T
# Default classifier.
if clf is None:
clf = LogisticRegression()
# Calculate AUC of true data.
true_auc = calc_auc(clf, x, y)
# Permutation test.
pvalue = None
if n_perm is not None and n_perm > 0:
cv = StratifiedKFold(n_folds)
# Test significance of classification with cross-validated permutation.
res = permutation_test_score(clf, x, y, scoring='accuracy', cv=cv,
n_permutations=n_perm, n_jobs=n_jobs)
score, perm_scores, pvalue = res
return true_auc, pvalue
# %% Higher level functions to run AROC on a unit and group of units over time.
def run_ROC_over_time(rates1, rates2, n_perm=None, clf=None):
"""Run ROC analysis between two rate frames (trials by time)."""
    # Merge rates and create the target vector.
rates = pd.concat([rates1, rates2])
target_vec = pd.Series(len(rates.index)*[1], index=rates.index)
target_vec[rates2.index] = 0 # all y values have to be 0/1 for ROC
# Default classifier.
if clf is None:
clf = LogisticRegression()
# Run ROC across time.
roc_res = pd.DataFrame([ROC(rates[t], target_vec, n_perm, clf)
for t in rates],
index=rates.columns, columns=['auc', 'pval'])
return roc_res
def run_unit_ROC_over_time(u, prd, ref_ev, trs_list, nrate, tstep, n_perm,
zscore_by, verbose):
"""Run ROC analysis of unit over time. Suitable for parallelization."""
# Report progress.
if verbose:
print(u.Name)
# Set up params: trials, time period and rates.
t1s, t2s = u.pr_times(prd, concat=False)
ref_ts = u.ev_times(ref_ev)
# Prepare rates.
rates1, rates2 = [u._Rates[nrate].get_rates(trs, t1s, t2s, ref_ts, tstep)
for trs in trs_list]
# Z-score rates by some trial parameter.
rates = pd.concat([rates1, rates2])
if zscore_by is not None:
ztrs = u.trials_by_param(zscore_by)
for par, trs in ztrs.items():
itrs = rates.index.intersection(trs)
if len(itrs):
irates = rates.loc[itrs]
mean = irates.mean()
std = irates.std(ddof=0)
rates.loc[itrs, :] = (irates - mean)/std
rates1, rates2 = [rates.loc[r.index] for r in (rates1, rates2)]
# Calculate AROC on rates.
aroc_res = run_ROC_over_time(rates1, rates2, n_perm)
return aroc_res
def run_group_ROC_over_time(ulist, trs_list, prd, ref_ev, n_perm=None,
nrate=None, tstep=None, zscore_by=None,
verbose=True):
"""Run ROC over list of units over time."""
# Run unit-wise AROC test in pool.
params = [(u, prd, ref_ev, trs_list[i], nrate, tstep,
n_perm, zscore_by, verbose) for i, u in enumerate(ulist)]
aroc_res = util.run_in_pool(run_unit_ROC_over_time, params)
# Separate AUC and p-values.
aroc_dict = {u.Name: aroc.auc for u, aroc in zip(ulist, aroc_res)}
pval_dict = {u.Name: aroc.pval for u, aroc in zip(ulist, aroc_res)}
# Concat into DF.
aroc_res = pd.concat(aroc_dict, axis=1).T
pval_res = pd.concat(pval_dict, axis=1).T
return aroc_res, pval_res
def calc_AROC(ulist, trs_list, prd_pars, n_perm, nrate, tstep, fres,
verbose=True, rem_all_nan_units=True, rem_any_nan_times=True):
"""Calculate and plot AROC over time between specified sets of trials."""
stims = prd_pars.index
aroc_list, pval_list = [], []
for stim in stims:
print(' ' + stim)
# Extract period params.
pars = ['prd', 'ref_ev', 'feat', 'cond_by', 'zscore_by']
prd, ref_ev, sfeat, sep_by, zscore_by = prd_pars.loc[stim, pars]
# Calculate AROC DF.
aroc, pval = run_group_ROC_over_time(ulist, trs_list[stim], prd,
ref_ev, n_perm, nrate, tstep,
zscore_by, verbose)
aroc_list.append(aroc)
pval_list.append(pval)
# Concatenate stimulus-specific results.
tshifts = list(prd_pars.stim_start)
truncate_prds = [list(prd_pars.loc[stim, ['prd_start', 'prd_stop']])
for stim in stims]
aroc = util.concat_stim_prd_res(aroc_list, tshifts, truncate_prds,
rem_all_nan_units, rem_any_nan_times)
pval = util.concat_stim_prd_res(pval_list, tshifts, truncate_prds,
rem_all_nan_units, rem_any_nan_times)
# Refill pvals with NaN's if it gets completely emptied above.
if pval.empty:
pval = pd.DataFrame(columns=pval.columns, index=aroc.index)
# Save results.
if fres is not None:
aroc_res = {'aroc': aroc, 'pval': pval, 'nrate': nrate,
'tstep': tstep, 'n_perm': n_perm}
util.write_objects(aroc_res, fres)
return aroc, pval
# %% ROC misc functions.
def run_ROC(ulist, trs_list, nrate, tstep, n_perm, offsets, prd_pars,
ares_dir):
# Calculate and save AROC results.
fres = rocutil.aroc_res_fname(ares_dir, nrate, tstep, n_perm, offsets)
calc_AROC(ulist, trs_list, prd_pars, n_perm, nrate, tstep, fres)
def plot_ROC(nrate, task, sort_prds, prd_pars, tstep, n_perm, offsets, prefix,
btw_str, pth, min_len, vth_hi, vth_lo, cmaps, merge_hi_lo,
flip_aroc_vals, ares_dir, t1=None, t2=None):
# Plot AROC matrix sorted by different periods.
res = rocutil.load_aroc_res(ares_dir, nrate, tstep, n_perm, offsets)
aroc, pval = res['aroc'], res['pval']
# Plot results on heatmap.
rocpost.plot_ROC_heatmap(aroc, pval, task, nrate, tstep, n_perm,
sort_prds, prd_pars, offsets, ares_dir,
prefix, btw_str, pth, min_len, vth_hi, vth_lo,
cmaps, merge_hi_lo, flip_aroc_vals, t1, t2)
def run_plot_feat_ROC(task, feat, feat_params, sort_prds, prd_pars, stims,
tstep, nrate, n_perm, offsets, pth, min_len, nlist,
ulist, runroc, plotroc, vth_hi, vth_lo, cmaps,
merge_hi_lo, flip_aroc_vals, aroc_res_dir,
t1=None, t2=None):
"""Run and plot ROC run on given feature."""
ares_dir = aroc_res_dir + '{}/{}/'.format(feat, nlist)
prefix, btw_str, t1, t2 = feat_params.loc[feat]
if runroc:
# Collect trials to use.
if feat == 'DS':
trs_list = {stim: [u.dir_pref_anti_trials(stim, [stim], offsets)
for u in ulist] for stim in stims}
elif feat == 'CE':
trs_list = {stim: [u.S_D_trials('S2', offsets)
for u in ulist] for stim in stims}
else:
print('Unknown feature to run ROC on: ', feat)
return
# Run ROC.
run_ROC(ulist, trs_list, nrate, tstep, n_perm, offsets, prd_pars,
ares_dir)
if plotroc:
# Plot ROC results.
plot_ROC(nrate, task, sort_prds, prd_pars, tstep, n_perm, offsets,
prefix, btw_str, pth, min_len, vth_hi, vth_lo, cmaps,
merge_hi_lo, flip_aroc_vals, ares_dir, t1, t2)
| gpl-3.0 |
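calc_auc() and ROC() above combine a logistic-regression ranking, roc_auc_score, and a cross-validated permutation test. The following self-contained sketch reproduces that recipe on synthetic firing rates; the data and the simplification of always scoring the class-1 probability column are assumptions of this sketch, not part of the module.

import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import StratifiedKFold, permutation_test_score

rng = np.random.RandomState(0)
x = np.r_[rng.normal(0.0, 1.0, 50), rng.normal(1.0, 1.0, 50)]   # rates in two conditions
y = np.r_[np.zeros(50, dtype=int), np.ones(50, dtype=int)]      # labels must be 0 and 1

X = x.reshape(-1, 1)                     # single-feature design matrix
clf = LogisticRegression().fit(X, y)
auc = roc_auc_score(y, clf.predict_proba(X)[:, 1])

score, perm_scores, pvalue = permutation_test_score(
    LogisticRegression(), X, y, scoring='accuracy',
    cv=StratifiedKFold(5), n_permutations=200)
print(auc, pvalue)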
mrry/tensorflow | tensorflow/examples/skflow/text_classification_save_restore.py | 14 | 3603 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from sklearn import metrics
import pandas
import tensorflow as tf
from tensorflow.contrib import learn
### Training data
# Downloads, unpacks and reads DBpedia dataset.
dbpedia = learn.datasets.load_dataset('dbpedia')
X_train, y_train = pandas.DataFrame(dbpedia.train.data)[1], pandas.Series(dbpedia.train.target)
X_test, y_test = pandas.DataFrame(dbpedia.test.data)[1], pandas.Series(dbpedia.test.target)
### Process vocabulary
MAX_DOCUMENT_LENGTH = 10
vocab_processor = learn.preprocessing.VocabularyProcessor(MAX_DOCUMENT_LENGTH)
X_train = np.array(list(vocab_processor.fit_transform(X_train)))
X_test = np.array(list(vocab_processor.transform(X_test)))
n_words = len(vocab_processor.vocabulary_)
print('Total words: %d' % n_words)
### Models
EMBEDDING_SIZE = 50
def average_model(X, y):
word_vectors = learn.ops.categorical_variable(X, n_classes=n_words,
embedding_size=EMBEDDING_SIZE, name='words')
features = tf.reduce_max(word_vectors, reduction_indices=1)
return learn.models.logistic_regression(features, y)
def rnn_model(X, y):
"""Recurrent neural network model to predict from sequence of words
to a class."""
# Convert indexes of words into embeddings.
# This creates embeddings matrix of [n_words, EMBEDDING_SIZE] and then
# maps word indexes of the sequence into [batch_size, sequence_length,
# EMBEDDING_SIZE].
word_vectors = learn.ops.categorical_variable(X, n_classes=n_words,
embedding_size=EMBEDDING_SIZE, name='words')
# Split into list of embedding per word, while removing doc length dim.
# word_list results to be a list of tensors [batch_size, EMBEDDING_SIZE].
word_list = tf.unpack(word_vectors, axis=1)
# Create a Gated Recurrent Unit cell with hidden size of EMBEDDING_SIZE.
cell = tf.nn.rnn_cell.GRUCell(EMBEDDING_SIZE)
# Create an unrolled Recurrent Neural Networks to length of
# MAX_DOCUMENT_LENGTH and passes word_list as inputs for each unit.
_, encoding = tf.nn.rnn(cell, word_list, dtype=tf.float32)
# Given encoding of RNN, take encoding of last step (e.g hidden size of the
# neural network of last step) and pass it as features for logistic
# regression over output classes.
return learn.models.logistic_regression(encoding, y)
model_path = '/tmp/skflow_examples/text_classification'
if os.path.exists(model_path):
classifier = learn.TensorFlowEstimator.restore(model_path)
else:
classifier = learn.TensorFlowEstimator(model_fn=rnn_model, n_classes=15,
steps=100, optimizer='Adam', learning_rate=0.01, continue_training=True)
# Continuously train (100 steps per fit call) until interrupted, then save the model
while True:
try:
classifier.fit(X_train, y_train)
except KeyboardInterrupt:
classifier.save(model_path)
break
# Predict on test set
score = metrics.accuracy_score(y_test, classifier.predict(X_test))
print('Accuracy: {0:f}'.format(score))
| apache-2.0 |
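The example above follows a restore-if-saved / train-until-interrupted / save-on-Ctrl-C pattern, but it relies on the old tf.contrib.learn TensorFlowEstimator API, which is gone from TensorFlow 2. A hedged sketch of the same pattern using scikit-learn's SGDClassifier and joblib instead is shown below; the file path and synthetic data are placeholders, not values from the example.

import os
import joblib
import numpy as np
from sklearn.linear_model import SGDClassifier

model_path = '/tmp/skflow_examples/text_classification.joblib'
rng = np.random.RandomState(0)
X_train, y_train = rng.randn(200, 20), rng.randint(2, size=200)

if os.path.exists(model_path):
    classifier = joblib.load(model_path)             # resume from the saved model
else:
    classifier = SGDClassifier()
try:
    while True:                                      # keep refining until interrupted
        classifier.partial_fit(X_train, y_train, classes=np.array([0, 1]))
except KeyboardInterrupt:
    os.makedirs(os.path.dirname(model_path), exist_ok=True)
    joblib.dump(classifier, model_path)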
LohithBlaze/scikit-learn | examples/mixture/plot_gmm_selection.py | 248 | 3223 | """
=================================
Gaussian Mixture Model Selection
=================================
This example shows that model selection can be performed with
Gaussian Mixture Models using information-theoretic criteria (BIC).
Model selection concerns both the covariance type
and the number of components in the model.
In that case, AIC also provides the right result (not shown to save time),
but BIC is better suited if the problem is to identify the right model.
Unlike Bayesian procedures, such inferences are prior-free.
In that case, the model with 2 components and full covariance
(which corresponds to the true generative model) is selected.
"""
print(__doc__)
import itertools
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
# Number of samples per component
n_samples = 500
# Generate random sample, two components
np.random.seed(0)
C = np.array([[0., -0.1], [1.7, .4]])
X = np.r_[np.dot(np.random.randn(n_samples, 2), C),
.7 * np.random.randn(n_samples, 2) + np.array([-6, 3])]
lowest_bic = np.infty
bic = []
n_components_range = range(1, 7)
cv_types = ['spherical', 'tied', 'diag', 'full']
for cv_type in cv_types:
for n_components in n_components_range:
# Fit a mixture of Gaussians with EM
gmm = mixture.GMM(n_components=n_components, covariance_type=cv_type)
gmm.fit(X)
bic.append(gmm.bic(X))
if bic[-1] < lowest_bic:
lowest_bic = bic[-1]
best_gmm = gmm
bic = np.array(bic)
color_iter = itertools.cycle(['k', 'r', 'g', 'b', 'c', 'm', 'y'])
clf = best_gmm
bars = []
# Plot the BIC scores
spl = plt.subplot(2, 1, 1)
for i, (cv_type, color) in enumerate(zip(cv_types, color_iter)):
xpos = np.array(n_components_range) + .2 * (i - 2)
bars.append(plt.bar(xpos, bic[i * len(n_components_range):
(i + 1) * len(n_components_range)],
width=.2, color=color))
plt.xticks(n_components_range)
plt.ylim([bic.min() * 1.01 - .01 * bic.max(), bic.max()])
plt.title('BIC score per model')
xpos = np.mod(bic.argmin(), len(n_components_range)) + .65 +\
.2 * np.floor(bic.argmin() / len(n_components_range))
plt.text(xpos, bic.min() * 0.97 + .03 * bic.max(), '*', fontsize=14)
spl.set_xlabel('Number of components')
spl.legend([b[0] for b in bars], cv_types)
# Plot the winner
splot = plt.subplot(2, 1, 2)
Y_ = clf.predict(X)
for i, (mean, covar, color) in enumerate(zip(clf.means_, clf.covars_,
color_iter)):
v, w = linalg.eigh(covar)
if not np.any(Y_ == i):
continue
plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
# Plot an ellipse to show the Gaussian component
angle = np.arctan2(w[0][1], w[0][0])
angle = 180 * angle / np.pi # convert to degrees
v *= 4
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180 + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(.5)
splot.add_artist(ell)
plt.xlim(-10, 10)
plt.ylim(-3, 6)
plt.xticks(())
plt.yticks(())
plt.title('Selected GMM: full model, 2 components')
plt.subplots_adjust(hspace=.35, bottom=.02)
plt.show()
| bsd-3-clause |
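The mixture.GMM class used above was later renamed and reworked as GaussianMixture in scikit-learn. A minimal sketch of the same BIC-driven selection loop with that newer API (plotting omitted) might look like this:

import numpy as np
from sklearn.mixture import GaussianMixture

np.random.seed(0)
C = np.array([[0., -0.1], [1.7, .4]])
X = np.r_[np.dot(np.random.randn(500, 2), C),
          .7 * np.random.randn(500, 2) + np.array([-6, 3])]

lowest_bic, best_gmm = np.inf, None
for cv_type in ('spherical', 'tied', 'diag', 'full'):
    for n_components in range(1, 7):
        gmm = GaussianMixture(n_components=n_components,
                              covariance_type=cv_type).fit(X)
        bic = gmm.bic(X)
        if bic < lowest_bic:
            lowest_bic, best_gmm = bic, gmm
print(best_gmm)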
nkhuyu/data-science-from-scratch | code/recommender_systems.py | 60 | 6291 | from __future__ import division
import math, random
from collections import defaultdict, Counter
from linear_algebra import dot
users_interests = [
["Hadoop", "Big Data", "HBase", "Java", "Spark", "Storm", "Cassandra"],
["NoSQL", "MongoDB", "Cassandra", "HBase", "Postgres"],
["Python", "scikit-learn", "scipy", "numpy", "statsmodels", "pandas"],
["R", "Python", "statistics", "regression", "probability"],
["machine learning", "regression", "decision trees", "libsvm"],
["Python", "R", "Java", "C++", "Haskell", "programming languages"],
["statistics", "probability", "mathematics", "theory"],
["machine learning", "scikit-learn", "Mahout", "neural networks"],
["neural networks", "deep learning", "Big Data", "artificial intelligence"],
["Hadoop", "Java", "MapReduce", "Big Data"],
["statistics", "R", "statsmodels"],
["C++", "deep learning", "artificial intelligence", "probability"],
["pandas", "R", "Python"],
["databases", "HBase", "Postgres", "MySQL", "MongoDB"],
["libsvm", "regression", "support vector machines"]
]
popular_interests = Counter(interest
for user_interests in users_interests
for interest in user_interests).most_common()
def most_popular_new_interests(user_interests, max_results=5):
suggestions = [(interest, frequency)
for interest, frequency in popular_interests
if interest not in user_interests]
return suggestions[:max_results]
#
# user-based filtering
#
def cosine_similarity(v, w):
return dot(v, w) / math.sqrt(dot(v, v) * dot(w, w))
unique_interests = sorted(list({ interest
for user_interests in users_interests
for interest in user_interests }))
def make_user_interest_vector(user_interests):
"""given a list of interests, produce a vector whose i-th element is 1
if unique_interests[i] is in the list, 0 otherwise"""
return [1 if interest in user_interests else 0
for interest in unique_interests]
user_interest_matrix = map(make_user_interest_vector, users_interests)
user_similarities = [[cosine_similarity(interest_vector_i, interest_vector_j)
for interest_vector_j in user_interest_matrix]
for interest_vector_i in user_interest_matrix]
def most_similar_users_to(user_id):
pairs = [(other_user_id, similarity) # find other
for other_user_id, similarity in # users with
enumerate(user_similarities[user_id]) # nonzero
if user_id != other_user_id and similarity > 0] # similarity
return sorted(pairs, # sort them
key=lambda (_, similarity): similarity, # most similar
reverse=True) # first
def user_based_suggestions(user_id, include_current_interests=False):
# sum up the similarities
suggestions = defaultdict(float)
for other_user_id, similarity in most_similar_users_to(user_id):
for interest in users_interests[other_user_id]:
suggestions[interest] += similarity
# convert them to a sorted list
suggestions = sorted(suggestions.items(),
key=lambda (_, weight): weight,
reverse=True)
    # and (maybe) exclude interests the user already has
if include_current_interests:
return suggestions
else:
return [(suggestion, weight)
for suggestion, weight in suggestions
if suggestion not in users_interests[user_id]]
#
# Item-Based Collaborative Filtering
#
interest_user_matrix = [[user_interest_vector[j]
for user_interest_vector in user_interest_matrix]
for j, _ in enumerate(unique_interests)]
interest_similarities = [[cosine_similarity(user_vector_i, user_vector_j)
for user_vector_j in interest_user_matrix]
for user_vector_i in interest_user_matrix]
def most_similar_interests_to(interest_id):
similarities = interest_similarities[interest_id]
pairs = [(unique_interests[other_interest_id], similarity)
for other_interest_id, similarity in enumerate(similarities)
if interest_id != other_interest_id and similarity > 0]
return sorted(pairs,
key=lambda (_, similarity): similarity,
reverse=True)
def item_based_suggestions(user_id, include_current_interests=False):
suggestions = defaultdict(float)
user_interest_vector = user_interest_matrix[user_id]
for interest_id, is_interested in enumerate(user_interest_vector):
if is_interested == 1:
similar_interests = most_similar_interests_to(interest_id)
for interest, similarity in similar_interests:
suggestions[interest] += similarity
suggestions = sorted(suggestions.items(),
key=lambda (_, similarity): similarity,
reverse=True)
if include_current_interests:
return suggestions
else:
return [(suggestion, weight)
for suggestion, weight in suggestions
if suggestion not in users_interests[user_id]]
if __name__ == "__main__":
print "Popular Interests"
print popular_interests
print
print "Most Popular New Interests"
print "already like:", ["NoSQL", "MongoDB", "Cassandra", "HBase", "Postgres"]
print most_popular_new_interests(["NoSQL", "MongoDB", "Cassandra", "HBase", "Postgres"])
print
print "already like:", ["R", "Python", "statistics", "regression", "probability"]
print most_popular_new_interests(["R", "Python", "statistics", "regression", "probability"])
print
print "User based similarity"
print "most similar to 0"
print most_similar_users_to(0)
print "Suggestions for 0"
print user_based_suggestions(0)
print
print "Item based similarity"
print "most similar to 'Big Data'"
print most_similar_interests_to(0)
print
print "suggestions for user 0"
print item_based_suggestions(0)
| unlicense |
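A quick Python 3 check of the cosine-similarity step at the heart of the user-based filtering above (the book's code targets Python 2, e.g. its tuple-unpacking lambdas). The two toy interest vectors below are assumptions of this sketch, not taken from the data set.

import math

def dot(v, w):
    return sum(v_i * w_i for v_i, w_i in zip(v, w))

def cosine_similarity(v, w):
    return dot(v, w) / math.sqrt(dot(v, v) * dot(w, w))

unique_interests = ["Big Data", "Hadoop", "Java", "Python", "R"]
user0 = [1 if i in {"Hadoop", "Big Data"} else 0 for i in unique_interests]
user1 = [1 if i in {"Hadoop", "Big Data", "Python"} else 0 for i in unique_interests]
print(cosine_similarity(user0, user1))   # 2 / sqrt(2 * 3) ~= 0.816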
maxalbert/bokeh | bokeh/properties.py | 1 | 62784 | """ Properties are objects that can be assigned as class level
attributes on Bokeh models, to provide automatic serialization
and validation.
For example, the following defines a model that has integer,
string, and list[float] properties::
class Model(HasProps):
foo = Int
bar = String
baz = List(Float)
The properties of this class can be initialized by specifying
keyword arguments to the initializer::
m = Model(foo=10, bar="a str", baz=[1,2,3,4])
But also by setting the attributes on an instance::
m.foo = 20
Attempts to set a property to a value of the wrong type will
result in a ``ValueError`` exception::
>>> m.foo = 2.3
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/Users/bryan/work/bokeh/bokeh/properties.py", line 585, in __setattr__
super(HasProps, self).__setattr__(name, value)
File "/Users/bryan/work/bokeh/bokeh/properties.py", line 159, in __set__
raise e
File "/Users/bryan/work/bokeh/bokeh/properties.py", line 152, in __set__
self.validate(value)
File "/Users/bryan/work/bokeh/bokeh/properties.py", line 707, in validate
(nice_join([ cls.__name__ for cls in self._underlying_type ]), value, type(value).__name__))
ValueError: expected a value of type int8, int16, int32, int64 or int, got 2.3 of type float
Additionally, properties know how to serialize themselves,
to be understood by BokehJS.
"""
from __future__ import absolute_import, print_function
import logging
logger = logging.getLogger(__name__)
import collections
from copy import copy
import datetime
import dateutil.parser
import difflib
from importlib import import_module
import inspect
import numbers
import re
import types
from warnings import warn
from six import string_types, iteritems
from . import enums
from .util.future import with_metaclass
from .util.string import nice_join
from .property_containers import PropertyValueList, PropertyValueDict, PropertyValueContainer
def field(name):
    ''' Convenience function to explicitly mark a field specification for
a Bokeh model property.
Args:
name (str) : name of a data source field to reference for a property.
Returns:
dict : `{"field": name}`
Note:
This function is included for completeness. String values for
property specifications are by default interpreted as field names.
'''
return dict(field=name)
def value(val):
    ''' Convenience function to explicitly mark a value specification for
a Bokeh model property.
Args:
val (any) : a fixed value to specify for a property.
Returns:
dict : `{"value": name}`
Note:
String values for property specifications are by default interpreted
as field names. This function is especially useful when you want to
specify a fixed value with text properties.
Example:
.. code-block:: python
# The following will take text values to render from a data source
# column "text_column", but use a fixed value "12pt" for font size
p.text("x", "y", text="text_column",
text_font_size=value("12pt"), source=source)
'''
return dict(value=val)
bokeh_bool_types = (bool,)
try:
import numpy as np
bokeh_bool_types += (np.bool8,)
except ImportError:
pass
bokeh_integer_types = (numbers.Integral,)
# used to indicate properties that are not set (vs null, None, etc)
class _NotSet(object):
pass
class DeserializationError(Exception):
pass
class PropertyFactory(object):
""" Base class for objects that can generate Property instances. """
@classmethod
def autocreate(cls):
""" Called by the metaclass to create a
new instance of this descriptor
if the user just assigned it to a property without trailing
parentheses.
"""
return cls()
def make_properties(self, base_name):
""" Returns a list of Property instances. """
raise NotImplementedError("make_properties not implemented")
class PropertyDescriptor(PropertyFactory):
""" Base class for a description of a property, not associated yet with an attribute name or a class."""
def __init__(self, default=None, help=None, serialized=True):
""" This is how the descriptor is created in the class declaration. """
self._serialized = serialized
self._default = default
self.__doc__ = help
self.alternatives = []
# "fail early" when a default is invalid
self.validate(self._raw_default())
def __str__(self):
return self.__class__.__name__
def make_properties(self, base_name):
return [ BasicProperty(descriptor=self, name=base_name) ]
def _has_stable_default(self):
""" True if we have a default that will be the same every time and is not mutable."""
if isinstance(self._default, types.FunctionType):
return False
else:
return True
@classmethod
def _copy_default(cls, default):
if not isinstance(default, types.FunctionType):
return copy(default)
else:
return default()
def _raw_default(self):
""" The raw_default() needs to be validated and transformed by prepare_value() before
use, and may also be replaced later by subclass overrides or by themes."""
return self._copy_default(self._default)
def themed_default(self, cls, name, theme_overrides):
"""The default transformed by prepare_value() and the theme overrides."""
overrides = theme_overrides
if overrides is None or name not in overrides:
overrides = cls._overridden_defaults()
if name in overrides:
default = self._copy_default(overrides[name])
else:
default = self._raw_default()
return self.prepare_value(cls, name, default)
@property
def serialized(self):
"""True if the property should be serialized when serializing an object.
This would be False for a "virtual" or "convenience" property that duplicates
information already available in other properties, for example.
"""
return self._serialized
def matches(self, new, old):
        # XXX: originally this code warned about not being able to compare values, but that
        # doesn't make sense, because most comparisons involving numpy arrays will fail with
        # a ValueError exception, so the warning would fire for something inevitable.
try:
if new is None or old is None:
return new is old # XXX: silence FutureWarning from NumPy
else:
return new == old
except (KeyboardInterrupt, SystemExit):
raise
except Exception:
            # if we cannot compare (e.g. arrays), just punt and return False for the match
pass
return False
def from_json(self, json, models=None):
""" Convert from JSON-compatible values (list, dict, number, string, bool, None)
into a value for this property."""
return json
def transform(self, value):
"""Change the value into the canonical format for this property."""
return value
def validate(self, value):
"""Check whether we can set this property from this value (called before transform())."""
pass
def is_valid(self, value):
try:
self.validate(value)
except ValueError:
return False
else:
return True
@classmethod
def _wrap_container(cls, value):
if isinstance(value, list):
if isinstance(value, PropertyValueList):
return value
else:
return PropertyValueList(value)
elif isinstance(value, dict):
if isinstance(value, PropertyValueDict):
return value
else:
return PropertyValueDict(value)
else:
return value
def prepare_value(self, cls, name, value):
try:
self.validate(value)
except ValueError as e:
for tp, converter in self.alternatives:
if tp.is_valid(value):
value = converter(value)
break
else:
raise e
else:
value = self.transform(value)
return self._wrap_container(value)
@property
def has_ref(self):
return False
def accepts(self, tp, converter):
tp = ParameterizedPropertyDescriptor._validate_type_param(tp)
self.alternatives.append((tp, converter))
return self
def __or__(self, other):
return Either(self, other)
class Property(object):
""" A named attribute that can be read and written. """
def __init__(self, name):
self.name = name
def __str__(self):
return "Property(%s)" % (self.name)
def __get__(self, obj, owner=None):
raise NotImplementedError("Implement __get__")
def __set__(self, obj, value):
raise NotImplementedError("Implement __set__")
def __delete__(self, obj):
raise NotImplementedError("Implement __delete__")
def class_default(self, cls):
""" The default as computed for a certain class, ignoring any per-instance theming."""
raise NotImplementedError("Implement class_default()")
def serializable_value(self, obj):
"""Gets the value as it should be serialized, which differs from
the __get__ value occasionally when we allow the __get__
value to appear simpler for developer convenience.
"""
return self.__get__(obj)
def set_from_json(self, obj, json, models):
"""Sets from a JSON value.
"""
return self.__set__(obj, json)
@property
def serialized(self):
""" True if the property should be serialized when serializing an object.
This would be False for a "virtual" or "convenience" property that duplicates
information already available in other properties, for example.
"""
raise NotImplementedError("Implement serialized()")
@property
def has_ref(self):
""" True if the property can refer to another HasProps instance."""
raise NotImplementedError("Implement has_ref()")
def trigger_if_changed(self, obj, old):
""" Send a change event if the property's value is not equal to ``old``. """
raise NotImplementedError("Implement trigger_if_changed()")
class BasicProperty(Property):
""" A PropertyDescriptor associated with a class attribute name, so it can be read and written. """
def __init__(self, descriptor, name):
super(BasicProperty, self).__init__(name)
self.descriptor = descriptor
self.__doc__ = self.descriptor.__doc__
def __str__(self):
return "%s:%s" % (self.name, self.descriptor)
def class_default(self, cls):
"""Get the default value for a specific subtype of HasProps,
which may not be used for an individual instance."""
return self.descriptor.themed_default(cls, self.name, None)
def instance_default(self, obj):
""" Get the default value that will be used for a specific instance."""
return self.descriptor.themed_default(obj.__class__, self.name, obj.themed_values())
@property
def serialized(self):
return self.descriptor.serialized
def set_from_json(self, obj, json, models=None):
"""Sets using the result of serializable_value().
"""
return super(BasicProperty, self).set_from_json(obj,
self.descriptor.from_json(json, models),
models)
@property
def has_ref(self):
return self.descriptor.has_ref
def _get(self, obj):
if not hasattr(obj, '_property_values'):
raise RuntimeError("Cannot get a property value '%s' from a %s instance before HasProps.__init__" %
(self.name, obj.__class__.__name__))
if self.name not in obj._property_values:
return self._get_default(obj)
else:
return obj._property_values[self.name]
def __get__(self, obj, owner=None):
if obj is not None:
return self._get(obj)
elif owner is not None:
return self
else:
raise ValueError("both 'obj' and 'owner' are None, don't know what to do")
def _trigger(self, obj, old, value):
if hasattr(obj, 'trigger'):
obj.trigger(self.name, old, value)
def _get_default(self, obj):
if self.name in obj._property_values:
# this shouldn't happen because we should have checked before _get_default()
raise RuntimeError("Bokeh internal error, does not handle the case of self.name already in _property_values")
# merely getting a default may force us to put it in
# _property_values if we need to wrap the container, if
# the default is a Model that may change out from
# underneath us, or if the default is generated anew each
# time by a function.
default = self.instance_default(obj)
if not self.descriptor._has_stable_default():
if isinstance(default, PropertyValueContainer):
# this is a special-case so we can avoid returning the container
# as a non-default or application-overridden value, when
# it has not been modified.
default._unmodified_default_value = True
default._register_owner(obj, self)
obj._property_values[self.name] = default
return default
def _real_set(self, obj, old, value):
unchanged = self.descriptor.matches(value, old)
if unchanged:
return
was_set = self.name in obj._property_values
# "old" is the logical old value, but it may not be
# the actual current attribute value if our value
# was mutated behind our back and we got _notify_mutated.
if was_set:
old_attr_value = obj._property_values[self.name]
else:
old_attr_value = old
if old_attr_value is not value:
if isinstance(old_attr_value, PropertyValueContainer):
old_attr_value._unregister_owner(obj, self)
if isinstance(value, PropertyValueContainer):
value._register_owner(obj, self)
obj._property_values[self.name] = value
# for notification purposes, "old" should be the logical old
self._trigger(obj, old, value)
def __set__(self, obj, value):
if not hasattr(obj, '_property_values'):
# Initial values should be passed in to __init__, not set directly
raise RuntimeError("Cannot set a property value '%s' on a %s instance before HasProps.__init__" %
(self.name, obj.__class__.__name__))
value = self.descriptor.prepare_value(obj.__class__, self.name, value)
old = self.__get__(obj)
self._real_set(obj, old, value)
# called when a container is mutated "behind our back" and
# we detect it with our collection wrappers. In this case,
# somewhat weirdly, "old" is a copy and the new "value"
# should already be set unless we change it due to
# validation.
def _notify_mutated(self, obj, old):
value = self.__get__(obj)
# re-validate because the contents of 'old' have changed,
# in some cases this could give us a new object for the value
value = self.descriptor.prepare_value(obj.__class__, self.name, value)
self._real_set(obj, old, value)
def __delete__(self, obj):
if self.name in obj._property_values:
del obj._property_values[self.name]
def trigger_if_changed(self, obj, old):
new_value = self.__get__(obj)
if not self.descriptor.matches(old, new_value):
self._trigger(obj, old, new_value)
class Include(PropertyFactory):
""" Include other properties from mixin Models, with a given prefix. """
def __init__(self, delegate, help="", use_prefix=True):
if not (isinstance(delegate, type) and issubclass(delegate, HasProps)):
raise ValueError("expected a subclass of HasProps, got %r" % delegate)
self.delegate = delegate
self.help = help
self.use_prefix = use_prefix
def make_properties(self, base_name):
props = []
delegate = self.delegate
if self.use_prefix:
prefix = re.sub("_props$", "", base_name) + "_"
else:
prefix = ""
# it would be better if we kept the original generators from
# the delegate and built our Include props from those, perhaps.
for subpropname in delegate.properties(with_bases=False):
fullpropname = prefix + subpropname
subprop = delegate.lookup(subpropname)
if isinstance(subprop, BasicProperty):
descriptor = copy(subprop.descriptor)
if "%s" in self.help:
doc = self.help % subpropname.replace('_', ' ')
else:
doc = self.help
descriptor.__doc__ = doc
props += descriptor.make_properties(fullpropname)
return props
class Override(object):
""" Override aspects of the PropertyDescriptor from a superclass. """
def __init__(self, **kwargs):
if len(kwargs) == 0:
raise ValueError("Override() doesn't override anything, needs keyword args")
self.default_overridden = 'default' in kwargs
if self.default_overridden:
self.default = kwargs.pop('default')
if len(kwargs) > 0:
raise ValueError("Unknown keyword args to Override: %r" % (kwargs))
_EXAMPLE_TEMPLATE = """
Example
-------
.. bokeh-plot:: ../%(path)s
:source-position: none
*source:* `%(path)s <https://github.com/bokeh/bokeh/tree/master/%(path)s>`_
"""
class MetaHasProps(type):
def __new__(meta_cls, class_name, bases, class_dict):
names = set()
names_with_refs = set()
container_names = set()
# Now handle all the Override
overridden_defaults = {}
for name, prop in class_dict.items():
if not isinstance(prop, Override):
continue
if prop.default_overridden:
overridden_defaults[name] = prop.default
for name, default in overridden_defaults.items():
del class_dict[name]
generators = dict()
for name, generator in class_dict.items():
if isinstance(generator, PropertyFactory):
generators[name] = generator
elif isinstance(generator, type) and issubclass(generator, PropertyFactory):
# Support the user adding a property without using parens,
# i.e. using just the Property subclass instead of an
# instance of the subclass
generators[name] = generator.autocreate()
dataspecs = {}
new_class_attrs = {}
def add_prop(prop):
name = prop.name
if name in new_class_attrs:
raise RuntimeError("Two property generators both created %s.%s" % (class_name, name))
new_class_attrs[name] = prop
names.add(name)
if prop.has_ref:
names_with_refs.add(name)
if isinstance(prop, BasicProperty):
if isinstance(prop.descriptor, ContainerProperty):
container_names.add(name)
if isinstance(prop.descriptor, DataSpec):
dataspecs[name] = prop
for name, generator in generators.items():
props = generator.make_properties(name)
replaced_self = False
for prop in props:
if prop.name in generators:
if generators[prop.name] is generator:
# a generator can replace itself, this is the
# standard case like `foo = Int()`
replaced_self = True
add_prop(prop)
else:
# if a generator tries to overwrite another
# generator that's been explicitly provided,
# use the prop that was manually provided
# and ignore this one.
pass
else:
add_prop(prop)
# if we won't overwrite ourselves anyway, delete the generator
if not replaced_self:
del class_dict[name]
class_dict.update(new_class_attrs)
class_dict["__properties__"] = names
class_dict["__properties_with_refs__"] = names_with_refs
class_dict["__container_props__"] = container_names
if len(overridden_defaults) > 0:
class_dict["__overridden_defaults__"] = overridden_defaults
if dataspecs:
class_dict["__dataspecs__"] = dataspecs
if "__example__" in class_dict:
path = class_dict["__example__"]
class_dict["__doc__"] += _EXAMPLE_TEMPLATE % dict(path=path)
return super(MetaHasProps, meta_cls).__new__(meta_cls, class_name, bases, class_dict)
def __init__(cls, class_name, bases, nmspc):
if class_name == 'HasProps':
return
# Check for improperly overriding a Property attribute.
# Overriding makes no sense except through the Override
# class which can be used to tweak the default.
# Historically code also tried changing the Property's
# type or changing from Property to non-Property: these
# overrides are bad conceptually because the type of a
# read-write property is invariant.
cls_attrs = cls.__dict__.keys() # we do NOT want inherited attrs here
for attr in cls_attrs:
for base in bases:
if issubclass(base, HasProps) and attr in base.properties():
warn(('Property "%s" in class %s was overridden by a class attribute ' + \
'"%s" in class %s; it never makes sense to do this. ' + \
'Either %s.%s or %s.%s should be removed, or %s.%s should not ' + \
'be a Property, or use Override(), depending on the intended effect.') %
(attr, base.__name__, attr, class_name,
base.__name__, attr,
class_name, attr,
base.__name__, attr),
RuntimeWarning, stacklevel=2)
if "__overridden_defaults__" in cls.__dict__:
our_props = cls.properties()
for key in cls.__dict__["__overridden_defaults__"].keys():
if key not in our_props:
warn(('Override() of %s in class %s does not override anything.') % (key, class_name),
RuntimeWarning, stacklevel=2)
def accumulate_from_superclasses(cls, propname):
cachename = "__cached_all" + propname
# we MUST use cls.__dict__ NOT hasattr(). hasattr() would also look at base
# classes, and the cache must be separate for each class
if cachename not in cls.__dict__:
s = set()
for c in inspect.getmro(cls):
if issubclass(c, HasProps) and hasattr(c, propname):
base = getattr(c, propname)
s.update(base)
setattr(cls, cachename, s)
return cls.__dict__[cachename]
def accumulate_dict_from_superclasses(cls, propname):
cachename = "__cached_all" + propname
# we MUST use cls.__dict__ NOT hasattr(). hasattr() would also look at base
# classes, and the cache must be separate for each class
if cachename not in cls.__dict__:
d = dict()
for c in inspect.getmro(cls):
if issubclass(c, HasProps) and hasattr(c, propname):
base = getattr(c, propname)
for k,v in base.items():
if k not in d:
d[k] = v
setattr(cls, cachename, d)
return cls.__dict__[cachename]
def abstract(cls):
""" A phony decorator to mark abstract base classes. """
if not issubclass(cls, HasProps):
raise TypeError("%s is not a subclass of HasProps" % cls.__name__)
return cls
class HasProps(with_metaclass(MetaHasProps, object)):
def __init__(self, **properties):
super(HasProps, self).__init__()
self._property_values = dict()
for name, value in properties.items():
setattr(self, name, value)
def __setattr__(self, name, value):
# self.properties() below can be expensive so avoid it
# if we're just setting a private underscore field
if name.startswith("_"):
super(HasProps, self).__setattr__(name, value)
return
props = sorted(self.properties())
deprecated = getattr(self, '__deprecated_attributes__', [])
if name in props or name in deprecated:
super(HasProps, self).__setattr__(name, value)
else:
matches, text = difflib.get_close_matches(name.lower(), props), "similar"
if not matches:
matches, text = props, "possible"
raise AttributeError("unexpected attribute '%s' to %s, %s attributes are %s" %
(name, self.__class__.__name__, text, nice_join(matches)))
def set_from_json(self, name, json, models=None):
"""Sets a property of the object using JSON and a dictionary from model ids to model instances.
The model instances are necessary if the JSON contains references to models.
"""
if name in self.properties():
#logger.debug("Patching attribute %s of %r", attr, patched_obj)
prop = self.lookup(name)
prop.set_from_json(self, json, models)
else:
logger.warn("JSON had attr %r on obj %r, which is a client-only or invalid attribute that shouldn't have been sent", name, self)
def update(self, **kwargs):
""" Updates the object's properties from the given keyword args."""
for k,v in kwargs.items():
setattr(self, k, v)
def update_from_json(self, json_attributes, models=None):
""" Updates the object's properties from a JSON attributes dictionary."""
for k, v in json_attributes.items():
self.set_from_json(k, v, models)
def _clone(self):
""" Returns a duplicate of this object with all its properties
set appropriately. Values which are containers are shallow-copied.
"""
return self.__class__(**self._property_values)
@classmethod
def lookup(cls, name):
return getattr(cls, name)
@classmethod
def properties_with_refs(cls):
""" Returns a set of the names of this object's properties that
have references. We traverse the class hierarchy and
pull together the full list of properties.
"""
return accumulate_from_superclasses(cls, "__properties_with_refs__")
@classmethod
def properties_containers(cls):
""" Returns a list of properties that are containers.
"""
return accumulate_from_superclasses(cls, "__container_props__")
@classmethod
def properties(cls, with_bases=True):
"""Returns a set of the names of this object's properties. If
``with_bases`` is True, we traverse the class hierarchy
and pull together the full list of properties; if False,
we only return the properties introduced in the class
itself.
Args:
            with_bases (bool, optional) : True to include properties inherited from base classes.
(default: True)
Returns:
a set of property names
"""
if with_bases:
return accumulate_from_superclasses(cls, "__properties__")
else:
return set(cls.__properties__)
@classmethod
def _overridden_defaults(cls):
""" Returns a dictionary of defaults that have been overridden; this is an implementation detail of PropertyDescriptor. """
return accumulate_dict_from_superclasses(cls, "__overridden_defaults__")
@classmethod
def dataspecs(cls):
""" Returns a set of the names of this object's dataspecs (and
dataspec subclasses). Traverses the class hierarchy.
"""
return set(cls.dataspecs_with_props().keys())
@classmethod
def dataspecs_with_props(cls):
""" Returns a dict of dataspec names to dataspec properties. """
return accumulate_dict_from_superclasses(cls, "__dataspecs__")
def properties_with_values(self, include_defaults=True):
''' Get a dict from property names to the current values of those properties.
Non-serializable properties are skipped and property values are in "serialized"
format which may be slightly different from the values you would normally
read from the properties; the intent of this method is to return the information
needed to losslessly reconstitute the object instance.
Args:
include_defaults (bool) : True to include properties that haven't been set.
Returns:
dict : from property names to their values
'''
result = dict()
if include_defaults:
keys = self.properties()
else:
keys = self._property_values.keys()
for key in keys:
prop = self.lookup(key)
if not prop.serialized:
continue
value = prop.serializable_value(self)
if not include_defaults:
if isinstance(value, PropertyValueContainer) and value._unmodified_default_value:
continue
result[key] = value
return result
def set(self, **kwargs):
""" Sets a number of properties at once """
for kw in kwargs:
setattr(self, kw, kwargs[kw])
def themed_values(self):
""" Get any theme-provided overrides as a dict from property name to value,
or None if no theme overrides any values for this instance. """
if hasattr(self, '__themed_values__'):
return getattr(self, '__themed_values__')
else:
return None
def apply_theme(self, property_values):
""" Apply a set of theme values which will be used rather than
defaults, but will not override application-set
values. The passed-in dictionary may be kept around as-is
and shared with other instances to save memory (so neither
the caller nor the HasProps instance should modify it).
"""
old_dict = None
if hasattr(self, '__themed_values__'):
old_dict = getattr(self, '__themed_values__')
# if the same theme is set again, it should reuse the
# same dict
if old_dict is property_values:
return
removed = set()
# we're doing a little song-and-dance to avoid storing __themed_values__ or
# an empty dict, if there's no theme that applies to this HasProps instance.
if old_dict is not None:
removed.update(set(old_dict.keys()))
added = set(property_values.keys())
old_values = dict()
for k in added.union(removed):
old_values[k] = getattr(self, k)
if len(property_values) > 0:
setattr(self, '__themed_values__', property_values)
elif hasattr(self, '__themed_values__'):
delattr(self, '__themed_values__')
# Emit any change notifications that result
for k, v in old_values.items():
prop = self.lookup(k)
prop.trigger_if_changed(self, v)
def unapply_theme(self):
self.apply_theme(property_values=dict())
def pprint_props(self, indent=0):
""" Prints the properties of this object, nicely formatted """
for key, value in self.properties_with_values().items():
print("%s%s: %r" % (" "*indent, key, value))
class PrimitiveProperty(PropertyDescriptor):
""" A base class for simple property types. Subclasses should
define a class attribute ``_underlying_type`` that is a tuple
of acceptable type values for the property.
"""
_underlying_type = None
def validate(self, value):
super(PrimitiveProperty, self).validate(value)
if not (value is None or isinstance(value, self._underlying_type)):
raise ValueError("expected a value of type %s, got %s of type %s" %
(nice_join([ cls.__name__ for cls in self._underlying_type ]), value, type(value).__name__))
def from_json(self, json, models=None):
if json is None or isinstance(json, self._underlying_type):
return json
else:
expected = nice_join([ cls.__name__ for cls in self._underlying_type ])
raise DeserializationError("%s expected %s, got %s" % (self, expected, json))
class Bool(PrimitiveProperty):
""" Boolean type property. """
_underlying_type = bokeh_bool_types
class Int(PrimitiveProperty):
""" Signed integer type property. """
_underlying_type = bokeh_integer_types
class Float(PrimitiveProperty):
""" Floating point type property. """
_underlying_type = (numbers.Real,)
class Complex(PrimitiveProperty):
""" Complex floating point type property. """
_underlying_type = (numbers.Complex,)
class String(PrimitiveProperty):
""" String type property. """
_underlying_type = string_types
class Regex(String):
""" Regex type property validates that text values match the
given regular expression.
"""
def __init__(self, regex, default=None, help=None):
self.regex = re.compile(regex)
super(Regex, self).__init__(default=default, help=help)
def validate(self, value):
super(Regex, self).validate(value)
if not (value is None or self.regex.match(value) is not None):
raise ValueError("expected a string matching %r pattern, got %r" % (self.regex.pattern, value))
def __str__(self):
return "%s(%r)" % (self.__class__.__name__, self.regex.pattern)
class JSON(String):
""" JSON type property validates that text values are valid JSON.
.. note::
The string is transmitted and received by BokehJS as a *string*
containing JSON content. i.e., you must use ``JSON.parse`` to unpack
the value into a JavaScript hash.
"""
def validate(self, value):
super(JSON, self).validate(value)
if value is None: return
try:
import json
json.loads(value)
except ValueError:
raise ValueError("expected JSON text, got %r" % value)
class ParameterizedPropertyDescriptor(PropertyDescriptor):
""" Base class for Properties that have type parameters, e.g.
``List(String)``.
"""
@staticmethod
def _validate_type_param(type_param):
if isinstance(type_param, type):
if issubclass(type_param, PropertyDescriptor):
return type_param()
else:
type_param = type_param.__name__
elif isinstance(type_param, PropertyDescriptor):
return type_param
raise ValueError("expected a PropertyDescriptor as type parameter, got %s" % type_param)
@property
def type_params(self):
raise NotImplementedError("abstract method")
@property
def has_ref(self):
return any(type_param.has_ref for type_param in self.type_params)
class ContainerProperty(ParameterizedPropertyDescriptor):
""" Base class for Container-like type properties. """
def _has_stable_default(self):
# all containers are mutable, so the default can be modified
return False
class Seq(ContainerProperty):
""" Sequence (list, tuple) type property.
"""
def _is_seq(self, value):
return isinstance(value, collections.Container) and not isinstance(value, collections.Mapping)
def _new_instance(self, value):
return value
def __init__(self, item_type, default=None, help=None):
self.item_type = self._validate_type_param(item_type)
super(Seq, self).__init__(default=default, help=help)
@property
def type_params(self):
return [self.item_type]
def validate(self, value):
super(Seq, self).validate(value)
if value is not None:
if not (self._is_seq(value) and all(self.item_type.is_valid(item) for item in value)):
if self._is_seq(value):
invalid = []
for item in value:
if not self.item_type.is_valid(item):
invalid.append(item)
raise ValueError("expected an element of %s, got seq with invalid items %r" % (self, invalid))
else:
raise ValueError("expected an element of %s, got %r" % (self, value))
def __str__(self):
return "%s(%s)" % (self.__class__.__name__, self.item_type)
def from_json(self, json, models=None):
if json is None:
return None
elif isinstance(json, list):
return self._new_instance([ self.item_type.from_json(item, models) for item in json ])
else:
raise DeserializationError("%s expected a list or None, got %s" % (self, json))
class List(Seq):
""" Python list type property.
"""
def __init__(self, item_type, default=[], help=None):
# todo: refactor to not use mutable objects as default values.
# Left in place for now because we want to allow None to express
# optional values. Also in Dict.
super(List, self).__init__(item_type, default=default, help=help)
def _is_seq(self, value):
return isinstance(value, list)
class Array(Seq):
""" NumPy array type property.
"""
def _is_seq(self, value):
import numpy as np
return isinstance(value, np.ndarray)
def _new_instance(self, value):
import numpy as np
return np.array(value)
class Dict(ContainerProperty):
""" Python dict type property.
If a default value is passed in, then a shallow copy of it will be
used for each new use of this property.
"""
def __init__(self, keys_type, values_type, default={}, help=None):
self.keys_type = self._validate_type_param(keys_type)
self.values_type = self._validate_type_param(values_type)
super(Dict, self).__init__(default=default, help=help)
@property
def type_params(self):
return [self.keys_type, self.values_type]
def validate(self, value):
super(Dict, self).validate(value)
if value is not None:
if not (isinstance(value, dict) and \
all(self.keys_type.is_valid(key) and self.values_type.is_valid(val) for key, val in iteritems(value))):
raise ValueError("expected an element of %s, got %r" % (self, value))
def __str__(self):
return "%s(%s, %s)" % (self.__class__.__name__, self.keys_type, self.values_type)
def from_json(self, json, models=None):
if json is None:
return None
elif isinstance(json, dict):
return { self.keys_type.from_json(key, models): self.values_type.from_json(value, models) for key, value in iteritems(json) }
else:
raise DeserializationError("%s expected a dict or None, got %s" % (self, json))
class Tuple(ContainerProperty):
""" Tuple type property. """
def __init__(self, tp1, tp2, *type_params, **kwargs):
self._type_params = list(map(self._validate_type_param, (tp1, tp2) + type_params))
super(Tuple, self).__init__(default=kwargs.get("default"), help=kwargs.get("help"))
@property
def type_params(self):
return self._type_params
def validate(self, value):
super(Tuple, self).validate(value)
if value is not None:
if not (isinstance(value, (tuple, list)) and len(self.type_params) == len(value) and \
all(type_param.is_valid(item) for type_param, item in zip(self.type_params, value))):
raise ValueError("expected an element of %s, got %r" % (self, value))
def __str__(self):
return "%s(%s)" % (self.__class__.__name__, ", ".join(map(str, self.type_params)))
def from_json(self, json, models=None):
if json is None:
return None
elif isinstance(json, list):
return tuple(type_param.from_json(item, models) for type_param, item in zip(self.type_params, json))
else:
raise DeserializationError("%s expected a list or None, got %s" % (self, json))
class Instance(PropertyDescriptor):
""" Instance type property, for references to other Models in the object
graph.
"""
def __init__(self, instance_type, default=None, help=None):
if not isinstance(instance_type, (type,) + string_types):
raise ValueError("expected a type or string, got %s" % instance_type)
if isinstance(instance_type, type) and not issubclass(instance_type, HasProps):
raise ValueError("expected a subclass of HasProps, got %s" % instance_type)
self._instance_type = instance_type
super(Instance, self).__init__(default=default, help=help)
@property
def instance_type(self):
if isinstance(self._instance_type, str):
module, name = self._instance_type.rsplit(".", 1)
self._instance_type = getattr(import_module(module, "bokeh"), name)
return self._instance_type
def _has_stable_default(self):
# because the instance value is mutable
return False
@property
def has_ref(self):
return True
def validate(self, value):
super(Instance, self).validate(value)
if value is not None:
if not isinstance(value, self.instance_type):
raise ValueError("expected an instance of type %s, got %s of type %s" %
(self.instance_type.__name__, value, type(value).__name__))
def __str__(self):
return "%s(%s)" % (self.__class__.__name__, self.instance_type.__name__)
def from_json(self, json, models=None):
if json is None:
return None
elif isinstance(json, dict):
from .model import Model
if issubclass(self.instance_type, Model):
if models is None:
raise DeserializationError("%s can't deserialize without models" % self)
else:
model = models.get(json["id"])
if model is not None:
return model
else:
raise DeserializationError("%s failed to deserialize reference to %s" % (self, json))
else:
attrs = {}
for name, value in iteritems(json):
prop = self.instance_type.lookup(name)
attrs[name] = prop.from_json(value, models)
# XXX: this doesn't work when Instance(Superclass) := Subclass()
# Serialization dict must carry type information to resolve this.
return self.instance_type(**attrs)
else:
raise DeserializationError("%s expected a dict or None, got %s" % (self, json))
class This(PropertyDescriptor):
""" A reference to an instance of the class being defined. """
pass
# Fake types, ABCs
class Any(PropertyDescriptor):
""" Any type property accepts any values. """
pass
class Function(PropertyDescriptor):
""" Function type property. """
pass
class Event(PropertyDescriptor):
""" Event type property. """
pass
class Interval(ParameterizedPropertyDescriptor):
''' Range type property ensures values are contained inside a given interval. '''
def __init__(self, interval_type, start, end, default=None, help=None):
self.interval_type = self._validate_type_param(interval_type)
# Make up a property name for validation purposes
self.interval_type.validate(start)
self.interval_type.validate(end)
self.start = start
self.end = end
super(Interval, self).__init__(default=default, help=help)
@property
def type_params(self):
return [self.interval_type]
def validate(self, value):
super(Interval, self).validate(value)
if not (value is None or self.interval_type.is_valid(value) and value >= self.start and value <= self.end):
raise ValueError("expected a value of type %s in range [%s, %s], got %r" % (self.interval_type, self.start, self.end, value))
def __str__(self):
return "%s(%s, %r, %r)" % (self.__class__.__name__, self.interval_type, self.start, self.end)
class Byte(Interval):
''' Byte type property. '''
def __init__(self, default=0, help=None):
super(Byte, self).__init__(Int, 0, 255, default=default, help=help)
class Either(ParameterizedPropertyDescriptor):
""" Takes a list of valid properties and validates against them in succession. """
def __init__(self, tp1, tp2, *type_params, **kwargs):
self._type_params = list(map(self._validate_type_param, (tp1, tp2) + type_params))
help = kwargs.get("help")
def choose_default():
return self._type_params[0]._raw_default()
default = kwargs.get("default", choose_default)
super(Either, self).__init__(default=default, help=help)
@property
def type_params(self):
return self._type_params
def validate(self, value):
super(Either, self).validate(value)
if not (value is None or any(param.is_valid(value) for param in self.type_params)):
raise ValueError("expected an element of either %s, got %r" % (nice_join(self.type_params), value))
def transform(self, value):
for param in self.type_params:
try:
return param.transform(value)
except ValueError:
pass
raise ValueError("Could not transform %r" % value)
def from_json(self, json, models=None):
for tp in self.type_params:
try:
return tp.from_json(json, models)
except DeserializationError:
pass
else:
raise DeserializationError("%s couldn't deserialize %s" % (self, json))
def __str__(self):
return "%s(%s)" % (self.__class__.__name__, ", ".join(map(str, self.type_params)))
def __or__(self, other):
return self.__class__(*(self.type_params + [other]), default=self._default, help=self.help)
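# Illustrative sketch (hypothetical property, using the Int/String/Float
# descriptors defined earlier in this module): Either tries each type parameter
# in turn, and the `|` operator extends the list of accepted alternatives.
#
#     prop = Either(Int, String)
#     prop.validate(10)        # ok, matches Int
#     prop.validate("auto")    # ok, matches String
#     prop.validate(3.5)       # raises ValueError: matches neither alternative
#     wider = prop | Float     # equivalent to Either(Int, String, Float)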
class Enum(String):
""" An Enum with a list of allowed values. The first value in the list is
the default value, unless a default is provided with the "default" keyword
argument.
"""
def __init__(self, enum, *values, **kwargs):
if not (not values and isinstance(enum, enums.Enumeration)):
enum = enums.enumeration(enum, *values)
self._enum = enum
default = kwargs.get("default", enum._default)
help = kwargs.get("help")
super(Enum, self).__init__(default=default, help=help)
@property
def allowed_values(self):
return self._enum._values
def validate(self, value):
super(Enum, self).validate(value)
if not (value is None or value in self._enum):
raise ValueError("invalid value: %r; allowed values are %s" % (value, nice_join(self.allowed_values)))
def __str__(self):
return "%s(%s)" % (self.__class__.__name__, ", ".join(map(repr, self.allowed_values)))
class Auto(Enum):
def __init__(self):
super(Auto, self).__init__("auto")
def __str__(self):
return self.__class__.__name__
# Properties useful for defining visual attributes
class Color(Either):
""" Accepts color definition in a variety of ways, and produces an
appropriate serialization of its value for whatever backend.
For colors, because we support named colors and hex values prefaced
with a "#", when we are handed a string value, there is a little
interpretation: if the value is one of the 147 SVG named colors or
it starts with a "#", then it is interpreted as a value.
If a 3-tuple is provided, then it is treated as an RGB (0..255).
If a 4-tuple is provided, then it is treated as an RGBa (0..255), with
alpha as a float between 0 and 1. (This follows the HTML5 Canvas API.)
"""
def __init__(self, default=None, help=None):
types = (Enum(enums.NamedColor),
Regex("^#[0-9a-fA-F]{6}$"),
Tuple(Byte, Byte, Byte),
Tuple(Byte, Byte, Byte, Percent))
super(Color, self).__init__(*types, default=default, help=help)
def __str__(self):
return self.__class__.__name__
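# Illustrative sketch (hypothetical values): the forms accepted by Color, as
# described in its docstring above.
#
#     Color().validate("firebrick")        # one of the named SVG colors
#     Color().validate("#aabbcc")          # hex RGB string
#     Color().validate((255, 0, 0))        # RGB tuple of Bytes
#     Color().validate((255, 0, 0, 0.5))   # RGBa tuple, alpha as a Percent
#     Color().validate("not a color")      # raises ValueError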
class Align(PropertyDescriptor):
pass
class DashPattern(Either):
""" Dash type property.
Express patterns that describe line dashes. ``DashPattern`` values
can be specified in a variety of ways:
* An enum: "solid", "dashed", "dotted", "dotdash", "dashdot"
* a tuple or list of integers in the `HTML5 Canvas dash specification style`_.
Note that if the list of integers has an odd number of elements, then
it is duplicated, and that duplicated list becomes the new dash list.
To indicate that dashing is turned off (solid lines), specify the empty
list [].
.. _HTML5 Canvas dash specification style: http://www.w3.org/html/wg/drafts/2dcontext/html5_canvas/#dash-list
"""
_dash_patterns = {
"solid": [],
"dashed": [6],
"dotted": [2,4],
"dotdash": [2,4,6,4],
"dashdot": [6,4,2,4],
}
def __init__(self, default=[], help=None):
types = Enum(enums.DashPattern), Regex(r"^(\d+(\s+\d+)*)?$"), Seq(Int)
super(DashPattern, self).__init__(*types, default=default, help=help)
def transform(self, value):
value = super(DashPattern, self).transform(value)
if isinstance(value, string_types):
try:
return self._dash_patterns[value]
except KeyError:
return [int(x) for x in value.split()]
else:
return value
def __str__(self):
return self.__class__.__name__
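# Illustrative sketch (hypothetical values): DashPattern.transform is expected
# to map enum names through the _dash_patterns table and to parse
# whitespace-separated integer strings into dash lists.
#
#     DashPattern().transform("dashed")    # -> [6]
#     DashPattern().transform("2 4 6")     # -> [2, 4, 6]
#     DashPattern().transform([2, 4])      # -> [2, 4], passed through unchanged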
class Size(Float):
""" Size type property.
.. note::
``Size`` is equivalent to an unsigned int.
"""
def validate(self, value):
super(Size, self).validate(value)
if not (value is None or 0.0 <= value):
raise ValueError("expected a non-negative number, got %r" % value)
class Percent(Float):
""" Percentage type property.
Percents are useful for specifying alphas and coverage and extents; more
semantically meaningful than Float(0..1).
"""
def validate(self, value):
super(Percent, self).validate(value)
if not (value is None or 0.0 <= value <= 1.0):
raise ValueError("expected a value in range [0, 1], got %r" % value)
class Angle(Float):
""" Angle type property. """
pass
class Date(PropertyDescriptor):
""" Date (not datetime) type property.
"""
def __init__(self, default=datetime.date.today(), help=None):
super(Date, self).__init__(default=default, help=help)
def validate(self, value):
super(Date, self).validate(value)
if not (value is None or isinstance(value, (datetime.date,) + string_types + (float,) + bokeh_integer_types)):
raise ValueError("expected a date, string or timestamp, got %r" % value)
def transform(self, value):
value = super(Date, self).transform(value)
if isinstance(value, (float,) + bokeh_integer_types):
try:
value = datetime.date.fromtimestamp(value)
except ValueError:
value = datetime.date.fromtimestamp(value/1000)
elif isinstance(value, string_types):
value = dateutil.parser.parse(value).date()
return value
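# Illustrative sketch (hypothetical values): Date.transform normalizes the
# accepted inputs to a datetime.date, retrying a numeric timestamp as
# milliseconds when interpreting it as seconds fails.
#
#     Date().transform("2015-06-01")       # -> datetime.date(2015, 6, 1)
#     Date().transform(1433116800.0)       # interpreted as seconds since the epoch
#     Date().transform(1433116800000.0)    # too large for seconds, retried as milliseconds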
class Datetime(PropertyDescriptor):
""" Datetime type property.
"""
def __init__(self, default=datetime.date.today(), help=None):
super(Datetime, self).__init__(default=default, help=help)
def validate(self, value):
super(Datetime, self).validate(value)
datetime_types = (datetime.datetime, datetime.date)
try:
import numpy as np
datetime_types += (np.datetime64,)
except ImportError:
pass
if (isinstance(value, datetime_types)):
return
try:
import pandas
if isinstance(value, (pandas.Timestamp)):
return
except ImportError:
pass
raise ValueError("Expected a datetime instance, got %r" % value)
def transform(self, value):
value = super(Datetime, self).transform(value)
return value
# Handled by serialization in protocol.py for now
class RelativeDelta(Dict):
""" RelativeDelta type property for time deltas.
"""
def __init__(self, default={}, help=None):
keys = Enum("years", "months", "days", "hours", "minutes", "seconds", "microseconds")
values = Int
super(RelativeDelta, self).__init__(keys, values, default=default, help=help)
def __str__(self):
return self.__class__.__name__
class DataSpecProperty(BasicProperty):
""" A Property with a DataSpec descriptor."""
def serializable_value(self, obj):
return self.descriptor.to_serializable(obj, self.name, getattr(obj, self.name))
def set_from_json(self, obj, json, models=None):
# we want to try to keep the "format" of the data spec as string, dict, or number,
# assuming the serialized dict is compatible with that.
old = getattr(obj, self.name)
if old is not None:
try:
self.descriptor._type.validate(old)
if 'value' in json:
json = json['value']
except ValueError:
if isinstance(old, string_types) and 'field' in json:
json = json['field']
# leave it as a dict if 'old' was a dict
super(DataSpecProperty, self).set_from_json(obj, json, models)
class DataSpec(Either):
def __init__(self, typ, default, help=None):
super(DataSpec, self).__init__(String, Dict(String, Either(String, typ)), typ, default=default, help=help)
self._type = self._validate_type_param(typ)
def make_properties(self, base_name):
return [ DataSpecProperty(descriptor=self, name=base_name) ]
def to_serializable(self, obj, name, val):
# Check for None value; this means "the whole thing is
# unset," not "the value is None."
if val is None:
return None
# Check for spec type value
try:
self._type.validate(val)
return dict(value=val)
except ValueError:
pass
# Check for data source field name
if isinstance(val, string_types):
return dict(field=val)
# Must be dict, return as-is
return val
class NumberSpec(DataSpec):
def __init__(self, default=None, help=None):
super(NumberSpec, self).__init__(Float, default=default, help=help)
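# Illustrative sketch (obj is a hypothetical model object; it is not used by
# DataSpec.to_serializable itself): serialization distinguishes fixed values,
# data-source field names and pre-built spec dicts.
#
#     spec = NumberSpec(default=None)
#     spec.to_serializable(obj, "x", 11.5)            # -> {'value': 11.5}
#     spec.to_serializable(obj, "x", "pressure")      # -> {'field': 'pressure'}
#     spec.to_serializable(obj, "x", {'field': 'p'})  # -> {'field': 'p'}, as-is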
class StringSpec(DataSpec):
def __init__(self, default, help=None):
super(StringSpec, self).__init__(List(String), default=default, help=help)
def prepare_value(self, cls, name, value):
if isinstance(value, list):
if len(value) != 1:
raise TypeError("StringSpec convenience list values must have length 1")
value = dict(value=value[0])
return super(StringSpec, self).prepare_value(cls, name, value)
class FontSizeSpec(DataSpec):
def __init__(self, default, help=None):
super(FontSizeSpec, self).__init__(List(String), default=default, help=help)
def prepare_value(self, cls, name, value):
if isinstance(value, string_types):
warn('Setting a fixed font size value as a string %r is deprecated, '
'set with value(%r) or [%r] instead' % (value, value, value),
DeprecationWarning, stacklevel=2)
if len(value) > 0 and value[0].isdigit():
value = dict(value=value)
return super(FontSizeSpec, self).prepare_value(cls, name, value)
class UnitsSpecProperty(DataSpecProperty):
""" A Property that sets a matching `_units` property as a side effect."""
def __init__(self, descriptor, name, units_prop):
super(UnitsSpecProperty, self).__init__(descriptor, name)
self.units_prop = units_prop
def _extract_units(self, obj, value):
if isinstance(value, dict):
units = value.pop("units", None)
if units:
self.units_prop.__set__(obj, units)
def __set__(self, obj, value):
self._extract_units(obj, value)
super(UnitsSpecProperty, self).__set__(obj, value)
def set_from_json(self, obj, json, models=None):
self._extract_units(obj, json)
super(UnitsSpecProperty, self).set_from_json(obj, json, models)
class UnitsSpec(NumberSpec):
def __init__(self, default, units_type, units_default, help=None):
super(UnitsSpec, self).__init__(default=default, help=help)
self._units_type = self._validate_type_param(units_type)
# this is a hack because we already constructed units_type
self._units_type.validate(units_default)
self._units_type._default = units_default
# this is sort of a hack because we don't have a
# serialized= kwarg on every PropertyDescriptor subtype
self._units_type._serialized = False
def make_properties(self, base_name):
units_name = base_name + "_units"
units_props = self._units_type.make_properties(units_name)
return units_props + [ UnitsSpecProperty(descriptor=self, name=base_name, units_prop=units_props[0]) ]
def to_serializable(self, obj, name, val):
d = super(UnitsSpec, self).to_serializable(obj, name, val)
if d is not None and 'units' not in d:
d["units"] = getattr(obj, name+"_units")
return d
def __str__(self):
return "%s(units_default=%r)" % (self.__class__.__name__, self._units_type._default)
class AngleSpec(UnitsSpec):
def __init__(self, default=None, units_default="rad", help=None):
super(AngleSpec, self).__init__(default=default, units_type=Enum(enums.AngleUnits), units_default=units_default, help=help)
class DistanceSpec(UnitsSpec):
def __init__(self, default=None, units_default="data", help=None):
super(DistanceSpec, self).__init__(default=default, units_type=Enum(enums.SpatialUnits), units_default=units_default, help=help)
def prepare_value(self, cls, name, value):
try:
if value is not None and value < 0:
raise ValueError("Distances must be positive or None!")
except TypeError:
pass
return super(DistanceSpec, self).prepare_value(cls, name, value)
class ScreenDistanceSpec(NumberSpec):
def to_serializable(self, obj, name, val):
d = super(ScreenDistanceSpec, self).to_serializable(obj, name, val)
d["units"] = "screen"
return d
def prepare_value(self, cls, name, value):
try:
if value is not None and value < 0:
raise ValueError("Distances must be positive or None!")
except TypeError:
pass
return super(ScreenDistanceSpec, self).prepare_value(cls, name, value)
class DataDistanceSpec(NumberSpec):
def to_serializable(self, obj, name, val):
        d = super(DataDistanceSpec, self).to_serializable(obj, name, val)
d["units"] = "data"
return d
def prepare_value(self, cls, name, value):
try:
if value is not None and value < 0:
raise ValueError("Distances must be positive or None!")
except TypeError:
pass
return super(DataDistanceSpec, self).prepare_value(cls, name, value)
class ColorSpec(DataSpec):
def __init__(self, default, help=None):
super(ColorSpec, self).__init__(Color, default=default, help=help)
@classmethod
def isconst(cls, arg):
""" Returns True if the argument is a literal color. Check for a
well-formed hexadecimal color value.
"""
return isinstance(arg, string_types) and \
((len(arg) == 7 and arg[0] == "#") or arg in enums.NamedColor)
@classmethod
def is_color_tuple(cls, val):
return isinstance(val, tuple) and len(val) in (3, 4)
@classmethod
def format_tuple(cls, colortuple):
if len(colortuple) == 3:
return "rgb%r" % (colortuple,)
else:
return "rgba%r" % (colortuple,)
def to_serializable(self, obj, name, val):
if val is None:
return dict(value=None)
# Check for hexadecimal or named color
if self.isconst(val):
return dict(value=val)
# Check for RGB or RGBa tuple
if isinstance(val, tuple):
return dict(value=self.format_tuple(val))
# Check for data source field name
if isinstance(val, string_types):
return dict(field=val)
# Must be dict, return as-is
return val
def validate(self, value):
try:
return super(ColorSpec, self).validate(value)
except ValueError as e:
# Check for tuple input if not yet a valid input type
if self.is_color_tuple(value):
return True
else:
raise e
def transform(self, value):
# Make sure that any tuple has either three integers, or three integers and one float
if isinstance(value, tuple):
value = tuple(int(v) if i < 3 else v for i, v in enumerate(value))
return value
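# Illustrative sketch (hypothetical values): ColorSpec keeps literal colors as
# fixed values and treats any other string as a data-source field name.
#
#     cs = ColorSpec(default="red")
#     cs.to_serializable(None, "color", "#aabbcc")      # -> {'value': '#aabbcc'}
#     cs.to_serializable(None, "color", (10, 20, 30))   # -> {'value': 'rgb(10, 20, 30)'}
#     cs.to_serializable(None, "color", "fill_color")   # -> {'field': 'fill_color'}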
| bsd-3-clause |
rspavel/spack | var/spack/repos/builtin/packages/paraview/package.py | 1 | 13837 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
import os
class Paraview(CMakePackage, CudaPackage):
"""ParaView is an open-source, multi-platform data analysis and
visualization application."""
homepage = 'https://www.paraview.org'
url = "https://www.paraview.org/files/v5.7/ParaView-v5.7.0.tar.xz"
list_url = "https://www.paraview.org/files"
list_depth = 1
git = "https://gitlab.kitware.com/paraview/paraview.git"
maintainers = ['chuckatkins', 'danlipsa']
version('develop', branch='master', submodules=True)
version('5.8.1', sha256='7653950392a0d7c0287c26f1d3a25cdbaa11baa7524b0af0e6a1a0d7d487d034')
version('5.8.0', sha256='219e4107abf40317ce054408e9c3b22fb935d464238c1c00c0161f1c8697a3f9')
version('5.7.0', sha256='e41e597e1be462974a03031380d9e5ba9a7efcdb22e4ca2f3fec50361f310874')
version('5.6.2', sha256='1f3710b77c58a46891808dbe23dc59a1259d9c6b7bb123aaaeaa6ddf2be882ea')
version('5.6.0', sha256='cb8c4d752ad9805c74b4a08f8ae6e83402c3f11e38b274dba171b99bb6ac2460')
version('5.5.2', sha256='64561f34c4402b88f3cb20a956842394dde5838efd7ebb301157a837114a0e2d')
version('5.5.1', sha256='a6e67a95a7a5711a2b5f95f38ccbff4912262b3e1b1af7d6b9afe8185aa85c0d')
version('5.5.0', sha256='1b619e326ff574de808732ca9a7447e4cd14e94ae6568f55b6581896cd569dff')
version('5.4.1', sha256='390d0f5dc66bf432e202a39b1f34193af4bf8aad2355338fa5e2778ea07a80e4')
version('5.4.0', sha256='f488d84a53b1286d2ee1967e386626c8ad05a6fe4e6cbdaa8d5e042f519f94a9')
version('5.3.0', sha256='046631bbf00775edc927314a3db207509666c9c6aadc7079e5159440fd2f88a0')
version('5.2.0', sha256='894e42ef8475bb49e4e7e64f4ee2c37c714facd18bfbb1d6de7f69676b062c96')
version('5.1.2', sha256='ff02b7307a256b7c6e8ad900dee5796297494df7f9a0804fe801eb2f66e6a187')
version('5.0.1', sha256='caddec83ec284162a2cbc46877b0e5a9d2cca59fb4ab0ea35b0948d2492950bb')
version('4.4.0', sha256='c2dc334a89df24ce5233b81b74740fc9f10bc181cd604109fd13f6ad2381fc73')
variant('plugins', default=True,
description='Install include files for plugins support')
variant('python', default=False, description='Enable Python support')
variant('python3', default=False, description='Enable Python3 support')
variant('mpi', default=True, description='Enable MPI support')
variant('osmesa', default=False, description='Enable OSMesa support')
variant('qt', default=False, description='Enable Qt (gui) support')
variant('opengl2', default=True, description='Enable OpenGL2 backend')
variant('examples', default=False, description="Build examples")
variant('hdf5', default=False, description="Use external HDF5")
variant('shared', default=True,
description='Builds a shared version of the library')
variant('kits', default=True,
description='Use module kits')
conflicts('+python', when='+python3')
# Python 2 support dropped with 5.9.0
conflicts('+python', when='@5.9:')
conflicts('+python3', when='@:5.5')
conflicts('+shared', when='+cuda')
# Legacy rendering dropped in 5.5
# See commit: https://gitlab.kitware.com/paraview/paraview/-/commit/798d328c
conflicts('~opengl2', when='@5.5:')
# Workaround for
# adding the following to your packages.yaml
# packages:
# python:
# version: [3, 2]
# without this you'll get:
# paraview requires python version 3:, but spec asked for 2.7.16
# for `spack spec paraview+python+osmesa`
# see spack pull request #11539
extends('python', when='+python')
extends('python', when='+python3')
    depends_on('python@2.7:2.8', when='+python', type=('build', 'run'))
depends_on('python@3:', when='+python3', type=('build', 'run'))
depends_on('py-numpy@:1.15.4', when='+python', type=('build', 'run'))
depends_on('py-numpy', when='+python3', type=('build', 'run'))
depends_on('py-mpi4py', when='+python+mpi', type=('build', 'run'))
depends_on('py-mpi4py', when='+python3+mpi', type=('build', 'run'))
depends_on('py-matplotlib@:2', when='+python', type='run')
depends_on('py-matplotlib', when='+python3', type='run')
depends_on('mpi', when='+mpi')
depends_on('qt+opengl', when='@5.3.0:+qt+opengl2')
depends_on('qt~opengl', when='@5.3.0:+qt~opengl2')
depends_on('qt@:4', when='@:5.2.0+qt')
depends_on('mesa+osmesa', when='+osmesa')
    depends_on('gl@3.2:', when='+opengl2')
    depends_on('gl@1.2:', when='~opengl2')
depends_on('libxt', when='~osmesa platform=linux')
conflicts('+qt', when='+osmesa')
depends_on('bzip2')
depends_on('freetype')
# depends_on('hdf5+mpi', when='+mpi')
# depends_on('hdf5~mpi', when='~mpi')
depends_on('hdf5+hl+mpi', when='+hdf5+mpi')
depends_on('hdf5+hl~mpi', when='+hdf5~mpi')
depends_on('jpeg')
depends_on('libpng')
depends_on('libtiff')
depends_on('libxml2')
depends_on('netcdf-c')
depends_on('expat')
# depends_on('netcdf-cxx')
# depends_on('protobuf') # version mismatches?
# depends_on('sqlite') # external version not supported
depends_on('zlib')
    depends_on('cmake@3.3:', type='build')
    # Can't concretize with python2 and py-setuptools@45:
    depends_on('py-setuptools@:44.99.99', when='+python')
    # Can't concretize with python2 and py-pillow@7:
    depends_on('py-pillow@:6', when='+python')
patch('stl-reader-pv440.patch', when='@4.4.0')
# Broken gcc-detection - improved in 5.1.0, redundant later
patch('gcc-compiler-pv501.patch', when='@:5.0.1')
# Broken installation (ui_pqExportStateWizard.h) - fixed in 5.2.0
patch('ui_pqExportStateWizard.patch', when='@:5.1.2')
# Broken vtk-m config. Upstream catalyst changes
patch('vtkm-catalyst-pv551.patch', when='@5.5.0:5.5.2')
    def url_for_version(self, version):
        """Handle ParaView version-based custom URLs."""
        _urlfmt = 'http://www.paraview.org/files/v{0}/ParaView-v{1}{2}.tar.{3}'
if version < Version('5.1.0'):
return _urlfmt.format(version.up_to(2), version, '-source', 'gz')
elif version < Version('5.6.1'):
return _urlfmt.format(version.up_to(2), version, '', 'gz')
else:
return _urlfmt.format(version.up_to(2), version, '', 'xz')
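    # For example (illustrative, following the format string above):
    #   5.0.1 -> http://www.paraview.org/files/v5.0/ParaView-v5.0.1-source.tar.gz
    #   5.4.0 -> http://www.paraview.org/files/v5.4/ParaView-v5.4.0.tar.gz
    #   5.7.0 -> http://www.paraview.org/files/v5.7/ParaView-v5.7.0.tar.xz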
@property
def paraview_subdir(self):
"""The paraview subdirectory name as paraview-major.minor"""
return 'paraview-{0}'.format(self.spec.version.up_to(2))
def setup_dependent_build_environment(self, env, dependent_spec):
if os.path.isdir(self.prefix.lib64):
lib_dir = self.prefix.lib64
else:
lib_dir = self.prefix.lib
env.set('ParaView_DIR', self.prefix)
env.set('PARAVIEW_VTK_DIR',
join_path(lib_dir, 'cmake', self.paraview_subdir))
def setup_run_environment(self, env):
# paraview 5.5 and later
# - cmake under lib/cmake/paraview-5.5
# - libs under lib
        # - python bits under lib/python2.7/site-packages
if os.path.isdir(self.prefix.lib64):
lib_dir = self.prefix.lib64
else:
lib_dir = self.prefix.lib
env.set('ParaView_DIR', self.prefix)
env.set('PARAVIEW_VTK_DIR',
join_path(lib_dir, 'cmake', self.paraview_subdir))
if self.spec.version <= Version('5.4.1'):
lib_dir = join_path(lib_dir, self.paraview_subdir)
env.prepend_path('LIBRARY_PATH', lib_dir)
env.prepend_path('LD_LIBRARY_PATH', lib_dir)
if '+python' in self.spec or '+python3' in self.spec:
if self.spec.version <= Version('5.4.1'):
pv_pydir = join_path(lib_dir, 'site-packages')
env.prepend_path('PYTHONPATH', pv_pydir)
env.prepend_path('PYTHONPATH', join_path(pv_pydir, 'vtk'))
else:
python_version = self.spec['python'].version.up_to(2)
pv_pydir = join_path(lib_dir,
'python{0}'.format(python_version),
'site-packages')
if '+shared' in self.spec or \
self.spec.version <= Version('5.7.0'):
env.prepend_path('PYTHONPATH', pv_pydir)
# The Trilinos Catalyst adapter requires
# the vtkmodules directory in PYTHONPATH
env.prepend_path('PYTHONPATH', join_path(pv_pydir,
'vtkmodules'))
else:
env.prepend_path('PYTHONPATH', join_path(pv_pydir,
'_paraview.zip'))
env.prepend_path('PYTHONPATH', join_path(pv_pydir,
'_vtk.zip'))
def cmake_args(self):
"""Populate cmake arguments for ParaView."""
spec = self.spec
def variant_bool(feature, on='ON', off='OFF'):
"""Ternary for spec variant to ON/OFF string"""
if feature in spec:
return on
return off
def nvariant_bool(feature):
"""Negated ternary for spec variant to OFF/ON string"""
return variant_bool(feature, on='OFF', off='ON')
rendering = variant_bool('+opengl2', 'OpenGL2', 'OpenGL')
includes = variant_bool('+plugins')
cmake_args = [
'-DPARAVIEW_BUILD_QT_GUI:BOOL=%s' % variant_bool('+qt'),
'-DVTK_OPENGL_HAS_OSMESA:BOOL=%s' % variant_bool('+osmesa'),
'-DVTK_USE_X:BOOL=%s' % nvariant_bool('+osmesa'),
'-DVTK_RENDERING_BACKEND:STRING=%s' % rendering,
'-DPARAVIEW_INSTALL_DEVELOPMENT_FILES:BOOL=%s' % includes,
'-DBUILD_TESTING:BOOL=OFF',
'-DBUILD_EXAMPLES:BOOL=%s' % variant_bool('+examples'),
'-DVTK_USE_SYSTEM_FREETYPE:BOOL=ON',
'-DVTK_USE_SYSTEM_HDF5:BOOL=%s' % variant_bool('+hdf5'),
'-DVTK_USE_SYSTEM_JPEG:BOOL=ON',
'-DVTK_USE_SYSTEM_LIBXML2:BOOL=ON',
'-DVTK_USE_SYSTEM_NETCDF:BOOL=ON',
'-DVTK_USE_SYSTEM_EXPAT:BOOL=ON',
'-DVTK_USE_SYSTEM_TIFF:BOOL=ON',
'-DVTK_USE_SYSTEM_ZLIB:BOOL=ON',
'-DVTK_USE_SYSTEM_PNG:BOOL=ON',
'-DOpenGL_GL_PREFERENCE:STRING=LEGACY'
]
# The assumed qt version changed to QT5 (as of paraview 5.2.1),
# so explicitly specify which QT major version is actually being used
if '+qt' in spec:
cmake_args.extend([
'-DPARAVIEW_QT_VERSION=%s' % spec['qt'].version[0],
])
# CMake flags for python have changed with newer ParaView versions
# Make sure Spack uses the right cmake flags
if '+python' in spec or '+python3' in spec:
py_use_opt = 'USE' if spec.satisfies('@5.8:') else 'ENABLE'
py_ver_opt = 'PARAVIEW' if spec.satisfies('@5.7:') else 'VTK'
py_ver_val = 3 if '+python3' in spec else 2
cmake_args.extend([
'-DPARAVIEW_%s_PYTHON:BOOL=ON' % py_use_opt,
'-DPYTHON_EXECUTABLE:FILEPATH=%s' %
spec['python'].command.path,
'-DVTK_USE_SYSTEM_MPI4PY:BOOL=%s' % variant_bool('+mpi'),
'-D%s_PYTHON_VERSION:STRING=%d' % (py_ver_opt, py_ver_val)
])
else:
cmake_args.append('-DPARAVIEW_ENABLE_PYTHON:BOOL=OFF')
if '+mpi' in spec:
cmake_args.extend([
'-DPARAVIEW_USE_MPI:BOOL=ON',
'-DMPIEXEC:FILEPATH=%s/bin/mpiexec' % spec['mpi'].prefix,
'-DMPI_CXX_COMPILER:PATH=%s' % spec['mpi'].mpicxx,
'-DMPI_C_COMPILER:PATH=%s' % spec['mpi'].mpicc,
'-DMPI_Fortran_COMPILER:PATH=%s' % spec['mpi'].mpifc
])
if '+shared' in spec:
cmake_args.append(
'-DPARAVIEW_BUILD_SHARED_LIBS:BOOL=ON'
)
else:
cmake_args.append(
'-DPARAVIEW_BUILD_SHARED_LIBS:BOOL=OFF'
)
if '+cuda' in spec:
cmake_args.extend([
'-DPARAVIEW_USE_CUDA:BOOL=ON',
'-DPARAVIEW_BUILD_SHARED_LIBS:BOOL=OFF'
])
else:
cmake_args.extend([
'-DPARAVIEW_USE_CUDA:BOOL=OFF',
])
if 'darwin' in spec.architecture:
cmake_args.extend([
'-DVTK_USE_X:BOOL=OFF',
'-DPARAVIEW_DO_UNIX_STYLE_INSTALLS:BOOL=ON',
])
if '+kits' in spec:
if spec.satisfies('@5.0:5.6'):
cmake_args.append(
'-DVTK_ENABLE_KITS:BOOL=ON')
elif spec.satisfies('@5.7'):
cmake_args.append(
'-DPARAVIEW_ENABLE_KITS:BOOL=ON')
else:
cmake_args.append(
'-DPARAVIEW_BUILD_WITH_KITS:BOOL=ON')
# Hide git from Paraview so it will not use `git describe`
# to find its own version number
if spec.satisfies('@5.4.0:5.4.1'):
cmake_args.extend([
'-DGIT_EXECUTABLE=FALSE'
])
# A bug that has been found in vtk causes an error for
# intel builds for version 5.6. This should be revisited
        # with later versions of Paraview to see if the issue still
# arises.
if '%intel' in spec and spec.version >= Version('5.6'):
cmake_args.append('-DPARAVIEW_ENABLE_MOTIONFX:BOOL=OFF')
return cmake_args
| lgpl-2.1 |
vybstat/scikit-learn | sklearn/linear_model/passive_aggressive.py | 97 | 10879 | # Authors: Rob Zinkov, Mathieu Blondel
# License: BSD 3 clause
from .stochastic_gradient import BaseSGDClassifier
from .stochastic_gradient import BaseSGDRegressor
from .stochastic_gradient import DEFAULT_EPSILON
class PassiveAggressiveClassifier(BaseSGDClassifier):
"""Passive Aggressive Classifier
Read more in the :ref:`User Guide <passive_aggressive>`.
Parameters
----------
C : float
Maximum step size (regularization). Defaults to 1.0.
    fit_intercept : bool, default=True
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered.
n_iter : int, optional
The number of passes over the training data (aka epochs).
Defaults to 5.
shuffle : bool, default=True
Whether or not the training data should be shuffled after each epoch.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level
n_jobs : integer, optional
The number of CPUs to use to do the OVA (One Versus All, for
multi-class problems) computation. -1 means 'all CPUs'. Defaults
to 1.
loss : string, optional
The loss function to be used:
hinge: equivalent to PA-I in the reference paper.
squared_hinge: equivalent to PA-II in the reference paper.
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
class_weight : dict, {class_label: weight} or "balanced" or None, optional
Preset for the class_weight fit parameter.
Weights associated with classes. If not given, all classes
are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
Attributes
----------
coef_ : array, shape = [1, n_features] if n_classes == 2 else [n_classes,\
n_features]
Weights assigned to the features.
intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
See also
--------
SGDClassifier
Perceptron
References
----------
Online Passive-Aggressive Algorithms
<http://jmlr.csail.mit.edu/papers/volume7/crammer06a/crammer06a.pdf>
    K. Crammer, O. Dekel, J. Keshet, S. Shalev-Shwartz, Y. Singer - JMLR (2006)
"""
def __init__(self, C=1.0, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, loss="hinge", n_jobs=1, random_state=None,
warm_start=False, class_weight=None):
BaseSGDClassifier.__init__(self,
penalty=None,
fit_intercept=fit_intercept,
n_iter=n_iter,
shuffle=shuffle,
verbose=verbose,
random_state=random_state,
eta0=1.0,
warm_start=warm_start,
class_weight=class_weight,
n_jobs=n_jobs)
self.C = C
self.loss = loss
def partial_fit(self, X, y, classes=None):
"""Fit linear model with Passive Aggressive algorithm.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Subset of the training data
y : numpy array of shape [n_samples]
Subset of the target values
classes : array, shape = [n_classes]
Classes across all calls to partial_fit.
Can be obtained by via `np.unique(y_all)`, where y_all is the
target vector of the entire dataset.
This argument is required for the first call to partial_fit
and can be omitted in the subsequent calls.
Note that y doesn't need to contain all labels in `classes`.
Returns
-------
self : returns an instance of self.
"""
if self.class_weight == 'balanced':
raise ValueError("class_weight 'balanced' is not supported for "
"partial_fit. For 'balanced' weights, use "
"`sklearn.utils.compute_class_weight` with "
"`class_weight='balanced'`. In place of y you "
"can use a large enough subset of the full "
"training set target to properly estimate the "
"class frequency distributions. Pass the "
"resulting weights as the class_weight "
"parameter.")
lr = "pa1" if self.loss == "hinge" else "pa2"
return self._partial_fit(X, y, alpha=1.0, C=self.C,
loss="hinge", learning_rate=lr, n_iter=1,
classes=classes, sample_weight=None,
coef_init=None, intercept_init=None)
def fit(self, X, y, coef_init=None, intercept_init=None):
"""Fit linear model with Passive Aggressive algorithm.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : numpy array of shape [n_samples]
Target values
coef_init : array, shape = [n_classes,n_features]
The initial coefficients to warm-start the optimization.
intercept_init : array, shape = [n_classes]
The initial intercept to warm-start the optimization.
Returns
-------
self : returns an instance of self.
"""
lr = "pa1" if self.loss == "hinge" else "pa2"
return self._fit(X, y, alpha=1.0, C=self.C,
loss="hinge", learning_rate=lr,
coef_init=coef_init, intercept_init=intercept_init)
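# Minimal usage sketch (not part of this module; X_train, y_train, X_test are
# assumed to be NumPy arrays prepared by the caller):
#
#     from sklearn.linear_model import PassiveAggressiveClassifier
#     clf = PassiveAggressiveClassifier(C=1.0, n_iter=5, random_state=0)
#     clf.fit(X_train, y_train)
#     predictions = clf.predict(X_test)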
class PassiveAggressiveRegressor(BaseSGDRegressor):
"""Passive Aggressive Regressor
Read more in the :ref:`User Guide <passive_aggressive>`.
Parameters
----------
C : float
Maximum step size (regularization). Defaults to 1.0.
epsilon : float
If the difference between the current prediction and the correct label
is below this threshold, the model is not updated.
fit_intercept : bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
n_iter : int, optional
The number of passes over the training data (aka epochs).
Defaults to 5.
shuffle : bool, default=True
Whether or not the training data should be shuffled after each epoch.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level
loss : string, optional
The loss function to be used:
epsilon_insensitive: equivalent to PA-I in the reference paper.
squared_epsilon_insensitive: equivalent to PA-II in the reference
paper.
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
Attributes
----------
coef_ : array, shape = [1, n_features] if n_classes == 2 else [n_classes,\
n_features]
Weights assigned to the features.
intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
See also
--------
SGDRegressor
References
----------
Online Passive-Aggressive Algorithms
<http://jmlr.csail.mit.edu/papers/volume7/crammer06a/crammer06a.pdf>
    K. Crammer, O. Dekel, J. Keshet, S. Shalev-Shwartz, Y. Singer - JMLR (2006)
"""
def __init__(self, C=1.0, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, loss="epsilon_insensitive",
epsilon=DEFAULT_EPSILON, random_state=None, warm_start=False):
BaseSGDRegressor.__init__(self,
penalty=None,
l1_ratio=0,
epsilon=epsilon,
eta0=1.0,
fit_intercept=fit_intercept,
n_iter=n_iter,
shuffle=shuffle,
verbose=verbose,
random_state=random_state,
warm_start=warm_start)
self.C = C
self.loss = loss
def partial_fit(self, X, y):
"""Fit linear model with Passive Aggressive algorithm.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Subset of training data
y : numpy array of shape [n_samples]
Subset of target values
Returns
-------
self : returns an instance of self.
"""
lr = "pa1" if self.loss == "epsilon_insensitive" else "pa2"
return self._partial_fit(X, y, alpha=1.0, C=self.C,
loss="epsilon_insensitive",
learning_rate=lr, n_iter=1,
sample_weight=None,
coef_init=None, intercept_init=None)
def fit(self, X, y, coef_init=None, intercept_init=None):
"""Fit linear model with Passive Aggressive algorithm.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : numpy array of shape [n_samples]
Target values
coef_init : array, shape = [n_features]
The initial coefficients to warm-start the optimization.
intercept_init : array, shape = [1]
The initial intercept to warm-start the optimization.
Returns
-------
self : returns an instance of self.
"""
lr = "pa1" if self.loss == "epsilon_insensitive" else "pa2"
return self._fit(X, y, alpha=1.0, C=self.C,
loss="epsilon_insensitive",
learning_rate=lr,
coef_init=coef_init,
intercept_init=intercept_init)
| bsd-3-clause |
Evfro/polara | polara/recommender/models.py | 1 | 47667 | from functools import wraps
from collections import namedtuple
import warnings
from concurrent.futures import ThreadPoolExecutor
from concurrent.futures import as_completed
import pandas as pd
import numpy as np
import scipy as sp
from scipy.sparse import coo_matrix, csr_matrix
from scipy.sparse.linalg import svds
from polara.recommender import defaults
from polara.recommender.evaluation import get_hits, get_relevance_scores, get_ranking_scores, get_experience_scores
from polara.recommender.evaluation import get_hr_score, get_rr_scores
from polara.recommender.evaluation import assemble_scoring_matrices
from polara.recommender.evaluation import matrix_from_observations
from polara.recommender.utils import array_split
from polara.lib.optimize import simple_pmf_sgd
from polara.lib.tensor import hooi
from polara.preprocessing.matrices import rescale_matrix
from polara.lib.sampler import mf_random_item_scoring
from polara.lib.sparse import sparse_dot, inverse_permutation
from polara.lib.sparse import inner_product_at
from polara.lib.sparse import unfold_tensor_coordinates, tensor_outer_at
from polara.tools.timing import track_time
def get_default(name):
return defaults.get_config([name])[name]
def clean_build_decorator(build_func):
# this ensures that every time the build function is called,
# all cached recommendations are cleared
@wraps(build_func)
def wrapper(self, *args, **kwargs):
self._is_ready = False
self._recommendations = None
build_res = build_func(self, *args, **kwargs)
self._is_ready = True
return build_res
return wrapper
def with_metaclass(mcls):
# this is used to ensure python 2/3 interoperablity, taken from:
# https://stackoverflow.com/questions/22409430/portable-meta-class-between-python2-and-python3
def decorator(cls):
body = vars(cls).copy()
# clean out class body
body.pop('__dict__', None)
body.pop('__weakref__', None)
return mcls(cls.__name__, cls.__bases__, body)
return decorator
class MetaModel(type):
# performs cleaning of the instance when build method is called
# propagates the action to any subclasses, key idea is borrowed from here:
# https://stackoverflow.com/questions/18858759/python-decorating-a-class-method-that-is-intended-to-be-overwritten-when-inheri
def __new__(mcs, name, bases, clsdict):
cls = super(MetaModel, mcs).__new__(mcs, name, bases, clsdict)
if 'build' in clsdict:
setattr(cls, 'build', clean_build_decorator(clsdict['build']))
return cls
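# Illustrative sketch (hypothetical subclass): because of MetaModel, any
# subclass that overrides `build` gets it wrapped by clean_build_decorator,
# so cached recommendations are invalidated automatically on every rebuild.
#
#     class MyModel(RecommenderModel):
#         def build(self):
#             ...  # fit model parameters here
#
#     # model = MyModel(data); calling model.build() resets model._recommendations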
@with_metaclass(MetaModel)
class RecommenderModel(object):
_config = ('topk', 'filter_seen', 'switch_positive', 'feedback_threshold', 'verify_integrity')
_pad_const = -1 # used for sparse data
def __init__(self, recommender_data, feedback_threshold=None):
self.data = recommender_data
self._recommendations = None
self.method = 'ABC'
self._topk = get_default('topk')
self._filter_seen = get_default('filter_seen')
self._feedback_threshold = feedback_threshold or get_default('feedback_threshold')
self.switch_positive = get_default('switch_positive')
self.verify_integrity = get_default('verify_integrity')
self.max_test_workers = get_default('max_test_workers')
# TODO sorting in data must be by self._prediction_key, also need to change get_test_data
self._prediction_key = self.data.fields.userid
self._prediction_target = self.data.fields.itemid
self._is_ready = False
self.verbose = True
self.training_time = [] # setting to None will prevent storing time
self.data.subscribe(self.data.on_change_event, self._renew_model)
self.data.subscribe(self.data.on_update_event, self._refresh_model)
@property
def recommendations(self):
if self._recommendations is None:
if not self._is_ready:
if self.verbose:
print('{} model is not ready. Rebuilding.'.format(self.method))
self.build()
self._recommendations = self.get_recommendations()
return self._recommendations
def _renew_model(self):
self._recommendations = None
self._is_ready = False
def _refresh_model(self):
self._recommendations = None
@property
def topk(self):
return self._topk
@topk.setter
def topk(self, new_value):
        # support rolling back scenario for @k calculations
if (self._recommendations is not None) and (new_value > self._recommendations.shape[1]):
self._recommendations = None # if topk is too high - recalculate recommendations
self._topk = new_value
@property
def feedback_threshold(self):
return self._feedback_threshold
@feedback_threshold.setter
def feedback_threshold(self, new_value):
if self._feedback_threshold != new_value:
self._feedback_threshold = new_value
self._renew_model()
@property
def filter_seen(self):
return self._filter_seen
@filter_seen.setter
def filter_seen(self, new_value):
if self._filter_seen != new_value:
self._filter_seen = new_value
self._refresh_model()
def get_base_configuration(self):
config = {attr: getattr(self, attr) for attr in self._config}
return config
def build(self):
raise NotImplementedError('This must be implemented in subclasses')
def get_training_matrix(self, feedback_threshold=None, ignore_feedback=False,
sparse_format='csr', dtype=None):
threshold = feedback_threshold or self.feedback_threshold
# the line below also updates data if needed and triggers notifier
idx, val, shp = self.data.to_coo(tensor_mode=False,
feedback_threshold=threshold)
dtype = dtype or val.dtype
if ignore_feedback: # for compatibility with non-numeric tensor feedback data
val = np.ones_like(val, dtype=dtype)
matrix = coo_matrix((val, (idx[:, 0], idx[:, 1])),
shape=shp, dtype=dtype)
if sparse_format == 'csr':
return matrix.tocsr()
elif sparse_format == 'csc':
return matrix.tocsc()
elif sparse_format == 'coo':
matrix.sum_duplicates()
return matrix
def get_test_matrix(self, test_data=None, shape=None, user_slice=None, dtype=None, ignore_feedback=False):
if test_data is None:
test_data, shape, _ = self._get_test_data()
elif shape is None:
raise ValueError('Shape of test data must be provided')
num_users_all = shape[0]
if user_slice:
start, stop = user_slice
stop = min(stop, num_users_all)
num_users = stop - start
coo_data = self._slice_test_data(test_data, start, stop)
else:
num_users = num_users_all
coo_data = test_data
user_coo, item_coo, fdbk_coo = coo_data
valid_fdbk = fdbk_coo != 0
if not valid_fdbk.all():
user_coo = user_coo[valid_fdbk]
item_coo = item_coo[valid_fdbk]
fdbk_coo = fdbk_coo[valid_fdbk]
dtype = dtype or fdbk_coo.dtype
if ignore_feedback: # for compatibility with non-numeric tensor feedback data
fdbk_coo = np.ones_like(fdbk_coo, dtype=dtype)
num_items = shape[1]
test_matrix = csr_matrix((fdbk_coo, (user_coo, item_coo)),
shape=(num_users, num_items),
dtype=dtype)
return test_matrix, coo_data
def _get_slices_idx(self, shape, result_width=None, scores_multiplier=None, dtypes=None):
result_width = result_width or self.topk
if scores_multiplier is None:
try:
fdbk_dim = self.factors.get(self.data.fields.feedback, None).shape
scores_multiplier = fdbk_dim[1]
except AttributeError:
scores_multiplier = 1
slices_idx = array_split(shape, result_width, scores_multiplier, dtypes=dtypes)
return slices_idx
def _get_test_data(self, feedback_threshold=None):
try:
tensor_mode = self.factors.get(self.data.fields.feedback, None) is not None
except AttributeError:
tensor_mode = False
test_shape = self.data.get_test_shape(tensor_mode=tensor_mode)
threshold = feedback_threshold or self.feedback_threshold
if self.data.warm_start:
if threshold:
print('Specifying threshold has no effect in warm start.')
threshold = None
else:
if self.data.test_sample and (threshold is not None):
print('Specifying both threshold value and test_sample may change test data.')
user_idx, item_idx, feedback = self.data.test_to_coo(tensor_mode=tensor_mode, feedback_threshold=threshold)
idx_diff = np.diff(user_idx)
# TODO sorting by self._prediction_key
assert (idx_diff >= 0).all() # calculations assume testset is sorted by users!
# TODO only required when testset consists of known users
if (idx_diff > 1).any() or (user_idx.min() != 0): # check index monotonicity
test_users = user_idx[np.r_[0, np.where(idx_diff)[0]+1]]
user_idx = np.r_[0, np.cumsum(idx_diff > 0)].astype(user_idx.dtype)
else:
test_users = np.arange(test_shape[0])
test_data = (user_idx, item_idx, feedback)
return test_data, test_shape, test_users
@staticmethod
def _slice_test_data(test_data, start, stop):
user_coo, item_coo, fdbk_coo = test_data
slicer = (user_coo >= start) & (user_coo < stop)
# always slice over users only
user_slice_coo = user_coo[slicer] - start
item_slice_coo = item_coo[slicer]
fdbk_slice_coo = fdbk_coo[slicer]
return (user_slice_coo, item_slice_coo, fdbk_slice_coo)
def slice_recommendations(self, test_data, shape, start, stop, test_users=None):
raise NotImplementedError('This must be implemented in subclasses')
def _user_scores(self, i):
# should not be exposed, designed for use within framework
# operates on internal itemid's
if not self._is_ready:
if self.verbose:
print('{} model is not ready. Rebuilding.'.format(self.method))
self.build()
test_data, test_shape, test_users = self._get_test_data()
if not self.data.warm_start:
i, = np.where(test_users == i)[0]
scores, seen_idx = self.slice_recommendations(test_data, test_shape, i, i+1)
if self.filter_seen:
self.downvote_seen_items(scores, seen_idx)
return scores, seen_idx
def _make_user(self, user_info):
# converts external user info into internal representation
userid, itemid, feedback = self.data.fields
if isinstance(user_info, dict): # item:feedback dictionary
items_data, feedback_data = zip(*user_info.items())
elif isinstance(user_info, (list, tuple, set, np.ndarray)): # list of items
items_data = user_info
feedback_data = {}
if feedback is not None:
feedback_val = self.data.training[feedback].max()
feedback_data = {feedback: [feedback_val]*len(items_data)}
else:
raise ValueError("Unrecognized input for `user_info`.")
try:
item_index = self.data.index.itemid.training
except AttributeError:
item_index = self.data.index.itemid
# need to convert itemid's to internal representation
# conversion is not required for feedback (it's made in *to_coo functions, if needed)
items_data = item_index.set_index('old').loc[items_data, 'new'].values
user_data = {userid: [0]*len(items_data), itemid: items_data}
user_data.update(feedback_data)
return pd.DataFrame(user_data)
def show_recommendations(self, user_info, topk=None):
# convenience function to model users and get recs
# operates on external itemid's
if isinstance(user_info, int):
scores, seen_idx = self._user_scores(user_info)
else:
testset = self.data.test.testset
holdout = self.data.test.holdout
user_data = self._make_user(user_info)
try:
# makes a "fake" test user
self.data._test = namedtuple('TestData', 'testset holdout')._make([user_data, None])
scores, seen_idx = self._user_scores(0)
finally:
# restore original data - prevent information loss
self.data._test = namedtuple('TestData', 'testset holdout')._make([testset, holdout])
_topk = self.topk
if topk is not None:
self.topk = topk
try:
# takes care of both sparse and dense recommendation lists
top_recs = self.get_topk_elements(scores).squeeze() # remove singleton
finally:
self.topk = _topk
seen_idx = seen_idx[1] # only items idx
# covert back to external representation
item_index = self.data.get_entity_index(self.data.fields.itemid)
item_idx_map = item_index.set_index('new')
top_recs = item_idx_map.loc[top_recs, 'old'].values
seen_items = item_idx_map.loc[seen_idx, 'old'].values
return top_recs, seen_items
def _slice_recommender(self, user_slice, test_data, test_shape, test_users):
start, stop = user_slice
scores, slice_data = self.slice_recommendations(test_data, test_shape, start, stop, test_users)
if self.filter_seen:
# prevent seen items from appearing in recommendations
# NOTE: in case of sparse models (e.g. simple item-to-item)
# there's a risk of having seen items in recommendations list
# (for topk < i2i_matrix.shape[1]-len(unseen))
# this is related to low generalization ability
# of the naive cooccurrence method itself, not to the algorithm
self.downvote_seen_items(scores, slice_data)
top_recs = self.get_topk_elements(scores)
return top_recs
def run_parallel_recommender(self, result, user_slices, *args):
with ThreadPoolExecutor(max_workers=self.max_test_workers) as executor:
recs_futures = {executor.submit(self._slice_recommender,
user_slice, *args): user_slice
for user_slice in user_slices}
for future in as_completed(recs_futures):
start, stop = recs_futures[future]
result[start:stop, :] = future.result()
def run_sequential_recommender(self, result, user_slices, *args):
for user_slice in user_slices:
start, stop = user_slice
result[start:stop, :] = self._slice_recommender(user_slice, *args)
def get_recommendations(self):
if self.verify_integrity:
self.verify_data_integrity()
test_data = self._get_test_data()
test_shape = test_data[1]
user_slices_idx = self._get_slices_idx(test_shape)
user_slices = zip(user_slices_idx[:-1], user_slices_idx[1:])
top_recs = np.empty((test_shape[0], self.topk), dtype=np.int64)
if self.max_test_workers and len(user_slices_idx) > 2:
self.run_parallel_recommender(top_recs, user_slices, *test_data)
else:
self.run_sequential_recommender(top_recs, user_slices, *test_data)
return top_recs
def evaluate(self, metric_type='all', topk=None, not_rated_penalty=None,
switch_positive=None, ignore_feedback=False, simple_rates=False,
on_feedback_level=None):
if metric_type == 'all':
metric_type = ['hits', 'relevance', 'ranking', 'experience']
if metric_type == 'main':
metric_type = ['relevance', 'ranking']
if not isinstance(metric_type, (list, tuple)):
metric_type = [metric_type]
if int(topk or 0) > self.topk:
self.topk = topk # will also flush old recommendations
# support rolling back scenario for @k calculations
recommendations = self.recommendations[:, :topk] # will recalculate if empty
switch_positive = switch_positive or self.switch_positive
feedback = self.data.fields.feedback
holdout = self.data.test.holdout
if (switch_positive is None) or (feedback is None):
# all recommendations are considered positive predictions
# this is a proper setting for binary data problems (implicit feedback)
# in this case all unrated items, recommended by an algorithm
# assumed to be "honest" false positives and therefore penalty equals 1
not_rated_penalty = 1 if not_rated_penalty is None else not_rated_penalty
is_positive = None
else:
# if data is not binary (explicit feedback), the intuition is different
# it becomes unclear whether unrated items are "honest" false positives
            # as these items can be both top-rated and down-rated
            # the default setting in this case is to ignore such items entirely
            # by setting the penalty to 0; however, it is adjustable
not_rated_penalty = not_rated_penalty or 0
is_positive = (holdout[feedback] >= switch_positive).values
feedback = None if ignore_feedback else feedback
scoring_data = assemble_scoring_matrices(recommendations, holdout,
self._prediction_key, self._prediction_target,
is_positive, feedback=feedback)
scores = []
if 'relevance' in metric_type: # no need for feedback
if (self.data.holdout_size == 1) or simple_rates:
scores.append(get_hr_score(scoring_data[1]))
else:
scores.append(get_relevance_scores(*scoring_data, not_rated_penalty=not_rated_penalty))
if 'ranking' in metric_type:
if (self.data.holdout_size == 1) or simple_rates:
scores.append(get_rr_scores(scoring_data[1]))
else:
ndcg_alternative = get_default('ndcg_alternative')
topk = recommendations.shape[1] # handle topk=None case
# topk has to be passed explicitly, otherwise it's unclear how to
# estimate ideal ranking for NDCG and NDCL metrics in get_ndcr_discounts
# it's also used in MAP calculation
scores.append(get_ranking_scores(*scoring_data, topk=topk, switch_positive=switch_positive, alternative=ndcg_alternative))
if 'experience' in metric_type: # no need for feedback
fields = self.data.fields
# support custom scenarios, e.g. coldstart
entity_type = fields._fields[fields.index(self._prediction_target)]
entity_index = getattr(self.data.index, entity_type)
try:
n_entities = entity_index.shape[0]
except AttributeError:
n_entities = entity_index.training.shape[0]
scores.append(get_experience_scores(recommendations, n_entities))
if 'hits' in metric_type: # no need for feedback
scores.append(get_hits(*scoring_data, not_rated_penalty=not_rated_penalty))
if not scores:
raise NotImplementedError
if len(scores) == 1:
scores = scores[0]
return scores
@staticmethod
def topsort(a, topk):
parted = np.argpartition(a, -topk)[-topk:]
return parted[np.argsort(-a[parted])]
@staticmethod
def downvote_seen_items(recs, idx_seen):
        # NOTE for a sparse scores matrix this method can lead to slightly worse
        # results (compared to the same method with a "densified" scores matrix)
# models with sparse scores can alleviate that by extending recommendations
# list with most popular items or items generated by a more sophisticated logic
idx_seen = idx_seen[:2] # need only users and items
if sp.sparse.issparse(recs):
ind_data = np.ones(len(idx_seen[0]), dtype=np.bool) # indicator
seen = coo_matrix((ind_data, idx_seen), shape=recs.shape, copy=False)
seen_recs = recs.multiply(seen)
# In the sparse case it's impossible to downvote seen items scores
# without making matrix dense. Have to simply make them 0.
recs -= seen_recs
# This, however, differs from the dense case results as seen
# items may appear earlier in the top-k list due to randomization
else:
try:
idx_seen_flat = np.ravel_multi_index(idx_seen, recs.shape)
except ValueError:
# make compatible for single user recommendations
idx_seen_flat = idx_seen
seen_data = recs.flat[idx_seen_flat]
# move seen items scores below minimum value
lowered = recs.min() - (seen_data.max() - seen_data) - 1
recs.flat[idx_seen_flat] = lowered
def get_topk_elements(self, scores):
topk = self.topk
if sp.sparse.issparse(scores):
assert scores.format == 'csr'
# there can be less then topk values in some rows
# need to extend sorted scores to conform with evaluation matrix shape
# can do this by adding -1's to the right, however:
# this relies on the fact that there are no -1's in evaluation matrix
# NOTE need to ensure that this is always true
def topscore(x, k):
data = x.data.values
cols = x.cols.values
nnz = len(data)
if k >= nnz:
cols_sorted = cols[np.argsort(-data)]
# need to pad values to conform with evaluation matrix shape
res = np.pad(cols_sorted, (0, k-nnz),
'constant', constant_values=self._pad_const)
else:
                    # TODO verify that even if k is relatively small,
                    # argpartition doesn't add too much overhead
res = cols[self.topsort(data, k)]
return res
idx = scores.nonzero()
row_data = pd.DataFrame({'data': scores.data, 'cols': idx[1]}).groupby(idx[0], sort=True)
nnz_users = row_data.grouper.levels[0]
num_users = scores.shape[0]
if len(nnz_users) < num_users:
# scores may have zero-valued rows, this breaks get_topk_elements
# as scores.nonzero() will filter out indices of those rows.
# Need to restore full data with zeros in that case.
recs = np.empty((num_users, topk), dtype=idx[1].dtype)
zero_rows = np.in1d(np.arange(num_users), nnz_users, assume_unique=True, invert=True)
recs[zero_rows, :] = self._pad_const
recs[~zero_rows, :] = np.stack(row_data.apply(topscore, topk).tolist())
else:
recs = np.stack(row_data.apply(topscore, topk).tolist())
else:
# apply_along_axis is more memory efficient then argsort on full array
recs = np.apply_along_axis(self.topsort, 1, scores, topk)
return recs
@staticmethod
def orthogonalize(u, v, complete=False):
Qu, Ru = np.linalg.qr(u)
Qv, Rv = np.linalg.qr(v)
if complete:
# it's not needed for folding-in, as Ur and Vr will cancel out anyway
Ur, Sr, Vr = np.linalg.svd(Ru.dot(Rv.T))
U = Qu.dot(Ur)
V = Qv.dot(Vr.T)
else:
U, V = Qu, Qv
return U, V
def verify_data_integrity(self):
data = self.data
userid, itemid, feedback = data.fields
try:
item_index = data.index.itemid.training
except AttributeError:
item_index = data.index.itemid
nunique_items = data.training[itemid].nunique()
assert nunique_items == item_index.shape[0]
assert nunique_items == data.training[itemid].max() + 1
testset = data.test.testset
if testset is not None:
nunique_test_users = testset[userid].nunique()
if data._state == 4:
assert nunique_test_users == testset[userid].max() + 1
try:
assert self.factors.get(itemid, None).shape[0] == item_index.shape[0]
assert self.factors.get(feedback, None).shape[0] == data.index.feedback.shape[0]
except AttributeError:
pass
class NonPersonalized(RecommenderModel):
def __init__(self, kind, *args, **kwargs):
deprecation_msg = '''This is a deprecated method.
Use either PopularityModel or RandomModel instead.'''
warnings.warn(deprecation_msg, DeprecationWarning)
super(NonPersonalized, self).__init__(*args, **kwargs)
self.method = kind
def build(self):
pass
def get_recommendations(self):
userid, itemid, feedback = self.data.fields
test_data = self.data.test.testset
test_idx = (test_data[userid].values.astype(np.int64),
test_data[itemid].values.astype(np.int64))
num_users = self.data.test.testset[userid].max() + 1
if self.method == 'mostpopular':
items_scores = self.data.training.groupby(itemid, sort=True).size().values
# scores = np.lib.stride_tricks.as_strided(items_scores, (num_users, items_scores.size), (0, items_scores.itemsize))
scores = np.repeat(items_scores[None, :], num_users, axis=0)
elif self.method == 'random':
num_items = self.data.training[itemid].max() + 1
scores = np.random.random((num_users, num_items))
elif self.method == 'topscore':
items_scores = self.data.training.groupby(itemid, sort=True)[feedback].sum().values
scores = np.repeat(items_scores[None, :], num_users, axis=0)
else:
raise NotImplementedError
if self.filter_seen:
# prevent seen items from appearing in recommendations
self.downvote_seen_items(scores, test_idx)
top_recs = self.get_topk_elements(scores)
return top_recs
class PopularityModel(RecommenderModel):
def __init__(self, *args, **kwargs):
super(PopularityModel, self).__init__(*args, **kwargs)
self.method = 'MP'
self.by_feedback_value = False
def build(self):
itemid = self.data.fields.itemid
item_groups = self.data.training.groupby(itemid, sort=True)
if self.by_feedback_value:
feedback = self.data.fields.feedback
self.item_scores = item_groups[feedback].sum().values
else:
self.item_scores = item_groups.size().values
def slice_recommendations(self, test_data, shape, start, stop, test_users=None):
slice_data = self._slice_test_data(test_data, start, stop)
n_users = stop - start
scores = np.repeat(self.item_scores[None, :], n_users, axis=0)
return scores, slice_data
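# Minimal usage sketch (assuming `data` is an already prepared polara data
# object, e.g. a RecommenderData instance):
#
#     mp = PopularityModel(data)
#     mp.build()                      # counts item occurrences in the training set
#     top_items = mp.recommendations  # most popular items for every test user
#     scores = mp.evaluate('relevance')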
class RandomModel(RecommenderModel):
def __init__(self, *args, **kwargs):
self.seed = kwargs.pop('seed', None)
super(RandomModel, self).__init__(*args, **kwargs)
self.method = 'RND'
def build(self):
try:
index_data = self.data.index.itemid.training
except AttributeError:
index_data = self.data.index.itemid
self.n_items = index_data.shape[0]
seed = self.seed
self._random_state = np.random.RandomState(seed) if seed is not None else np.random
def slice_recommendations(self, test_data, shape, start, stop, test_users=None):
slice_data = self._slice_test_data(test_data, start, stop)
n_users = stop - start
scores = self._random_state.rand(n_users, self.n_items)
return scores, slice_data
class CooccurrenceModel(RecommenderModel):
def __init__(self, *args, **kwargs):
super(CooccurrenceModel, self).__init__(*args, **kwargs)
self.method = 'item-to-item' # pick some meaningful name
self.implicit = False
self.dense_output = False
def build(self):
user_item_matrix = self.get_training_matrix()
if self.implicit:
# np.sign allows for negative values as well
user_item_matrix.data = np.sign(user_item_matrix.data)
with track_time(self.training_time, verbose=self.verbose, model=self.method):
i2i_matrix = user_item_matrix.T.dot(user_item_matrix) # gives CSC format
i2i_matrix.setdiag(0) # exclude "self-links"
i2i_matrix.eliminate_zeros()
self._i2i_matrix = i2i_matrix
def slice_recommendations(self, test_data, shape, start, stop, test_users=None):
test_matrix, slice_data = self.get_test_matrix(test_data, shape, (start, stop))
# NOTE CSR format is mandatory for proper handling of signle user
# recommendations, as vector of shape (1, N) in CSC format is inefficient
if self.implicit:
test_matrix.data = np.sign(test_matrix.data)
scores = sparse_dot(test_matrix, self._i2i_matrix, self.dense_output, True)
return scores, slice_data
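# Note (illustrative): with a user-item matrix A, CooccurrenceModel scores a
# slice of test users T as T.dot(A.T.dot(A)) with the diagonal of the
# item-to-item matrix zeroed out, i.e. plain counts of items rated together,
# excluding "self-links"; when `implicit` is True, both matrices are first
# reduced to sign (presence) indicators via np.sign.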
class ProbabilisticMF(RecommenderModel):
def __init__(self, *args, **kwargs):
self.seed = kwargs.pop('seed', None)
super().__init__(*args, **kwargs)
self.method = 'PMF'
self.optimizer = simple_pmf_sgd
self.learn_rate = 0.005
self.sigma = 1
self.num_epochs = 25
self.rank = 10
self.tolerance = 1e-4
self.factors = {}
self.rmse_history = None
self.show_rmse = False
self.iterations_time = None
def build(self, *args, **kwargs):
matrix = self.get_training_matrix(sparse_format='coo', dtype='f8')
user_idx, item_idx = matrix.nonzero()
interactions = (user_idx, item_idx, matrix.data)
nonzero_count = (matrix.getnnz(axis=1), matrix.getnnz(axis=0))
rank = self.rank
lrate = self.learn_rate
sigma = self.sigma
num_epochs = self.num_epochs
tol = self.tolerance
self.rmse_history = []
self.iterations_time = []
general_config = dict(seed=self.seed,
verbose=self.show_rmse,
iter_errors=self.rmse_history,
iter_time=self.iterations_time)
with track_time(self.training_time, verbose=self.verbose, model=self.method):
P, Q = self.optimizer(interactions, matrix.shape, nonzero_count, rank,
lrate, sigma, num_epochs, tol,
*args,
**kwargs,
**general_config)
self.factors[self.data.fields.userid] = P
self.factors[self.data.fields.itemid] = Q
def get_recommendations(self):
if self.data.warm_start:
raise NotImplementedError
else:
return super().get_recommendations()
def slice_recommendations(self, test_data, shape, start, stop, test_users=None):
userid = self.data.fields.userid
itemid = self.data.fields.itemid
slice_data = self._slice_test_data(test_data, start, stop)
user_factors = self.factors[userid][test_users[start:stop], :]
item_factors = self.factors[itemid]
scores = user_factors.dot(item_factors.T)
return scores, slice_data
class EmbeddingsMixin:
@property
def user_embeddings(self):
return self.factors[self.data.fields.userid]
@property
def item_embeddings(self):
return self.factors[self.data.fields.itemid]
class SVDModel(RecommenderModel):
def __init__(self, *args, **kwargs):
super(SVDModel, self).__init__(*args, **kwargs)
self._rank = defaults.svd_rank
self.method = 'PureSVD'
self.factors = {}
@property
def rank(self):
return self._rank
@rank.setter
def rank(self, new_value):
if new_value != self._rank:
self._rank = new_value
self._check_reduced_rank(new_value)
self._recommendations = None
def _check_reduced_rank(self, rank):
for entity, factor in self.factors.items():
if factor is None:
continue
if factor.shape[-1] < rank:
self._is_ready = False
self.factors = dict.fromkeys(self.factors.keys())
break
else:
# avoid accidental overwrites if factors backup exists
self.factors = dict(**self.factors)
                # the ellipsis allows handling a 1d array of singular values
self.factors[entity] = factor[..., :rank]
def build(self, operator=None, return_factors='vh'):
if operator is not None:
svd_matrix = operator
else:
svd_matrix = self.get_training_matrix(dtype=np.float64)
svd_params = dict(k=self.rank, return_singular_vectors=return_factors)
with track_time(self.training_time, verbose=self.verbose, model=self.method):
user_factors, sigma, item_factors = svds(svd_matrix, **svd_params)
if user_factors is not None:
user_factors = np.ascontiguousarray(user_factors[:, ::-1])
if item_factors is not None:
item_factors = np.ascontiguousarray(item_factors[::-1, :]).T
if sigma is not None:
sigma = np.ascontiguousarray(sigma[::-1])
self.factors[self.data.fields.userid] = user_factors
self.factors[self.data.fields.itemid] = item_factors
self.factors['singular_values'] = sigma
def slice_recommendations(self, test_data, shape, start, stop, test_users=None):
test_matrix, slice_data = self.get_test_matrix(test_data, shape, (start, stop))
v = self.factors[self.data.fields.itemid]
scores = (test_matrix.dot(v)).dot(v.T)
return scores, slice_data
class ScaledMatrixMixin:
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._col_scaling = 0.4
self._row_scaling = 1
self.method = f'{self.method}-s'
@property
def col_scaling(self):
return self._col_scaling
@property
def row_scaling(self):
return self._row_scaling
@col_scaling.setter
def col_scaling(self, new_value):
if new_value != self._col_scaling:
self._col_scaling = new_value
self._recommendations = None
@row_scaling.setter
def row_scaling(self, new_value):
if new_value != self._row_scaling:
self._row_scaling = new_value
self._recommendations = None
def get_training_matrix(self, *args, **kwargs):
scaled_matrix = super().get_training_matrix(*args, **kwargs)
scaled_matrix = rescale_matrix(scaled_matrix, self.row_scaling, 1)
scaled_matrix = rescale_matrix(scaled_matrix, self.col_scaling, 0)
return scaled_matrix
class ScaledSVD(ScaledMatrixMixin, SVDModel): pass
class CoffeeModel(RecommenderModel):
def __init__(self, *args, **kwargs):
super(CoffeeModel, self).__init__(*args, **kwargs)
self._mlrank = defaults.mlrank
self.factors = {}
self.chunk = defaults.test_chunk_size
self.method = 'CoFFee'
self._flattener = defaults.flattener
self.growth_tol = defaults.growth_tol
self.num_iters = defaults.num_iters
self.show_output = defaults.show_output
self.seed = None
self._vectorize_target = defaults.test_vectorize_target
self.parallel_ttm = defaults.parallel_ttm
@property
def mlrank(self):
return self._mlrank
@mlrank.setter
def mlrank(self, new_value):
if new_value != self._mlrank:
self._mlrank = new_value
self._check_reduced_rank(new_value)
self._recommendations = None
@property
def flattener(self):
return self._flattener
@flattener.setter
def flattener(self, new_value):
old_value = self._flattener
if new_value != old_value:
self._flattener = new_value
self._recommendations = None
@property
def tensor_outer_at(self):
vtarget = self._vectorize_target.lower()
if self.max_test_workers and (vtarget == 'parallel'):
# force single thread for tensor_outer_at to safely run in parallel
vtarget = 'cpu'
return tensor_outer_at(vtarget)
def _check_reduced_rank(self, mlrank):
for mode, entity in enumerate(self.data.fields):
factor = self.factors.get(entity, None)
if factor is None:
continue
rank = mlrank[mode]
if factor.shape[1] < rank:
self._is_ready = False
self.factors = {}
break
elif factor.shape[1] == rank:
continue
else:
# avoid accidental overwrites if factors backup exists
self.factors = dict(**self.factors)
rfactor, new_core = self.round_core(self.factors['core'], mode, rank)
self.factors[entity] = factor.dot(rfactor)
self.factors['core'] = new_core
@staticmethod
def round_core(core, mode, rank):
new_dims = [mode] + [m for m in range(core.ndim) if m!=mode]
mode_dim = core.shape[mode]
flat_core = core.transpose(new_dims).reshape((mode_dim, -1), order='F')
u, s, vt = np.linalg.svd(flat_core, full_matrices=False)
rfactor = u[:, :rank]
new_core = (np.ascontiguousarray(s[:rank, np.newaxis]*vt[:rank, :])
.reshape(rank, *[core.shape[i] for i in new_dims[1:]], order='F')
.transpose(inverse_permutation(np.array(new_dims))))
return rfactor, new_core
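    # Illustrative sketch (comments only): round_core truncates the Tucker core
    # along one mode via an SVD of its mode-n unfolding. For a hypothetical core
    # of shape (5, 4, 3) truncated along mode=1 to rank=2:
    #
    #   core = np.random.rand(5, 4, 3)
    #   rfactor, new_core = CoffeeModel.round_core(core, mode=1, rank=2)
    #   # rfactor.shape  -> (4, 2)   rotation applied to the mode-1 factor
    #   # new_core.shape -> (5, 2, 3) core with the mode-1 dimension reduced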
@staticmethod
def flatten_scores(tensor_scores, flattener=None):
        flattener = slice(None) if flattener is None else flattener
if isinstance(flattener, str):
slicer = slice(None)
flatten = getattr(np, flattener)
matrix_scores = flatten(tensor_scores[..., slicer], axis=-1)
elif isinstance(flattener, int):
slicer = flattener
matrix_scores = tensor_scores[..., slicer]
elif isinstance(flattener, (list, slice)):
slicer = flattener
flatten = np.sum
matrix_scores = flatten(tensor_scores[..., slicer], axis=-1)
elif isinstance(flattener, tuple):
slicer, flatten_method = flattener
slicer = slicer or slice(None)
flatten = getattr(np, flatten_method)
matrix_scores = flatten(tensor_scores[..., slicer], axis=-1)
elif callable(flattener):
matrix_scores = flattener(tensor_scores)
else:
raise ValueError('Unrecognized value for flattener attribute')
return matrix_scores
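    # Illustrative sketch (comments only): the flattener attribute accepts several
    # forms; on a hypothetical `model` instance with tensor scores of shape
    # (n_users, n_items, n_feedback_levels) the following are all valid:
    #
    #   model.flattener = 'sum'                  # np.sum over the feedback axis
    #   model.flattener = 2                      # take the 3rd feedback slice only
    #   model.flattener = slice(1, None)         # sum over a range of slices
    #   model.flattener = ([0, 2], 'max')        # np.max over the selected slices
    #   model.flattener = lambda t: t[..., -1]   # or any custom callable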
def build(self):
idx, val, shp = self.data.to_coo(tensor_mode=True)
with track_time(self.training_time, verbose=self.verbose, model=self.method):
(users_factors, items_factors,
feedback_factors, core) = hooi(idx, val, shp, self.mlrank,
growth_tol=self.growth_tol,
num_iters=self.num_iters,
verbose=self.show_output,
parallel_ttm=self.parallel_ttm,
seed=self.seed)
self.factors[self.data.fields.userid] = users_factors
self.factors[self.data.fields.itemid] = items_factors
self.factors[self.data.fields.feedback] = feedback_factors
self.factors['core'] = core
def unfold_test_tensor_slice(self, test_data, shape, start, stop, mode):
slice_idx = self._slice_test_data(test_data, start, stop)
num_users = stop - start
num_items = shape[1]
num_fdbks = shape[2]
slice_shp = (num_users, num_items, num_fdbks)
idx, shp = unfold_tensor_coordinates(slice_idx, slice_shp, mode)
val = np.ones_like(slice_idx[2], dtype=np.uint8)
test_tensor_unfolded = csr_matrix((val, idx), shape=shp, dtype=val.dtype)
return test_tensor_unfolded, slice_idx
def slice_recommendations(self, test_data, shape, start, stop, test_users=None):
slice_idx = self._slice_test_data(test_data, start, stop)
v = self.factors[self.data.fields.itemid]
w = self.factors[self.data.fields.feedback]
# use the fact that test data is sorted by users for reduction:
scores = self.tensor_outer_at(1.0, v, w, slice_idx[1], slice_idx[2])
scores = np.add.reduceat(scores, np.r_[0, np.where(np.diff(slice_idx[0]))[0]+1])
wt_flat = self.flatten_scores(w.T, self.flattener) # TODO cache result
scores = np.tensordot(scores, wt_flat, axes=(2, 0)).dot(v.T)
return scores, slice_idx
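    # Illustrative sketch (comments only): the np.add.reduceat call above is a
    # segment-sum over rows sharing the same user id, which works because the
    # test data is sorted by user. A minimal standalone example:
    #
    #   uid = np.array([0, 0, 1, 1, 1, 2])                # sorted user ids
    #   x = np.arange(6)
    #   starts = np.r_[0, np.where(np.diff(uid))[0] + 1]  # -> [0, 2, 5]
    #   np.add.reduceat(x, starts)                        # -> [1, 9, 5]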
def get_holdout_slice(self, start, stop):
userid = self.data.fields.userid
itemid = self.data.fields.itemid
holdout = self.data.test.holdout
user_sel = (holdout[userid] >= start) & (holdout[userid] < stop)
holdout_users = holdout.loc[user_sel, userid].values.astype(np.int64) - start
holdout_items = holdout.loc[user_sel, itemid].values.astype(np.int64)
return (holdout_users, holdout_items)
    # additional functionality: rating prediction
def predict_feedback(self):
if self.data.warm_start:
raise NotImplementedError
userid = self.data.fields.userid
itemid = self.data.fields.itemid
feedback = self.data.fields.feedback
holdout = self.data.test.holdout
holdout_users = holdout[userid].values.astype(np.int64)
holdout_items = holdout[itemid].values.astype(np.int64)
u = self.factors[userid]
v = self.factors[itemid]
w = self.factors[feedback]
g = self.factors['core']
gv = np.tensordot(g, v[holdout_items, :], (1, 1))
gu = (gv * u[holdout_users, None, :].T).sum(axis=0)
scores = w.dot(gu).T
predictions = np.argmax(scores, axis=-1)
feedback_idx = self.data.index.feedback.set_index('new')
predicted_feedback = feedback_idx.loc[predictions, 'old'].values
return predicted_feedback
class RandomSampleEvaluationSVDMixin():
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
        # as defined in RandomSampleEvaluationMixin in data models
prefix = self.data._holdout_item_prefix
self._prediction_target = f'{prefix}_{self.data.fields.itemid}'
def compute_holdout_scores(self, user_factors, item_factors):
holdout = self.data.test.holdout
userid = self.data.fields.userid
itemid = self.data.fields.itemid
holdout_size = self.data.holdout_size
        assert holdout_size >= 1 # only a fixed number of holdout items is supported
# "rebase" user index (see comments in `get_recommmendations`)
useridx = pd.factorize(
            holdout[userid], sort=False # already sorted via data model
)[0].reshape(-1, holdout_size)
itemidx = holdout[itemid].values.reshape(-1, holdout_size)
inner_product = inner_product_at(target='parallel')
        # for general matrix factorization, the user must take care of submitting
        # user_factors of the correct shape; otherwise, if the holdout contains
        # only a subset of all users, the answer will be incorrect
return inner_product(user_factors, item_factors, useridx, itemidx)
def compute_random_item_scores(self, user_factors, item_factors):
holdout = self.data.test.holdout
userid = self.data.fields.userid
test_users = holdout[userid].drop_duplicates().values # preserve sorted
test_items = self.data.unseen_interactions.loc[test_users].values
# "rebase" user index (see comments in `get_recommmendations`)
n_users = len(test_users)
n_items = self.data.unseen_items_num
useridx = np.broadcast_to(np.arange(n_users)[:, None], (n_users, n_items))
itemidx = np.concatenate(test_items).reshape(n_users, n_items)
# perform vectorized scalar products at bulk
inner_product = inner_product_at(target='parallel')
        # for general matrix factorization, the user must take care of submitting
        # user_factors of the correct shape; otherwise, if the holdout contains
        # only a subset of all users, the answer will be incorrect
return inner_product(user_factors, item_factors, useridx, itemidx)
def compute_random_item_scores_gen(self, user_factors, item_factors,
profile_matrix, n_unseen):
userid = self.data.fields.userid
itemid = self.data.fields.itemid
holdout = self.data.test.holdout
n_users = profile_matrix.shape[0]
seed = self.data.seed
holdout_matrix = matrix_from_observations(
holdout, userid, itemid, profile_matrix.shape, feedback=None
)
all_seen = profile_matrix + holdout_matrix # only need nnz indices
scores = np.zeros((n_users, n_unseen))
seedseq = np.random.SeedSequence(seed).generate_state(n_users)
mf_random_item_scoring(
user_factors, item_factors, all_seen.indptr, all_seen.indices,
n_unseen, seedseq, scores
)
return scores
def get_recommendations(self):
itemid = self.data.fields.itemid
if self._prediction_target == itemid:
return super().get_recommendations()
item_factors = self.factors[itemid]
test_matrix, _ = self.get_test_matrix()
user_factors = test_matrix.dot(item_factors)
        # from now on we will need to work with "rebased" user indices
# to properly index contiguous user matrices
holdout_scores = self.compute_holdout_scores(user_factors, item_factors)
if self.data.unseen_interactions is None:
n_unseen = self.data.unseen_items_num
if n_unseen is None:
raise ValueError('Number of items to sample is unspecified.')
unseen_scores = self.compute_random_item_scores_gen(
user_factors, item_factors, test_matrix, n_unseen
)
else:
unseen_scores = self.compute_random_item_scores(
user_factors, item_factors
)
# combine all scores and rank selected items
scores = np.concatenate((holdout_scores, unseen_scores), axis=1)
return np.apply_along_axis(self.topsort, 1, scores, self.topk)
| mit |
gfyoung/pandas | pandas/tests/io/parser/test_network.py | 1 | 11173 | """
Tests parsers ability to read and parse non-local files
and hence require a network connection to be read.
"""
from io import BytesIO, StringIO
import logging
import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas import DataFrame
import pandas._testing as tm
from pandas.io.feather_format import read_feather
from pandas.io.parsers import read_csv
@pytest.mark.network
@pytest.mark.parametrize(
"compress_type, extension",
[("gzip", ".gz"), ("bz2", ".bz2"), ("zip", ".zip"), ("xz", ".xz")],
)
@pytest.mark.parametrize("mode", ["explicit", "infer"])
@pytest.mark.parametrize("engine", ["python", "c"])
def test_compressed_urls(salaries_table, compress_type, extension, mode, engine):
check_compressed_urls(salaries_table, compress_type, extension, mode, engine)
@tm.network
def check_compressed_urls(salaries_table, compression, extension, mode, engine):
# test reading compressed urls with various engines and
# extension inference
base_url = (
"https://github.com/pandas-dev/pandas/raw/master/"
"pandas/tests/io/parser/data/salaries.csv"
)
url = base_url + extension
if mode != "explicit":
compression = mode
url_table = read_csv(url, sep="\t", compression=compression, engine=engine)
tm.assert_frame_equal(url_table, salaries_table)
@tm.network("https://raw.githubusercontent.com/", check_before_test=True)
def test_url_encoding_csv():
"""
read_csv should honor the requested encoding for URLs.
GH 10424
"""
path = (
"https://raw.githubusercontent.com/pandas-dev/pandas/master/"
+ "pandas/tests/io/parser/data/unicode_series.csv"
)
df = read_csv(path, encoding="latin-1", header=None)
assert df.loc[15, 1] == "Á köldum klaka (Cold Fever) (1994)"
@pytest.fixture
def tips_df(datapath):
"""DataFrame with the tips dataset."""
return read_csv(datapath("io", "data", "csv", "tips.csv"))
@pytest.mark.usefixtures("s3_resource")
@td.skip_if_not_us_locale()
class TestS3:
@td.skip_if_no("s3fs")
def test_parse_public_s3_bucket(self, tips_df, s3so):
# more of an integration test due to the not-public contents portion
# can probably mock this though.
for ext, comp in [("", None), (".gz", "gzip"), (".bz2", "bz2")]:
df = read_csv(
"s3://pandas-test/tips.csv" + ext,
compression=comp,
storage_options=s3so,
)
assert isinstance(df, DataFrame)
assert not df.empty
tm.assert_frame_equal(df, tips_df)
# Read public file from bucket with not-public contents
df = read_csv("s3://cant_get_it/tips.csv", storage_options=s3so)
assert isinstance(df, DataFrame)
assert not df.empty
tm.assert_frame_equal(df, tips_df)
def test_parse_public_s3n_bucket(self, tips_df, s3so):
# Read from AWS s3 as "s3n" URL
df = read_csv("s3n://pandas-test/tips.csv", nrows=10, storage_options=s3so)
assert isinstance(df, DataFrame)
assert not df.empty
tm.assert_frame_equal(tips_df.iloc[:10], df)
def test_parse_public_s3a_bucket(self, tips_df, s3so):
# Read from AWS s3 as "s3a" URL
df = read_csv("s3a://pandas-test/tips.csv", nrows=10, storage_options=s3so)
assert isinstance(df, DataFrame)
assert not df.empty
tm.assert_frame_equal(tips_df.iloc[:10], df)
def test_parse_public_s3_bucket_nrows(self, tips_df, s3so):
for ext, comp in [("", None), (".gz", "gzip"), (".bz2", "bz2")]:
df = read_csv(
"s3://pandas-test/tips.csv" + ext,
nrows=10,
compression=comp,
storage_options=s3so,
)
assert isinstance(df, DataFrame)
assert not df.empty
tm.assert_frame_equal(tips_df.iloc[:10], df)
def test_parse_public_s3_bucket_chunked(self, tips_df, s3so):
# Read with a chunksize
chunksize = 5
for ext, comp in [("", None), (".gz", "gzip"), (".bz2", "bz2")]:
with read_csv(
"s3://pandas-test/tips.csv" + ext,
chunksize=chunksize,
compression=comp,
storage_options=s3so,
) as df_reader:
assert df_reader.chunksize == chunksize
for i_chunk in [0, 1, 2]:
# Read a couple of chunks and make sure we see them
# properly.
df = df_reader.get_chunk()
assert isinstance(df, DataFrame)
assert not df.empty
true_df = tips_df.iloc[
chunksize * i_chunk : chunksize * (i_chunk + 1)
]
tm.assert_frame_equal(true_df, df)
def test_parse_public_s3_bucket_chunked_python(self, tips_df, s3so):
# Read with a chunksize using the Python parser
chunksize = 5
for ext, comp in [("", None), (".gz", "gzip"), (".bz2", "bz2")]:
with read_csv(
"s3://pandas-test/tips.csv" + ext,
chunksize=chunksize,
compression=comp,
engine="python",
storage_options=s3so,
) as df_reader:
assert df_reader.chunksize == chunksize
for i_chunk in [0, 1, 2]:
# Read a couple of chunks and make sure we see them properly.
df = df_reader.get_chunk()
assert isinstance(df, DataFrame)
assert not df.empty
true_df = tips_df.iloc[
chunksize * i_chunk : chunksize * (i_chunk + 1)
]
tm.assert_frame_equal(true_df, df)
def test_parse_public_s3_bucket_python(self, tips_df, s3so):
for ext, comp in [("", None), (".gz", "gzip"), (".bz2", "bz2")]:
df = read_csv(
"s3://pandas-test/tips.csv" + ext,
engine="python",
compression=comp,
storage_options=s3so,
)
assert isinstance(df, DataFrame)
assert not df.empty
tm.assert_frame_equal(df, tips_df)
def test_infer_s3_compression(self, tips_df, s3so):
for ext in ["", ".gz", ".bz2"]:
df = read_csv(
"s3://pandas-test/tips.csv" + ext,
engine="python",
compression="infer",
storage_options=s3so,
)
assert isinstance(df, DataFrame)
assert not df.empty
tm.assert_frame_equal(df, tips_df)
def test_parse_public_s3_bucket_nrows_python(self, tips_df, s3so):
for ext, comp in [("", None), (".gz", "gzip"), (".bz2", "bz2")]:
df = read_csv(
"s3://pandas-test/tips.csv" + ext,
engine="python",
nrows=10,
compression=comp,
storage_options=s3so,
)
assert isinstance(df, DataFrame)
assert not df.empty
tm.assert_frame_equal(tips_df.iloc[:10], df)
def test_read_s3_fails(self, s3so):
msg = "The specified bucket does not exist"
with pytest.raises(IOError, match=msg):
read_csv("s3://nyqpug/asdf.csv", storage_options=s3so)
# Receive a permission error when trying to read a private bucket.
# It's irrelevant here that this isn't actually a table.
with pytest.raises(IOError, match=msg):
read_csv("s3://cant_get_it/file.csv")
@pytest.mark.xfail(reason="GH#39155 s3fs upgrade", strict=False)
def test_write_s3_csv_fails(self, tips_df, s3so):
# GH 32486
# Attempting to write to an invalid S3 path should raise
import botocore
# GH 34087
# https://boto3.amazonaws.com/v1/documentation/api/latest/guide/error-handling.html
# Catch a ClientError since AWS Service Errors are defined dynamically
error = (FileNotFoundError, botocore.exceptions.ClientError)
with pytest.raises(error, match="The specified bucket does not exist"):
tips_df.to_csv(
"s3://an_s3_bucket_data_doesnt_exit/not_real.csv", storage_options=s3so
)
@pytest.mark.xfail(reason="GH#39155 s3fs upgrade", strict=False)
@td.skip_if_no("pyarrow")
def test_write_s3_parquet_fails(self, tips_df, s3so):
# GH 27679
# Attempting to write to an invalid S3 path should raise
import botocore
# GH 34087
# https://boto3.amazonaws.com/v1/documentation/api/latest/guide/error-handling.html
# Catch a ClientError since AWS Service Errors are defined dynamically
error = (FileNotFoundError, botocore.exceptions.ClientError)
with pytest.raises(error, match="The specified bucket does not exist"):
tips_df.to_parquet(
"s3://an_s3_bucket_data_doesnt_exit/not_real.parquet",
storage_options=s3so,
)
def test_read_csv_handles_boto_s3_object(self, s3_resource, tips_file):
# see gh-16135
s3_object = s3_resource.meta.client.get_object(
Bucket="pandas-test", Key="tips.csv"
)
result = read_csv(BytesIO(s3_object["Body"].read()), encoding="utf8")
assert isinstance(result, DataFrame)
assert not result.empty
expected = read_csv(tips_file)
tm.assert_frame_equal(result, expected)
def test_read_csv_chunked_download(self, s3_resource, caplog, s3so):
        # 8 MB, S3FS uses 5MB chunks
import s3fs
df = DataFrame(np.random.randn(100000, 4), columns=list("abcd"))
str_buf = StringIO()
df.to_csv(str_buf)
buf = BytesIO(str_buf.getvalue().encode("utf-8"))
s3_resource.Bucket("pandas-test").put_object(Key="large-file.csv", Body=buf)
# Possibly some state leaking in between tests.
        # If we don't clear this cache, we see `GetObject operation: Forbidden`.
# Presumably the s3fs instance is being cached, with the directory listing
# from *before* we add the large-file.csv in the pandas-test bucket.
s3fs.S3FileSystem.clear_instance_cache()
with caplog.at_level(logging.DEBUG, logger="s3fs"):
read_csv("s3://pandas-test/large-file.csv", nrows=5, storage_options=s3so)
# log of fetch_range (start, stop)
assert (0, 5505024) in (x.args[-2:] for x in caplog.records)
def test_read_s3_with_hash_in_key(self, tips_df, s3so):
# GH 25945
result = read_csv("s3://pandas-test/tips#1.csv", storage_options=s3so)
tm.assert_frame_equal(tips_df, result)
@td.skip_if_no("pyarrow")
def test_read_feather_s3_file_path(self, feather_file, s3so):
# GH 29055
expected = read_feather(feather_file)
res = read_feather(
"s3://pandas-test/simple_dataset.feather", storage_options=s3so
)
tm.assert_frame_equal(expected, res)
| bsd-3-clause |
rishikksh20/scikit-learn | examples/linear_model/plot_sgd_iris.py | 58 | 2202 | """
========================================
Plot multi-class SGD on the iris dataset
========================================
Plot decision surface of multi-class SGD on iris dataset.
The hyperplanes corresponding to the three one-versus-all (OVA) classifiers
are represented by the dashed lines.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.linear_model import SGDClassifier
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
colors = "bry"
# shuffle
idx = np.arange(X.shape[0])
np.random.seed(13)
np.random.shuffle(idx)
X = X[idx]
y = y[idx]
# standardize
mean = X.mean(axis=0)
std = X.std(axis=0)
X = (X - mean) / std
h = .02 # step size in the mesh
clf = SGDClassifier(alpha=0.001, n_iter=100).fit(X, y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis('tight')
# Plot also the training points
for i, color in zip(clf.classes_, colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=color, label=iris.target_names[i],
cmap=plt.cm.Paired)
plt.title("Decision surface of multi-class SGD")
plt.axis('tight')
# Plot the three one-against-all classifiers
xmin, xmax = plt.xlim()
ymin, ymax = plt.ylim()
coef = clf.coef_
intercept = clf.intercept_
def plot_hyperplane(c, color):
def line(x0):
return (-(x0 * coef[c, 0]) - intercept[c]) / coef[c, 1]
plt.plot([xmin, xmax], [line(xmin), line(xmax)],
ls="--", color=color)
for i, color in zip(clf.classes_, colors):
plot_hyperplane(i, color)
plt.legend()
plt.show()
| bsd-3-clause |
gotomypc/scikit-learn | sklearn/linear_model/tests/test_perceptron.py | 378 | 1815 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils import check_random_state
from sklearn.datasets import load_iris
from sklearn.linear_model import Perceptron
iris = load_iris()
random_state = check_random_state(12)
indices = np.arange(iris.data.shape[0])
random_state.shuffle(indices)
X = iris.data[indices]
y = iris.target[indices]
X_csr = sp.csr_matrix(X)
X_csr.sort_indices()
class MyPerceptron(object):
def __init__(self, n_iter=1):
self.n_iter = n_iter
def fit(self, X, y):
n_samples, n_features = X.shape
self.w = np.zeros(n_features, dtype=np.float64)
self.b = 0.0
for t in range(self.n_iter):
for i in range(n_samples):
if self.predict(X[i])[0] != y[i]:
self.w += y[i] * X[i]
self.b += y[i]
def project(self, X):
return np.dot(X, self.w) + self.b
def predict(self, X):
X = np.atleast_2d(X)
return np.sign(self.project(X))
def test_perceptron_accuracy():
for data in (X, X_csr):
clf = Perceptron(n_iter=30, shuffle=False)
clf.fit(data, y)
score = clf.score(data, y)
assert_true(score >= 0.7)
def test_perceptron_correctness():
y_bin = y.copy()
y_bin[y != 1] = -1
clf1 = MyPerceptron(n_iter=2)
clf1.fit(X, y_bin)
clf2 = Perceptron(n_iter=2, shuffle=False)
clf2.fit(X, y_bin)
assert_array_almost_equal(clf1.w, clf2.coef_.ravel())
def test_undefined_methods():
clf = Perceptron()
for meth in ("predict_proba", "predict_log_proba"):
assert_raises(AttributeError, lambda x: getattr(clf, x), meth)
| bsd-3-clause |
glouppe/scikit-learn | doc/tutorial/text_analytics/solutions/exercise_01_language_train_model.py | 25 | 2252 | """Build a language detector model
The goal of this exercise is to train a linear classifier on text features
that represent sequences of up to 3 consecutive characters so as to be
recognize natural languages by using the frequencies of short character
sequences as 'fingerprints'.
"""
# Author: Olivier Grisel <[email protected]>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import Perceptron
from sklearn.pipeline import Pipeline
from sklearn.datasets import load_files
from sklearn.model_selection import train_test_split
from sklearn import metrics
# The training data folder must be passed as first argument
languages_data_folder = sys.argv[1]
dataset = load_files(languages_data_folder)
# Split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.5)
# TASK: Build a vectorizer that splits strings into sequences of 1 to 3
# characters instead of word tokens
vectorizer = TfidfVectorizer(ngram_range=(1, 3), analyzer='char',
use_idf=False)
# TASK: Build a vectorizer / classifier pipeline using the previous analyzer
# the pipeline instance should be stored in a variable named clf
clf = Pipeline([
('vec', vectorizer),
('clf', Perceptron()),
])
# TASK: Fit the pipeline on the training set
clf.fit(docs_train, y_train)
# TASK: Predict the outcome on the testing set in a variable named y_predicted
y_predicted = clf.predict(docs_test)
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
#import pylab as pl
#pl.matshow(cm, cmap=pl.cm.jet)
#pl.show()
# Predict the result on some short new sentences:
sentences = [
u'This is a language detection test.',
u'Ceci est un test de d\xe9tection de la langue.',
u'Dies ist ein Test, um die Sprache zu erkennen.',
]
predicted = clf.predict(sentences)
for s, p in zip(sentences, predicted):
print(u'The language of "%s" is "%s"' % (s, dataset.target_names[p]))
| bsd-3-clause |
joshloyal/scikit-learn | sklearn/neighbors/tests/test_ball_tree.py | 26 | 10161 | import pickle
import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.neighbors.ball_tree import (BallTree, NeighborsHeap,
simultaneous_sort, kernel_norm,
nodeheap_sort, DTYPE, ITYPE)
from sklearn.neighbors.dist_metrics import DistanceMetric
from sklearn.utils.testing import SkipTest, assert_allclose
rng = np.random.RandomState(10)
V = rng.rand(3, 3)
V = np.dot(V, V.T)
DIMENSION = 3
METRICS = {'euclidean': {},
'manhattan': {},
'minkowski': dict(p=3),
'chebyshev': {},
'seuclidean': dict(V=np.random.random(DIMENSION)),
'wminkowski': dict(p=3, w=np.random.random(DIMENSION)),
'mahalanobis': dict(V=V)}
DISCRETE_METRICS = ['hamming',
'canberra',
'braycurtis']
BOOLEAN_METRICS = ['matching', 'jaccard', 'dice', 'kulsinski',
'rogerstanimoto', 'russellrao', 'sokalmichener',
'sokalsneath']
def dist_func(x1, x2, p):
return np.sum((x1 - x2) ** p) ** (1. / p)
def brute_force_neighbors(X, Y, k, metric, **kwargs):
D = DistanceMetric.get_metric(metric, **kwargs).pairwise(Y, X)
ind = np.argsort(D, axis=1)[:, :k]
dist = D[np.arange(Y.shape[0])[:, None], ind]
return dist, ind
def test_ball_tree_query():
np.random.seed(0)
X = np.random.random((40, DIMENSION))
Y = np.random.random((10, DIMENSION))
def check_neighbors(dualtree, breadth_first, k, metric, kwargs):
bt = BallTree(X, leaf_size=1, metric=metric, **kwargs)
dist1, ind1 = bt.query(Y, k, dualtree=dualtree,
breadth_first=breadth_first)
dist2, ind2 = brute_force_neighbors(X, Y, k, metric, **kwargs)
# don't check indices here: if there are any duplicate distances,
# the indices may not match. Distances should not have this problem.
assert_array_almost_equal(dist1, dist2)
for (metric, kwargs) in METRICS.items():
for k in (1, 3, 5):
for dualtree in (True, False):
for breadth_first in (True, False):
yield (check_neighbors,
dualtree, breadth_first,
k, metric, kwargs)
def test_ball_tree_query_boolean_metrics():
np.random.seed(0)
X = np.random.random((40, 10)).round(0)
Y = np.random.random((10, 10)).round(0)
k = 5
def check_neighbors(metric):
bt = BallTree(X, leaf_size=1, metric=metric)
dist1, ind1 = bt.query(Y, k)
dist2, ind2 = brute_force_neighbors(X, Y, k, metric)
assert_array_almost_equal(dist1, dist2)
for metric in BOOLEAN_METRICS:
yield check_neighbors, metric
def test_ball_tree_query_discrete_metrics():
np.random.seed(0)
X = (4 * np.random.random((40, 10))).round(0)
Y = (4 * np.random.random((10, 10))).round(0)
k = 5
def check_neighbors(metric):
bt = BallTree(X, leaf_size=1, metric=metric)
dist1, ind1 = bt.query(Y, k)
dist2, ind2 = brute_force_neighbors(X, Y, k, metric)
assert_array_almost_equal(dist1, dist2)
for metric in DISCRETE_METRICS:
yield check_neighbors, metric
def test_ball_tree_query_radius(n_samples=100, n_features=10):
np.random.seed(0)
X = 2 * np.random.random(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1E-15 # roundoff error can cause test to fail
bt = BallTree(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind = bt.query_radius([query_pt], r + eps)[0]
i = np.where(rad <= r + eps)[0]
ind.sort()
i.sort()
assert_array_almost_equal(i, ind)
def test_ball_tree_query_radius_distance(n_samples=100, n_features=10):
np.random.seed(0)
X = 2 * np.random.random(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1E-15 # roundoff error can cause test to fail
bt = BallTree(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind, dist = bt.query_radius([query_pt], r + eps, return_distance=True)
ind = ind[0]
dist = dist[0]
d = np.sqrt(((query_pt - X[ind]) ** 2).sum(1))
assert_array_almost_equal(d, dist)
def compute_kernel_slow(Y, X, kernel, h):
d = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1))
norm = kernel_norm(h, X.shape[1], kernel)
if kernel == 'gaussian':
return norm * np.exp(-0.5 * (d * d) / (h * h)).sum(-1)
elif kernel == 'tophat':
return norm * (d < h).sum(-1)
elif kernel == 'epanechnikov':
return norm * ((1.0 - (d * d) / (h * h)) * (d < h)).sum(-1)
elif kernel == 'exponential':
return norm * (np.exp(-d / h)).sum(-1)
elif kernel == 'linear':
return norm * ((1 - d / h) * (d < h)).sum(-1)
elif kernel == 'cosine':
return norm * (np.cos(0.5 * np.pi * d / h) * (d < h)).sum(-1)
else:
raise ValueError('kernel not recognized')
def check_results(kernel, h, atol, rtol, breadth_first, bt, Y, dens_true):
dens = bt.kernel_density(Y, h, atol=atol, rtol=rtol,
kernel=kernel,
breadth_first=breadth_first)
assert_allclose(dens, dens_true,
atol=atol, rtol=max(rtol, 1e-7))
def test_ball_tree_kde(n_samples=100, n_features=3):
np.random.seed(0)
X = np.random.random((n_samples, n_features))
Y = np.random.random((n_samples, n_features))
bt = BallTree(X, leaf_size=10)
for kernel in ['gaussian', 'tophat', 'epanechnikov',
'exponential', 'linear', 'cosine']:
for h in [0.01, 0.1, 1]:
dens_true = compute_kernel_slow(Y, X, kernel, h)
for rtol in [0, 1E-5]:
for atol in [1E-6, 1E-2]:
for breadth_first in (True, False):
yield (check_results, kernel, h, atol, rtol,
breadth_first, bt, Y, dens_true)
def test_gaussian_kde(n_samples=1000):
# Compare gaussian KDE results to scipy.stats.gaussian_kde
from scipy.stats import gaussian_kde
np.random.seed(0)
x_in = np.random.normal(0, 1, n_samples)
x_out = np.linspace(-5, 5, 30)
for h in [0.01, 0.1, 1]:
bt = BallTree(x_in[:, None])
try:
gkde = gaussian_kde(x_in, bw_method=h / np.std(x_in))
except TypeError:
raise SkipTest("Old version of scipy, doesn't accept "
"explicit bandwidth.")
dens_bt = bt.kernel_density(x_out[:, None], h) / n_samples
dens_gkde = gkde.evaluate(x_out)
assert_array_almost_equal(dens_bt, dens_gkde, decimal=3)
def test_ball_tree_two_point(n_samples=100, n_features=3):
np.random.seed(0)
X = np.random.random((n_samples, n_features))
Y = np.random.random((n_samples, n_features))
r = np.linspace(0, 1, 10)
bt = BallTree(X, leaf_size=10)
D = DistanceMetric.get_metric("euclidean").pairwise(Y, X)
counts_true = [(D <= ri).sum() for ri in r]
def check_two_point(r, dualtree):
counts = bt.two_point_correlation(Y, r=r, dualtree=dualtree)
assert_array_almost_equal(counts, counts_true)
for dualtree in (True, False):
yield check_two_point, r, dualtree
def test_ball_tree_pickle():
np.random.seed(0)
X = np.random.random((10, 3))
bt1 = BallTree(X, leaf_size=1)
# Test if BallTree with callable metric is picklable
bt1_pyfunc = BallTree(X, metric=dist_func, leaf_size=1, p=2)
ind1, dist1 = bt1.query(X)
ind1_pyfunc, dist1_pyfunc = bt1_pyfunc.query(X)
def check_pickle_protocol(protocol):
s = pickle.dumps(bt1, protocol=protocol)
bt2 = pickle.loads(s)
s_pyfunc = pickle.dumps(bt1_pyfunc, protocol=protocol)
bt2_pyfunc = pickle.loads(s_pyfunc)
ind2, dist2 = bt2.query(X)
ind2_pyfunc, dist2_pyfunc = bt2_pyfunc.query(X)
assert_array_almost_equal(ind1, ind2)
assert_array_almost_equal(dist1, dist2)
assert_array_almost_equal(ind1_pyfunc, ind2_pyfunc)
assert_array_almost_equal(dist1_pyfunc, dist2_pyfunc)
for protocol in (0, 1, 2):
yield check_pickle_protocol, protocol
def test_neighbors_heap(n_pts=5, n_nbrs=10):
heap = NeighborsHeap(n_pts, n_nbrs)
for row in range(n_pts):
d_in = np.random.random(2 * n_nbrs).astype(DTYPE)
i_in = np.arange(2 * n_nbrs, dtype=ITYPE)
for d, i in zip(d_in, i_in):
heap.push(row, d, i)
ind = np.argsort(d_in)
d_in = d_in[ind]
i_in = i_in[ind]
d_heap, i_heap = heap.get_arrays(sort=True)
assert_array_almost_equal(d_in[:n_nbrs], d_heap[row])
assert_array_almost_equal(i_in[:n_nbrs], i_heap[row])
def test_node_heap(n_nodes=50):
vals = np.random.random(n_nodes).astype(DTYPE)
i1 = np.argsort(vals)
vals2, i2 = nodeheap_sort(vals)
assert_array_almost_equal(i1, i2)
assert_array_almost_equal(vals[i1], vals2)
def test_simultaneous_sort(n_rows=10, n_pts=201):
dist = np.random.random((n_rows, n_pts)).astype(DTYPE)
ind = (np.arange(n_pts) + np.zeros((n_rows, 1))).astype(ITYPE)
dist2 = dist.copy()
ind2 = ind.copy()
# simultaneous sort rows using function
simultaneous_sort(dist, ind)
# simultaneous sort rows using numpy
i = np.argsort(dist2, axis=1)
row_ind = np.arange(n_rows)[:, None]
dist2 = dist2[row_ind, i]
ind2 = ind2[row_ind, i]
assert_array_almost_equal(dist, dist2)
assert_array_almost_equal(ind, ind2)
def test_query_haversine():
np.random.seed(0)
X = 2 * np.pi * np.random.random((40, 2))
bt = BallTree(X, leaf_size=1, metric='haversine')
dist1, ind1 = bt.query(X, k=5)
dist2, ind2 = brute_force_neighbors(X, X, k=5, metric='haversine')
assert_array_almost_equal(dist1, dist2)
assert_array_almost_equal(ind1, ind2)
| bsd-3-clause |
TomasDuro/paparazzi | sw/tools/calibration/calibrate_gyro.py | 87 | 4686 | #! /usr/bin/env python
# Copyright (C) 2010 Antoine Drouin
#
# This file is part of Paparazzi.
#
# Paparazzi is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# Paparazzi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Paparazzi; see the file COPYING. If not, write to
# the Free Software Foundation, 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
#
#
# calibrate gyrometers using turntable measurements
#
from __future__ import print_function, division
from optparse import OptionParser
import os
import sys
from scipy import linspace, polyval, stats
import matplotlib.pyplot as plt
import calibration_utils
#
# lisa 3
# p : a=-4511.16 b=31948.34, std error= 0.603
# q : a=-4598.46 b=31834.48, std error= 0.734
# r : a=-4525.63 b=32687.95, std error= 0.624
#
# lisa 4
# p : a=-4492.05 b=32684.94, std error= 0.600
# q : a=-4369.63 b=33260.96, std error= 0.710
# r : a=-4577.13 b=32707.72, std error= 0.730
#
# crista
# p : a= 3864.82 b=31288.09, std error= 0.866
# q : a= 3793.71 b=32593.89, std error= 3.070
# r : a= 3817.11 b=32709.70, std error= 3.296
#
def main():
usage = "usage: %prog --id <ac_id> --tt_id <tt_id> --axis <axis> [options] log_filename.data" + "\n" + "Run %prog --help to list the options."
parser = OptionParser(usage)
parser.add_option("-i", "--id", dest="ac_id",
action="store", type=int, default=-1,
help="aircraft id to use")
parser.add_option("-t", "--tt_id", dest="tt_id",
action="store", type=int, default=-1,
help="turntable id to use")
parser.add_option("-a", "--axis", dest="axis",
type="choice", choices=['p', 'q', 'r'],
help="axis to calibrate (p, q, r)",
action="store")
parser.add_option("-v", "--verbose",
action="store_true", dest="verbose")
(options, args) = parser.parse_args()
if len(args) != 1:
parser.error("incorrect number of arguments")
else:
if os.path.isfile(args[0]):
filename = args[0]
else:
print(args[0] + " not found")
sys.exit(1)
if not filename.endswith(".data"):
parser.error("Please specify a *.data log file")
if options.ac_id < 0 or options.ac_id > 255:
parser.error("Specify a valid aircraft id number!")
if options.tt_id < 0 or options.tt_id > 255:
parser.error("Specify a valid turntable id number!")
if options.verbose:
print("reading file "+filename+" for aircraft "+str(options.ac_id)+" and turntable "+str(options.tt_id))
samples = calibration_utils.read_turntable_log(options.ac_id, options.tt_id, filename, 1, 7)
if len(samples) == 0:
print("Error: found zero matching messages in log file!")
print("Was looking for IMU_TURNTABLE from id: "+str(options.tt_id)+" and IMU_GYRO_RAW from id: "+str(options.ac_id)+" in file "+filename)
sys.exit(1)
if options.verbose:
print("found "+str(len(samples))+" records")
if options.axis == 'p':
axis_idx = 1
elif options.axis == 'q':
axis_idx = 2
elif options.axis == 'r':
axis_idx = 3
else:
parser.error("Specify a valid axis!")
#Linear regression using stats.linregress
t = samples[:, 0]
xn = samples[:, axis_idx]
(a_s, b_s, r, tt, stderr) = stats.linregress(t, xn)
print('Linear regression using stats.linregress')
print(('regression: a=%.2f b=%.2f, std error= %.3f' % (a_s, b_s, stderr)))
print(('<define name="GYRO_X_NEUTRAL" value="%d"/>' % (b_s)))
print(('<define name="GYRO_X_SENS" value="%f" integer="16"/>' % (pow(2, 12)/a_s)))
#
    # overlay fitted value
#
ovl_omega = linspace(1, 7.5, 10)
ovl_adc = polyval([a_s, b_s], ovl_omega)
plt.title('Linear Regression Example')
plt.subplot(3, 1, 1)
plt.plot(samples[:, 1])
plt.plot(samples[:, 2])
plt.plot(samples[:, 3])
plt.legend(['p', 'q', 'r'])
plt.subplot(3, 1, 2)
plt.plot(samples[:, 0])
plt.subplot(3, 1, 3)
plt.plot(samples[:, 0], samples[:, axis_idx], 'b.')
plt.plot(ovl_omega, ovl_adc, 'r')
plt.show()
if __name__ == "__main__":
main()
| gpl-2.0 |
tobegit3hub/deep_cnn | java_predict_client/src/main/proto/tensorflow/examples/learn/mnist.py | 13 | 3936 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This showcases how simple it is to build image classification networks.
It follows description from this TensorFlow tutorial:
https://www.tensorflow.org/versions/master/tutorials/mnist/pros/index.html#deep-mnist-for-experts
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from sklearn import metrics
import tensorflow as tf
from tensorflow.contrib import layers
from tensorflow.contrib import learn
def max_pool_2x2(tensor_in):
return tf.nn.max_pool(
tensor_in, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
def conv_model(feature, target, mode):
"""2-layer convolution model."""
# Convert the target to a one-hot tensor of shape (batch_size, 10) and
  # with an on-value of 1 for each one-hot vector of length 10.
target = tf.one_hot(tf.cast(target, tf.int32), 10, 1, 0)
# Reshape feature to 4d tensor with 2nd and 3rd dimensions being
  # image width and height, and the final dimension being the number of color channels.
feature = tf.reshape(feature, [-1, 28, 28, 1])
# First conv layer will compute 32 features for each 5x5 patch
with tf.variable_scope('conv_layer1'):
h_conv1 = layers.convolution(feature, 32, kernel_size=[5, 5],
activation_fn=tf.nn.relu)
h_pool1 = max_pool_2x2(h_conv1)
# Second conv layer will compute 64 features for each 5x5 patch.
with tf.variable_scope('conv_layer2'):
h_conv2 = layers.convolution(h_pool1, 64, kernel_size=[5, 5],
activation_fn=tf.nn.relu)
h_pool2 = max_pool_2x2(h_conv2)
# reshape tensor into a batch of vectors
h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])
# Densely connected layer with 1024 neurons.
h_fc1 = layers.dropout(
layers.fully_connected(
h_pool2_flat, 1024, activation_fn=tf.nn.relu), keep_prob=0.5,
is_training=mode == tf.contrib.learn.ModeKeys.TRAIN)
# Compute logits (1 per class) and compute loss.
logits = layers.fully_connected(h_fc1, 10, activation_fn=None)
loss = tf.contrib.losses.softmax_cross_entropy(logits, target)
# Create a tensor for training op.
train_op = layers.optimize_loss(
loss, tf.contrib.framework.get_global_step(), optimizer='SGD',
learning_rate=0.001)
return tf.argmax(logits, 1), loss, train_op
def main(unused_args):
### Download and load MNIST dataset.
mnist = learn.datasets.load_dataset('mnist')
### Linear classifier.
feature_columns = learn.infer_real_valued_columns_from_input(
mnist.train.images)
classifier = learn.LinearClassifier(
feature_columns=feature_columns, n_classes=10)
classifier.fit(mnist.train.images, mnist.train.labels.astype(np.int32),
batch_size=100, steps=1000)
score = metrics.accuracy_score(
mnist.test.labels, list(classifier.predict(mnist.test.images)))
print('Accuracy: {0:f}'.format(score))
### Convolutional network
classifier = learn.Estimator(model_fn=conv_model)
classifier.fit(mnist.train.images, mnist.train.labels,
batch_size=100, steps=20000)
score = metrics.accuracy_score(
mnist.test.labels, list(classifier.predict(mnist.test.images)))
print('Accuracy: {0:f}'.format(score))
if __name__ == '__main__':
tf.app.run()
| apache-2.0 |
macks22/scikit-learn | sklearn/cluster/setup.py | 263 | 1449 | # Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import os
from os.path import join
import numpy
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
cblas_libs, blas_info = get_blas_info()
libraries = []
if os.name == 'posix':
cblas_libs.append('m')
libraries.append('m')
config = Configuration('cluster', parent_package, top_path)
config.add_extension('_dbscan_inner',
sources=['_dbscan_inner.cpp'],
include_dirs=[numpy.get_include()],
language="c++")
config.add_extension('_hierarchical',
sources=['_hierarchical.cpp'],
language="c++",
include_dirs=[numpy.get_include()],
libraries=libraries)
config.add_extension(
'_k_means',
libraries=cblas_libs,
sources=['_k_means.c'],
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
extra_compile_args=blas_info.pop('extra_compile_args', []),
**blas_info
)
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
adamallo/scripts_singlecrypt | subsmodel/evaluate28.py | 1 | 3607 | # This program uses the 28-state model to evaluate the likelihood
# of a tiny tree at various branch lengths, demonstrating how
# the evaluations work. It relies on a rate matrix made by
# program ratematrix28.py, and uses the eigenvalue/eigenvector
# approach to compute the likelihoods.
epsilon = 0.00000000001 # 1e-11
null = 0
a = 7
b = 1
aa = 13
ab = 8
bb = 2
aaa = 18
aab = 14
abb = 9
bbb = 3
aaaa = 22
aaab = 19
aabb = 15
abbb = 10
bbbb = 4
aaaaa = 25
aaaab = 23
aaabb = 20
aabbb = 16
abbbb = 11
bbbbb = 5
aaaaaa = 27
aaaaab = 26
aaaabb = 24
aaabbb = 21
aabbbb = 17
abbbbb = 12
bbbbbb = 6
# note: despite its name, this predicate returns True when x is effectively zero
def notzero(x):
    return x < epsilon
# unpickle the components of the rate matrices
import pickle
picklefile = open("rates.pkl","r")
# b matrix
bmatrix = pickle.load(picklefile)
# t matrix
tmatrix = pickle.load(picklefile)
# t inverse
tinverse = pickle.load(picklefile)
picklefile.close()
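# The probability computation below relies on the eigendecomposition produced by
# ratematrix28.py: if the rate matrix Q factors as Q = T * B * Tinv, with B
# holding the eigenvalues on its diagonal, then
#     P(t) = expm(Q * t) = T * diag(exp(lambda_i * t)) * Tinv
# A minimal sketch of that step, kept in comments since the script already builds
# p1/p2 this way inside the grid loop further down:
#
#   import copy, math
#   b_t = copy.deepcopy(bmatrix)
#   for i in xrange(len(b_t)):
#       b_t[i][i] = math.exp(b_t[i][i] * 0.25)  # exponentiate eigenvalues at t=0.25
#   p_t = tmatrix * b_t * tinverse              # transition probability matrix P(0.25)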
# read in the data
infile = open("data","r")
seq1 = infile.readline()
seq1 = seq1.rstrip()
seq1 = seq1.split()
seq2 = infile.readline()
seq2 = seq2.rstrip()
seq2 = seq2.split()
numsites = len(seq1)
numstates = 28
assert numsites == len(seq2)
infile.close()
# iterate over t1 and t2
testvals = [0.1, 0.125, 0.15, 0.175, 0.2, 0.225, 0.25, 0.275, 0.3,0.325,0.35,0.375,0.4]
scores = []
for t1 in testvals:
myscores = []
for t2 in testvals:
# set up three dlcells
dl1 = [[0.0 for x in xrange(numstates)] for x in xrange(numsites)]
dl2 = [[0.0 for x in xrange(numstates)] for x in xrange(numsites)]
dl3 = [[0.0 for x in xrange(numstates)] for x in xrange(numsites)]
for n in xrange(numsites):
site = int(seq1[n])
dl1[n][site] = 1.0
site = int(seq2[n])
dl2[n][site] = 1.0
# WATCH OUT probably need logs here!
# compute probabilities down branches
# exponentiate for t1
import numpy
import copy
import math
b1 = copy.deepcopy(bmatrix)
for i in xrange(len(b1)):
b1[i][i] = math.exp(b1[i][i] * t1)
p1 = tmatrix * b1 * tinverse
p1 = p1.tolist()
b2 = copy.deepcopy(bmatrix)
for i in xrange(len(b2)):
b2[i][i] = math.exp(b2[i][i] * t2)
p2 = tmatrix * b2 * tinverse
p2 = p2.tolist()
# trying without logs for now, we'll see....
#for i in xrange(len(newprobs)):
# for j in xrange(len(newprobs)):
# if notzero(newprobs[i][j]): # this line zeroes any element < epsilon
# newprobs[i][j] = math.log(newprobs[i][j])
# else:
# newprobs[i][j] = 0.0
# compute dl3 values based on dl1, dl2, and newprobs
for site in xrange(numsites):
for top1 in xrange(numstates):
for top2 in xrange(numstates):
for bottom in xrange(numstates):
dl3[site][bottom] += dl1[site][top1] * p1[top1][bottom] * dl2[site][top2] * p2[top2][bottom]
# compute data likelihood assuming ancestor was state 4 with 100%
# probability; here we go to logs
lnlike = 0.0
for site in xrange(numsites):
lnlike += math.log(dl3[site][ab])
myscores.append(lnlike)
scores.append(myscores[:])
#import matplotlib.pyplot as plt
bestval = scores[0][0]
best1 = testvals[0]
best2 = testvals[0]
for t1 in xrange(len(testvals)):
for t2 in xrange(len(testvals)):
if scores[t1][t2] > bestval:
bestval = scores[t1][t2]
best1 = testvals[t1]
best2 = testvals[t2]
print bestval, best1, best2
#plt.imshow(scores)
#plt.show()
| gpl-3.0 |
Northrend/mxnet | example/speech_recognition/stt_utils.py | 44 | 5892 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
import os
import os.path
import numpy as np
import soundfile
from numpy.lib.stride_tricks import as_strided
logger = logging.getLogger(__name__)
def calc_feat_dim(window, max_freq):
return int(0.001 * window * max_freq) + 1
def conv_output_length(input_length, filter_size, border_mode, stride,
dilation=1):
""" Compute the length of the output sequence after 1D convolution along
    time. Note that this function is in line with the function used in the
Convolution1D class from Keras.
Params:
input_length (int): Length of the input sequence.
filter_size (int): Width of the convolution kernel.
border_mode (str): Only support `same` or `valid`.
stride (int): Stride size used in 1D convolution.
dilation (int)
"""
if input_length is None:
return None
assert border_mode in {'same', 'valid'}
dilated_filter_size = filter_size + (filter_size - 1) * (dilation - 1)
if border_mode == 'same':
output_length = input_length
elif border_mode == 'valid':
output_length = input_length - dilated_filter_size + 1
return (output_length + stride - 1) // stride
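# Illustrative sketch (comments only): for a hypothetical input sequence of
# length 100, a width-11 kernel and stride 2,
#
#   conv_output_length(100, 11, 'same', 2)   # -> 50 == ceil(100 / 2)
#   conv_output_length(100, 11, 'valid', 2)  # -> 45 == ceil((100 - 11 + 1) / 2)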
def spectrogram(samples, fft_length=256, sample_rate=2, hop_length=128):
"""
Compute the spectrogram for a real signal.
The parameters follow the naming convention of
matplotlib.mlab.specgram
Args:
samples (1D array): input audio signal
fft_length (int): number of elements in fft window
sample_rate (scalar): sample rate
hop_length (int): hop length (relative offset between neighboring
fft windows).
Returns:
x (2D array): spectrogram [frequency x time]
freq (1D array): frequency of each row in x
Note:
This is a truncating computation e.g. if fft_length=10,
hop_length=5 and the signal has 23 elements, then the
last 3 elements will be truncated.
"""
assert not np.iscomplexobj(samples), "Must not pass in complex numbers"
window = np.hanning(fft_length)[:, None]
window_norm = np.sum(window ** 2)
# The scaling below follows the convention of
# matplotlib.mlab.specgram which is the same as
# matlabs specgram.
scale = window_norm * sample_rate
trunc = (len(samples) - fft_length) % hop_length
x = samples[:len(samples) - trunc]
# "stride trick" reshape to include overlap
nshape = (fft_length, (len(x) - fft_length) // hop_length + 1)
nstrides = (x.strides[0], x.strides[0] * hop_length)
x = as_strided(x, shape=nshape, strides=nstrides)
# window stride sanity check
assert np.all(x[:, 1] == samples[hop_length:(hop_length + fft_length)])
# broadcast window, compute fft over columns and square mod
# This function computes the one-dimensional n-point discrete Fourier Transform (DFT) of a real-valued array by means of an efficient algorithm called the Fast Fourier Transform (FFT).
x = np.fft.rfft(x * window, axis=0)
x = np.absolute(x) ** 2
# scale, 2.0 for everything except dc and fft_length/2
x[1:-1, :] *= (2.0 / scale)
x[(0, -1), :] /= scale
freqs = float(sample_rate) / fft_length * np.arange(x.shape[0])
return x, freqs
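# Illustrative sketch (comments only): with the values from the docstring note
# (fft_length=10, hop_length=5, a 23-sample signal) the last 3 samples are
# truncated, leaving (20 - 10) // 5 + 1 = 3 windows of 10 // 2 + 1 = 6 bins each:
#
#   samples = np.random.randn(23)
#   x, freqs = spectrogram(samples, fft_length=10, sample_rate=16000, hop_length=5)
#   # x.shape -> (6, 3), freqs.shape -> (6,)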
def spectrogram_from_file(filename, step=10, window=20, max_freq=None,
eps=1e-14, overwrite=False, save_feature_as_csvfile=False):
""" Calculate the log of linear spectrogram from FFT energy
Params:
filename (str): Path to the audio file
step (int): Step size in milliseconds between windows
window (int): FFT window size in milliseconds
max_freq (int): Only FFT bins corresponding to frequencies between
[0, max_freq] are returned
eps (float): Small value to ensure numerical stability (for ln(x))
"""
csvfilename = filename.replace(".wav", ".csv")
if (os.path.isfile(csvfilename) is False) or overwrite:
with soundfile.SoundFile(filename) as sound_file:
audio = sound_file.read(dtype='float32')
sample_rate = sound_file.samplerate
if audio.ndim >= 2:
audio = np.mean(audio, 1)
if max_freq is None:
max_freq = sample_rate / 2
if max_freq > sample_rate / 2:
raise ValueError("max_freq must not be greater than half of "
" sample rate")
if step > window:
raise ValueError("step size must not be greater than window size")
hop_length = int(0.001 * step * sample_rate)
fft_length = int(0.001 * window * sample_rate)
pxx, freqs = spectrogram(
audio, fft_length=fft_length, sample_rate=sample_rate,
hop_length=hop_length)
ind = np.where(freqs <= max_freq)[0][-1] + 1
res = np.transpose(np.log(pxx[:ind, :] + eps))
if save_feature_as_csvfile:
np.savetxt(csvfilename, res)
return res
else:
return np.loadtxt(csvfilename)
| apache-2.0 |
mlyundin/scikit-learn | examples/model_selection/randomized_search.py | 201 | 3214 | """
=========================================================================
Comparing randomized search and grid search for hyperparameter estimation
=========================================================================
Compare randomized search and grid search for optimizing hyperparameters of a
random forest.
All parameters that influence the learning are searched simultaneously
(except for the number of estimators, which poses a time / quality tradeoff).
The randomized search and the grid search explore exactly the same space of
parameters. The result in parameter settings is quite similar, while the run
time for randomized search is drastically lower.
The performance is slightly worse for the randomized search, though this
is most likely a noise effect and would not carry over to a held-out test set.
Note that in practice, one would not search over this many different parameters
simultaneously using grid search, but pick only the ones deemed most important.
"""
print(__doc__)
import numpy as np
from time import time
from operator import itemgetter
from scipy.stats import randint as sp_randint
from sklearn.grid_search import GridSearchCV, RandomizedSearchCV
from sklearn.datasets import load_digits
from sklearn.ensemble import RandomForestClassifier
# get some data
digits = load_digits()
X, y = digits.data, digits.target
# build a classifier
clf = RandomForestClassifier(n_estimators=20)
# Utility function to report best scores
def report(grid_scores, n_top=3):
top_scores = sorted(grid_scores, key=itemgetter(1), reverse=True)[:n_top]
for i, score in enumerate(top_scores):
print("Model with rank: {0}".format(i + 1))
print("Mean validation score: {0:.3f} (std: {1:.3f})".format(
score.mean_validation_score,
np.std(score.cv_validation_scores)))
print("Parameters: {0}".format(score.parameters))
print("")
# specify parameters and distributions to sample from
param_dist = {"max_depth": [3, None],
"max_features": sp_randint(1, 11),
"min_samples_split": sp_randint(1, 11),
"min_samples_leaf": sp_randint(1, 11),
"bootstrap": [True, False],
"criterion": ["gini", "entropy"]}
# run randomized search
n_iter_search = 20
random_search = RandomizedSearchCV(clf, param_distributions=param_dist,
n_iter=n_iter_search)
start = time()
random_search.fit(X, y)
print("RandomizedSearchCV took %.2f seconds for %d candidates"
" parameter settings." % ((time() - start), n_iter_search))
report(random_search.grid_scores_)
# use a full grid over all parameters
param_grid = {"max_depth": [3, None],
"max_features": [1, 3, 10],
"min_samples_split": [1, 3, 10],
"min_samples_leaf": [1, 3, 10],
"bootstrap": [True, False],
"criterion": ["gini", "entropy"]}
# run grid search
grid_search = GridSearchCV(clf, param_grid=param_grid)
start = time()
grid_search.fit(X, y)
print("GridSearchCV took %.2f seconds for %d candidate parameter settings."
% (time() - start, len(grid_search.grid_scores_)))
report(grid_search.grid_scores_)
| bsd-3-clause |
rs2/pandas | pandas/tests/series/indexing/test_boolean.py | 2 | 3556 | import numpy as np
import pytest
from pandas import Index, Series
import pandas._testing as tm
from pandas.core.indexing import IndexingError
from pandas.tseries.offsets import BDay
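# Editor's note: ``string_series`` and ``datetime_series`` used throughout this
# module are pytest fixtures supplied by the pandas test-suite conftest
# (roughly, a float Series with a unique string index and a float Series with
# a DatetimeIndex, respectively).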
def test_getitem_boolean(string_series):
s = string_series
mask = s > s.median()
# passing list is OK
result = s[list(mask)]
expected = s[mask]
tm.assert_series_equal(result, expected)
tm.assert_index_equal(result.index, s.index[mask])
def test_getitem_boolean_empty():
s = Series([], dtype=np.int64)
s.index.name = "index_name"
s = s[s.isna()]
assert s.index.name == "index_name"
assert s.dtype == np.int64
# GH5877
# indexing with empty series
s = Series(["A", "B"])
expected = Series(dtype=object, index=Index([], dtype="int64"))
result = s[Series([], dtype=object)]
tm.assert_series_equal(result, expected)
# invalid because of the boolean indexer
# that's empty or not-aligned
msg = (
r"Unalignable boolean Series provided as indexer \(index of "
r"the boolean Series and of the indexed object do not match"
)
with pytest.raises(IndexingError, match=msg):
s[Series([], dtype=bool)]
with pytest.raises(IndexingError, match=msg):
s[Series([True], dtype=bool)]
def test_getitem_boolean_object(string_series):
# using column from DataFrame
s = string_series
mask = s > s.median()
omask = mask.astype(object)
# getitem
result = s[omask]
expected = s[mask]
tm.assert_series_equal(result, expected)
# setitem
s2 = s.copy()
cop = s.copy()
cop[omask] = 5
s2[mask] = 5
tm.assert_series_equal(cop, s2)
# nans raise exception
omask[5:10] = np.nan
msg = "Cannot mask with non-boolean array containing NA / NaN values"
with pytest.raises(ValueError, match=msg):
s[omask]
with pytest.raises(ValueError, match=msg):
s[omask] = 5
def test_getitem_setitem_boolean_corner(datetime_series):
ts = datetime_series
mask_shifted = ts.shift(1, freq=BDay()) > ts.median()
# these used to raise...??
msg = (
r"Unalignable boolean Series provided as indexer \(index of "
r"the boolean Series and of the indexed object do not match"
)
with pytest.raises(IndexingError, match=msg):
ts[mask_shifted]
with pytest.raises(IndexingError, match=msg):
ts[mask_shifted] = 1
with pytest.raises(IndexingError, match=msg):
ts.loc[mask_shifted]
with pytest.raises(IndexingError, match=msg):
ts.loc[mask_shifted] = 1
def test_setitem_boolean(string_series):
mask = string_series > string_series.median()
# similar indexed series
result = string_series.copy()
result[mask] = string_series * 2
expected = string_series * 2
tm.assert_series_equal(result[mask], expected[mask])
# needs alignment
result = string_series.copy()
result[mask] = (string_series * 2)[0:5]
expected = (string_series * 2)[0:5].reindex_like(string_series)
expected[-mask] = string_series[mask]
tm.assert_series_equal(result[mask], expected[mask])
def test_get_set_boolean_different_order(string_series):
ordered = string_series.sort_values()
# setting
copy = string_series.copy()
copy[ordered > 0] = 0
expected = string_series.copy()
expected[expected > 0] = 0
tm.assert_series_equal(copy, expected)
# getting
sel = string_series[ordered > 0]
exp = string_series[string_series > 0]
tm.assert_series_equal(sel, exp)
| bsd-3-clause |
alorenzo175/pvlib-python | pvlib/_deprecation.py | 6 | 13140 | """Matplotlib license for the deprecation module.
License agreement for matplotlib versions 1.3.0 and later
=========================================================
1. This LICENSE AGREEMENT is between the Matplotlib Development Team
("MDT"), and the Individual or Organization ("Licensee") accessing and
otherwise using matplotlib software in source or binary form and its
associated documentation.
2. Subject to the terms and conditions of this License Agreement, MDT
hereby grants Licensee a nonexclusive, royalty-free, world-wide license
to reproduce, analyze, test, perform and/or display publicly, prepare
derivative works, distribute, and otherwise use matplotlib
alone or in any derivative version, provided, however, that MDT's
License Agreement and MDT's notice of copyright, i.e., "Copyright (c)
2012- Matplotlib Development Team; All Rights Reserved" are retained in
matplotlib alone or in any derivative version prepared by
Licensee.
3. In the event Licensee prepares a derivative work that is based on or
incorporates matplotlib or any part thereof, and wants to
make the derivative work available to others as provided herein, then
Licensee hereby agrees to include in any such work a brief summary of
the changes made to matplotlib .
4. MDT is making matplotlib available to Licensee on an "AS
IS" basis. MDT MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, MDT MAKES NO AND
DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF MATPLOTLIB
WILL NOT INFRINGE ANY THIRD PARTY RIGHTS.
5. MDT SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF MATPLOTLIB
FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR
LOSS AS A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING
MATPLOTLIB , OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF
THE POSSIBILITY THEREOF.
6. This License Agreement will automatically terminate upon a material
breach of its terms and conditions.
7. Nothing in this License Agreement shall be deemed to create any
relationship of agency, partnership, or joint venture between MDT and
Licensee. This License Agreement does not grant permission to use MDT
trademarks or trade name in a trademark sense to endorse or promote
products or services of Licensee, or any third party.
8. By copying, installing or otherwise using matplotlib ,
Licensee agrees to be bound by the terms and conditions of this License
Agreement.
License agreement for matplotlib versions prior to 1.3.0
========================================================
1. This LICENSE AGREEMENT is between John D. Hunter ("JDH"), and the
Individual or Organization ("Licensee") accessing and otherwise using
matplotlib software in source or binary form and its associated
documentation.
2. Subject to the terms and conditions of this License Agreement, JDH
hereby grants Licensee a nonexclusive, royalty-free, world-wide license
to reproduce, analyze, test, perform and/or display publicly, prepare
derivative works, distribute, and otherwise use matplotlib
alone or in any derivative version, provided, however, that JDH's
License Agreement and JDH's notice of copyright, i.e., "Copyright (c)
2002-2011 John D. Hunter; All Rights Reserved" are retained in
matplotlib alone or in any derivative version prepared by
Licensee.
3. In the event Licensee prepares a derivative work that is based on or
incorporates matplotlib or any part thereof, and wants to
make the derivative work available to others as provided herein, then
Licensee hereby agrees to include in any such work a brief summary of
the changes made to matplotlib.
4. JDH is making matplotlib available to Licensee on an "AS
IS" basis. JDH MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, JDH MAKES NO AND
DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF MATPLOTLIB
WILL NOT INFRINGE ANY THIRD PARTY RIGHTS.
5. JDH SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF MATPLOTLIB
FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR
LOSS AS A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING
MATPLOTLIB , OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF
THE POSSIBILITY THEREOF.
6. This License Agreement will automatically terminate upon a material
breach of its terms and conditions.
7. Nothing in this License Agreement shall be deemed to create any
relationship of agency, partnership, or joint venture between JDH and
Licensee. This License Agreement does not grant permission to use JDH
trademarks or trade name in a trademark sense to endorse or promote
products or services of Licensee, or any third party.
8. By copying, installing or otherwise using matplotlib,
Licensee agrees to be bound by the terms and conditions of this License
Agreement.
"""
# modified from Matplotlib b97cd2d (post 2.2.2) in the following ways:
# 1. use module-level _projectName = 'pvlib' and
# _projectWarning = 'pvlibDeprecationWarning' in place of MPL specific
# string/Class.
# 2. remove keyword only argument requirement for removal
# 3. remove deprecated obj_type from deprecated function
# 4. if removal is empty, say 'soon' instead of assuming two minor releases
# later.
import functools
import textwrap
import warnings
class pvlibDeprecationWarning(UserWarning):
"""A class for issuing deprecation warnings for pvlib users.
In light of the fact that Python builtin DeprecationWarnings are ignored
by default as of Python 2.7 (see link below), this class was put in to
allow for the signaling of deprecation, but via UserWarnings which are not
ignored by default.
https://docs.python.org/dev/whatsnew/2.7.html#the-future-for-python-2-x
"""
pass
# make it easier for others to copy paste this code into their projects
_projectName = 'pvlib'
_projectWarning = pvlibDeprecationWarning
def _generate_deprecation_message(
since, message='', name='', alternative='', pending=False,
obj_type='attribute', addendum='', removal=''):
if removal == "":
removal = "soon"
elif removal:
if pending:
raise ValueError(
"A pending deprecation cannot have a scheduled removal")
removal = "in {}".format(removal)
if not message:
message = (
"The %(name)s %(obj_type)s"
+ (" will be deprecated in a future version"
if pending else
(" was deprecated in %(projectName)s %(since)s"
+ (" and will be removed %(removal)s"
if removal else
"")))
+ "."
+ (" Use %(alternative)s instead." if alternative else "")
+ (" %(addendum)s" if addendum else ""))
return message % dict(
func=name, name=name, obj_type=obj_type, since=since, removal=removal,
alternative=alternative, addendum=addendum, projectName=_projectName)
def warn_deprecated(
since, message='', name='', alternative='', pending=False,
obj_type='attribute', addendum='', removal=''):
"""
Used to display deprecation in a standard way.
Parameters
----------
since : str
The release at which this API became deprecated.
message : str, optional
Override the default deprecation message. The format
specifier `%(name)s` may be used for the name of the function,
and `%(alternative)s` may be used in the deprecation message
to insert the name of an alternative to the deprecated
function. `%(obj_type)s` may be used to insert a friendly name
for the type of object being deprecated.
name : str, optional
The name of the deprecated object.
alternative : str, optional
An alternative API that the user may use in place of the deprecated
API. The deprecation warning will tell the user about this alternative
if provided.
pending : bool, optional
If True, uses a PendingDeprecationWarning instead of a
DeprecationWarning. Cannot be used together with *removal*.
removal : str, optional
The expected removal version. With the default (an empty string), a
removal version is automatically computed from *since*. Set to other
Falsy values to not schedule a removal date. Cannot be used together
with *pending*.
obj_type : str, optional
The object type being deprecated.
addendum : str, optional
Additional text appended directly to the final message.
Examples
--------
Basic example::
# To warn of the deprecation of "matplotlib.name_of_module"
warn_deprecated('1.4.0', name='matplotlib.name_of_module',
obj_type='module')
"""
message = '\n' + _generate_deprecation_message(
since, message, name, alternative, pending, obj_type, addendum,
removal=removal)
category = (PendingDeprecationWarning if pending
else _projectWarning)
warnings.warn(message, category, stacklevel=2)
def deprecated(since, message='', name='', alternative='', pending=False,
addendum='', removal=''):
"""
Decorator to mark a function or a class as deprecated.
Parameters
----------
since : str
The release at which this API became deprecated. This is
required.
message : str, optional
Override the default deprecation message. The format
specifier `%(name)s` may be used for the name of the object,
and `%(alternative)s` may be used in the deprecation message
to insert the name of an alternative to the deprecated
object.
name : str, optional
The name of the deprecated object; if not provided the name
is automatically determined from the passed in object,
though this is useful in the case of renamed functions, where
the new function is just assigned to the name of the
deprecated function. For example::
def new_function():
...
oldFunction = new_function
alternative : str, optional
An alternative API that the user may use in place of the deprecated
API. The deprecation warning will tell the user about this alternative
if provided.
pending : bool, optional
If True, uses a PendingDeprecationWarning instead of a
DeprecationWarning. Cannot be used together with *removal*.
removal : str, optional
The expected removal version. With the default (an empty string), a
removal version is automatically computed from *since*. Set to other
Falsy values to not schedule a removal date. Cannot be used together
with *pending*.
addendum : str, optional
Additional text appended directly to the final message.
Examples
--------
Basic example::
@deprecated('1.4.0')
def the_function_to_deprecate():
pass
"""
def deprecate(obj, message=message, name=name, alternative=alternative,
pending=pending, addendum=addendum):
if not name:
name = obj.__name__
if isinstance(obj, type):
obj_type = "class"
old_doc = obj.__doc__
func = obj.__init__
def finalize(wrapper, new_doc):
obj.__doc__ = new_doc
obj.__init__ = wrapper
return obj
else:
obj_type = "function"
if isinstance(obj, classmethod):
func = obj.__func__
old_doc = func.__doc__
def finalize(wrapper, new_doc):
wrapper = functools.wraps(func)(wrapper)
wrapper.__doc__ = new_doc
return classmethod(wrapper)
else:
func = obj
old_doc = func.__doc__
def finalize(wrapper, new_doc):
wrapper = functools.wraps(func)(wrapper)
wrapper.__doc__ = new_doc
return wrapper
message = _generate_deprecation_message(
since, message, name, alternative, pending, obj_type, addendum,
removal=removal)
category = (PendingDeprecationWarning if pending
else _projectWarning)
def wrapper(*args, **kwargs):
warnings.warn(message, category, stacklevel=2)
return func(*args, **kwargs)
old_doc = textwrap.dedent(old_doc or '').strip('\n')
message = message.strip()
new_doc = (('\n.. deprecated:: %(since)s'
'\n %(message)s\n\n' %
{'since': since, 'message': message}) + old_doc)
if not old_doc:
# This is to prevent a spurious 'unexpected unindent' warning from
# docutils when the original docstring was blank.
new_doc += r'\ '
return finalize(wrapper, new_doc)
return deprecate
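# Hedged usage sketch (editor's addition; the function names are hypothetical):
#
#     @deprecated('0.7', alternative='new_power', removal='0.8')
#     def old_power(x):
#         return x ** 2
#
# Calling ``old_power`` afterwards emits a pvlibDeprecationWarning whose text,
# following _generate_deprecation_message above, reads:
# "The old_power function was deprecated in pvlib 0.7 and will be removed
# in 0.8. Use new_power instead."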
| bsd-3-clause |
moreati/pandashells | pandashells/lib/plot_lib.py | 7 | 4022 | #! /usr/bin/env python
import sys
import re
from pandashells.lib import module_checker_lib
module_checker_lib.check_for_modules(
['matplotlib', 'dateutil', 'mpld3', 'seaborn'])
from dateutil.parser import parse
import matplotlib as mpl
import pylab as pl
import seaborn as sns
import mpld3
def show(args):
# if figure saving requested
if hasattr(args, 'savefig') and args.savefig:
# save html if requested
rex_html = re.compile(r'.*?\.html$')
if rex_html.match(args.savefig[0]):
fig = pl.gcf()
html = mpld3.fig_to_html(fig)
with open(args.savefig[0], 'w') as outfile:
outfile.write(html)
return
# save image types
pl.savefig(args.savefig[0])
# otherwise show to screen
else:
pl.show()
def set_plot_styling(args):
# set up seaborn context
sns.set(context=args.plot_context[0],
style=args.plot_theme[0],
palette=args.plot_palette[0])
# modify seaborn slightly to look good in interactive backends
if 'white' not in args.plot_theme[0]:
mpl.rcParams['figure.facecolor'] = 'white'
mpl.rcParams['figure.edgecolor'] = 'white'
def set_limits(args):
if args.xlim:
pl.gca().set_xlim(args.xlim)
if args.ylim:
pl.gca().set_ylim(args.ylim)
def set_scale(args):
if args.xlog:
pl.gca().set_xscale('log')
if args.ylog:
pl.gca().set_yscale('log')
def set_labels_title(args):
if args.title:
pl.title(args.title[0])
if args.xlabel:
pl.xlabel(args.xlabel[0])
if args.ylabel:
pl.ylabel(args.ylabel[0])
def set_legend(args):
if args.legend:
loc = args.legend[0]
rex = re.compile(r'\d')
m = rex.match(loc)
if m:
loc = int(loc)
else:
loc = 'best'
pl.legend(loc=loc)
def set_grid(args):
if args.no_grid:
pl.grid(False)
else:
pl.grid(True)
def ensure_xy_args(args):
x_is_none = args.x is None
y_is_none = args.y is None
if (x_is_none ^ y_is_none):
msg = "\nIf either x or y is specified, both must be specified\n\n"
sys.stderr.write(msg)
sys.exit(1)
def ensure_xy_omission_state(args, df):
if (len(df.columns) != 2) and (args.x is None):
msg = "\n\n x and y can be ommited only "
msg += "for 2-column data-frames\n"
sys.stderr.write(msg)
sys.exit(1)
def autofill_plot_fields_and_labels(args, df):
# add labels for two column inputs
if (args.x is None) and (len(df.columns) == 2):
args.x = [df.columns[0]]
args.y = [df.columns[1]]
# if no xlabel, set it to the x field
if args.xlabel is None:
args.xlabel = args.x
# if no ylabel, and only 1 trace being plotted, set ylabel to that field
if (args.ylabel is None) and (len(args.y) == 1):
args.ylabel = [args.y[0]]
def str_to_date(x):
try:
basestring
except NameError:
basestring = str
if isinstance(x.iloc[0], basestring):
return [parse(e) for e in x]
else:
return x
def draw_traces(args, df):
y_field_list = args.y
x = str_to_date(df[args.x[0]])
style_list = args.style
alpha_list = args.alpha
if len(style_list) != len(y_field_list):
style_list = [style_list[0] for y_field in y_field_list]
if len(alpha_list) != len(y_field_list):
alpha_list = [alpha_list[0] for y_field in y_field_list]
for y_field, style, alpha in zip(y_field_list, style_list, alpha_list):
y = df[y_field]
pl.plot(x, y, style, label=y_field, alpha=alpha)
def refine_plot(args):
set_limits(args)
set_scale(args)
set_labels_title(args)
set_grid(args)
set_legend(args)
def draw_xy_plot(args, df):
ensure_xy_args(args)
ensure_xy_omission_state(args, df)
autofill_plot_fields_and_labels(args, df)
draw_traces(args, df)
refine_plot(args)
show(args)
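# Hedged usage sketch (editor's addition). ``draw_xy_plot`` expects an
# argparse-style namespace; the attribute names below are inferred from the
# helper functions above, not taken from pandashells' CLI definitions.
#
#     from argparse import Namespace
#     import pandas as pd
#     args = Namespace(x=None, y=None, style=['-'], alpha=[1.0],
#                      xlim=None, ylim=None, xlog=False, ylog=False,
#                      title=None, xlabel=None, ylabel=None,
#                      legend=None, no_grid=False, savefig=None)
#     df = pd.DataFrame({'t': range(10), 'value': [v ** 2 for v in range(10)]})
#     draw_xy_plot(args, df)  # x/y are autofilled from the two columns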
| bsd-2-clause |
mne-tools/mne-tools.github.io | 0.17/_downloads/229f899fdb270fdb9c428734866a8207/plot_visualize_epochs.py | 10 | 5143 | """
.. _tut_viz_epochs:
Visualize Epochs data
=====================
"""
# sphinx_gallery_thumbnail_number = 7
import os.path as op
import mne
data_path = op.join(mne.datasets.sample.data_path(), 'MEG', 'sample')
raw = mne.io.read_raw_fif(
op.join(data_path, 'sample_audvis_raw.fif'), preload=True)
raw.load_data().filter(None, 9, fir_design='firwin')
raw.set_eeg_reference('average', projection=True) # set EEG average reference
event_id = {'auditory/left': 1, 'auditory/right': 2, 'visual/left': 3,
'visual/right': 4, 'smiley': 5, 'button': 32}
events = mne.read_events(op.join(data_path, 'sample_audvis_raw-eve.fif'))
epochs = mne.Epochs(raw, events, event_id=event_id, tmin=-0.2, tmax=.5)
###############################################################################
# This tutorial focuses on visualization of epoched data. All of the functions
# introduced here are basically high level matplotlib functions with built in
# intelligence to work with epoched data. All the methods return a handle to
# matplotlib figure instance.
#
# Events used for constructing the epochs here are the triggers for the subject
# being presented a smiley face at the center of the visual field. More about
# the paradigm at :ref:`BABDHIFJ`.
#
# All plotting functions start with ``plot``. Let's start with the most
# obvious. :func:`mne.Epochs.plot` offers an interactive browser that allows
# rejection by hand when called in combination with a keyword ``block=True``.
# This blocks the execution of the script until the browser window is closed.
epochs.plot(block=True)
###############################################################################
# The numbers at the top refer to the event id of the epoch. The number at the
# bottom is the running numbering for the epochs.
#
# Since we did no artifact correction or rejection, there are epochs
# contaminated with blinks and saccades. For instance, epoch number 1 seems to
# be contaminated by a blink (scroll to the bottom to view the EOG channel).
# This epoch can be marked for rejection by clicking on top of the browser
# window. The epoch should turn red when you click it. This means that it will
# be dropped as the browser window is closed.
#
# It is possible to plot event markers on epoched data by passing ``events``
# keyword to the epochs plotter. The events are plotted as vertical lines and
# they follow the same coloring scheme as :func:`mne.viz.plot_events`. The
# events plotter gives you all the events with a rough idea of the timing.
# Since the colors are the same, the event plotter can also function as a
# legend for the epochs plotter events. It is also possible to pass your own
# colors via ``event_colors`` keyword. Here we can plot the reaction times
# between seeing the smiley face and the button press (event 32).
#
# When events are passed, the epoch numbering at the bottom is switched off by
# default to avoid overlaps. You can turn it back on via the settings dialog by
# pressing the `o` key. You should check out `help` at the lower left corner of
# window for more information about the interactive features.
events = mne.pick_events(events, include=[5, 32])
mne.viz.plot_events(events)
epochs['smiley'].plot(events=events)
###############################################################################
# To plot individual channels as an image, where you see all the epochs at one
# glance, you can use function :func:`mne.Epochs.plot_image`. It shows the
# amplitude of the signal over all the epochs plus an average (evoked response)
# of the activation. We explicitly set interactive colorbar on (it is also on
# by default for plotting functions with a colorbar except the topo plots). In
# interactive mode you can scale and change the colormap with mouse scroll and
# up/down arrow keys. You can also drag the colorbar with left/right mouse
# button. Hitting space bar resets the scale.
epochs.plot_image(278, cmap='interactive', sigma=1., vmin=-250, vmax=250)
###############################################################################
# We can also give an overview of all channels by calculating the global
# field power (or other aggregation methods). However, combining
# multiple channel types (e.g., MEG and EEG) in this way is not sensible.
# Instead, we can use the ``group_by`` parameter. Setting ``group_by`` to
# 'type' combines channels by type.
# ``group_by`` can also be used to group channels into arbitrary groups, e.g.
# regions of interest, by providing a dictionary containing
# group name -> channel indices mappings.
epochs.plot_image(combine='gfp', group_by='type', sigma=2., cmap="YlGnBu_r")
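###############################################################################
# Hedged sketch (editor's addition, not in the original tutorial): ``group_by``
# also accepts a dictionary mapping group names to channel indices, e.g. for
# regions of interest. The indices below are arbitrary placeholders:
#
# epochs.plot_image(combine='gfp', sigma=2., cmap="YlGnBu_r",
#                   group_by=dict(ROI_1=[0, 1, 2, 3], ROI_2=[4, 5, 6, 7]))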
###############################################################################
# You also have functions for plotting channelwise information arranged into a
# shape of the channel array. The image plotting uses automatic scaling by
# default, but noisy channels and different channel types can cause the scaling
# to be a bit off. Here we define the limits by hand.
epochs.plot_topo_image(vmin=-250, vmax=250, title='ERF images', sigma=2.,
fig_facecolor='w', font_color='k')
| bsd-3-clause |
nmartensen/pandas | pandas/core/indexes/timedeltas.py | 1 | 34001 | """ implement the TimedeltaIndex """
from datetime import timedelta
import numpy as np
from pandas.core.dtypes.common import (
_TD_DTYPE,
is_integer, is_float,
is_bool_dtype,
is_list_like,
is_scalar,
is_integer_dtype,
is_object_dtype,
is_timedelta64_dtype,
is_timedelta64_ns_dtype,
_ensure_int64)
from pandas.core.dtypes.missing import isna
from pandas.core.dtypes.generic import ABCSeries
from pandas.core.common import _maybe_box, _values_from_object
from pandas.core.indexes.base import Index
from pandas.core.indexes.numeric import Int64Index
import pandas.compat as compat
from pandas.compat import u
from pandas.tseries.frequencies import to_offset
from pandas.core.algorithms import checked_add_with_arr
from pandas.core.base import _shared_docs
from pandas.core.indexes.base import _index_shared_docs
import pandas.core.common as com
import pandas.core.dtypes.concat as _concat
from pandas.util._decorators import Appender, Substitution, deprecate_kwarg
from pandas.core.indexes.datetimelike import TimelikeOps, DatetimeIndexOpsMixin
from pandas.core.tools.timedeltas import (
to_timedelta, _coerce_scalar_to_timedelta_type)
from pandas.tseries.offsets import Tick, DateOffset
from pandas._libs import (lib, index as libindex, tslib as libts,
join as libjoin, Timedelta, NaT, iNaT)
def _td_index_cmp(opname, nat_result=False):
"""
Wrap comparison operations to convert timedelta-like to timedelta64
"""
def wrapper(self, other):
msg = "cannot compare a TimedeltaIndex with type {0}"
func = getattr(super(TimedeltaIndex, self), opname)
if _is_convertible_to_td(other) or other is NaT:
try:
other = _to_m8(other)
except ValueError:
# failed to parse as timedelta
raise TypeError(msg.format(type(other)))
result = func(other)
if isna(other):
result.fill(nat_result)
else:
if not is_list_like(other):
raise TypeError(msg.format(type(other)))
other = TimedeltaIndex(other).values
result = func(other)
result = _values_from_object(result)
if isinstance(other, Index):
o_mask = other.values.view('i8') == iNaT
else:
o_mask = other.view('i8') == iNaT
if o_mask.any():
result[o_mask] = nat_result
if self.hasnans:
result[self._isnan] = nat_result
# support of bool dtype indexers
if is_bool_dtype(result):
return result
return Index(result)
return wrapper
class TimedeltaIndex(DatetimeIndexOpsMixin, TimelikeOps, Int64Index):
"""
Immutable ndarray of timedelta64 data, represented internally as int64, and
which can be boxed to timedelta objects
Parameters
----------
data : array-like (1-dimensional), optional
Optional timedelta-like data to construct index with
unit : str, optional
Unit of the data (D, h, m, s, ms, us, ns) when the input is an
integer or float number
freq : str or DateOffset, optional
A frequency for the index
copy : bool
Make a copy of input ndarray
start : starting value, timedelta-like, optional
If data is None, start is used as the start point in generating regular
timedelta data.
periods : int, optional, > 0
Number of periods to generate, if generating index. Takes precedence
over end argument
end : end time, timedelta-like, optional
If periods is none, generated index will extend to first conforming
time on or just past end argument
closed : string or None, default None
Make the interval closed with respect to the given frequency to
the 'left', 'right', or both sides (None)
name : object
Name to be stored in the index
Notes
-----
To learn more about the frequency strings, please see `this link
<http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.
"""
_typ = 'timedeltaindex'
_join_precedence = 10
def _join_i8_wrapper(joinf, **kwargs):
return DatetimeIndexOpsMixin._join_i8_wrapper(
joinf, dtype='m8[ns]', **kwargs)
_inner_indexer = _join_i8_wrapper(libjoin.inner_join_indexer_int64)
_outer_indexer = _join_i8_wrapper(libjoin.outer_join_indexer_int64)
_left_indexer = _join_i8_wrapper(libjoin.left_join_indexer_int64)
_left_indexer_unique = _join_i8_wrapper(
libjoin.left_join_indexer_unique_int64, with_indexers=False)
_arrmap = None
# define my properties & methods for delegation
_other_ops = []
_bool_ops = []
_object_ops = ['freq']
_field_ops = ['days', 'seconds', 'microseconds', 'nanoseconds']
_datetimelike_ops = _field_ops + _object_ops + _bool_ops
_datetimelike_methods = ["to_pytimedelta", "total_seconds",
"round", "floor", "ceil"]
__eq__ = _td_index_cmp('__eq__')
__ne__ = _td_index_cmp('__ne__', nat_result=True)
__lt__ = _td_index_cmp('__lt__')
__gt__ = _td_index_cmp('__gt__')
__le__ = _td_index_cmp('__le__')
__ge__ = _td_index_cmp('__ge__')
_engine_type = libindex.TimedeltaEngine
_comparables = ['name', 'freq']
_attributes = ['name', 'freq']
_is_numeric_dtype = True
_infer_as_myclass = True
freq = None
def __new__(cls, data=None, unit=None,
freq=None, start=None, end=None, periods=None,
copy=False, name=None,
closed=None, verify_integrity=True, **kwargs):
if isinstance(data, TimedeltaIndex) and freq is None and name is None:
if copy:
return data.copy()
else:
return data._shallow_copy()
freq_infer = False
if not isinstance(freq, DateOffset):
# if a passed freq is None, don't infer automatically
if freq != 'infer':
freq = to_offset(freq)
else:
freq_infer = True
freq = None
if periods is not None:
if is_float(periods):
periods = int(periods)
elif not is_integer(periods):
msg = 'periods must be a number, got {periods}'
raise TypeError(msg.format(periods=periods))
if data is None and freq is None:
raise ValueError("Must provide freq argument if no data is "
"supplied")
if data is None:
return cls._generate(start, end, periods, name, freq,
closed=closed)
if unit is not None:
data = to_timedelta(data, unit=unit, box=False)
if not isinstance(data, (np.ndarray, Index, ABCSeries)):
if is_scalar(data):
raise ValueError('TimedeltaIndex() must be called with a '
'collection of some kind, %s was passed'
% repr(data))
# convert if not already
if getattr(data, 'dtype', None) != _TD_DTYPE:
data = to_timedelta(data, unit=unit, box=False)
elif copy:
data = np.array(data, copy=True)
# check that we are matching freqs
if verify_integrity and len(data) > 0:
if freq is not None and not freq_infer:
index = cls._simple_new(data, name=name)
inferred = index.inferred_freq
if inferred != freq.freqstr:
on_freq = cls._generate(
index[0], None, len(index), name, freq)
if not np.array_equal(index.asi8, on_freq.asi8):
raise ValueError('Inferred frequency {0} from passed '
'timedeltas does not conform to '
'passed frequency {1}'
.format(inferred, freq.freqstr))
index.freq = freq
return index
if freq_infer:
index = cls._simple_new(data, name=name)
inferred = index.inferred_freq
if inferred:
index.freq = to_offset(inferred)
return index
return cls._simple_new(data, name=name, freq=freq)
@classmethod
def _generate(cls, start, end, periods, name, offset, closed=None):
if com._count_not_none(start, end, periods) != 2:
raise ValueError('Of the three parameters: start, end, and '
'periods, exactly two must be specified')
if start is not None:
start = Timedelta(start)
if end is not None:
end = Timedelta(end)
left_closed = False
right_closed = False
if start is None and end is None:
if closed is not None:
raise ValueError("Closed has to be None if not both of start"
"and end are defined")
if closed is None:
left_closed = True
right_closed = True
elif closed == "left":
left_closed = True
elif closed == "right":
right_closed = True
else:
raise ValueError("Closed has to be either 'left', 'right' or None")
index = _generate_regular_range(start, end, periods, offset)
index = cls._simple_new(index, name=name, freq=offset)
if not left_closed:
index = index[1:]
if not right_closed:
index = index[:-1]
return index
@property
def _box_func(self):
return lambda x: Timedelta(x, unit='ns')
@classmethod
def _simple_new(cls, values, name=None, freq=None, **kwargs):
values = np.array(values, copy=False)
if values.dtype == np.object_:
values = libts.array_to_timedelta64(values)
if values.dtype != _TD_DTYPE:
values = _ensure_int64(values).view(_TD_DTYPE)
result = object.__new__(cls)
result._data = values
result.name = name
result.freq = freq
result._reset_identity()
return result
@property
def _formatter_func(self):
from pandas.io.formats.format import _get_format_timedelta64
return _get_format_timedelta64(self, box=True)
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if isinstance(state, dict):
super(TimedeltaIndex, self).__setstate__(state)
else:
raise Exception("invalid pickle state")
_unpickle_compat = __setstate__
def _maybe_update_attributes(self, attrs):
""" Update Index attributes (e.g. freq) depending on op """
freq = attrs.get('freq', None)
if freq is not None:
# no need to infer if freq is None
attrs['freq'] = 'infer'
return attrs
def _add_delta(self, delta):
if isinstance(delta, (Tick, timedelta, np.timedelta64)):
new_values = self._add_delta_td(delta)
name = self.name
elif isinstance(delta, TimedeltaIndex):
new_values = self._add_delta_tdi(delta)
# update name when delta is index
name = com._maybe_match_name(self, delta)
else:
raise ValueError("cannot add the type {0} to a TimedeltaIndex"
.format(type(delta)))
result = TimedeltaIndex(new_values, freq='infer', name=name)
return result
def _evaluate_with_timedelta_like(self, other, op, opstr):
# allow division by a timedelta
if opstr in ['__div__', '__truediv__', '__floordiv__']:
if _is_convertible_to_td(other):
other = Timedelta(other)
if isna(other):
raise NotImplementedError(
"division by pd.NaT not implemented")
i8 = self.asi8
if opstr in ['__floordiv__']:
result = i8 // other.value
else:
result = op(i8, float(other.value))
result = self._maybe_mask_results(result, convert='float64')
return Index(result, name=self.name, copy=False)
return NotImplemented
def _add_datelike(self, other):
# adding a timedeltaindex to a datetimelike
from pandas import Timestamp, DatetimeIndex
if other is NaT:
result = self._nat_new(box=False)
else:
other = Timestamp(other)
i8 = self.asi8
result = checked_add_with_arr(i8, other.value)
result = self._maybe_mask_results(result, fill_value=iNaT)
return DatetimeIndex(result, name=self.name, copy=False)
def _sub_datelike(self, other):
from pandas import DatetimeIndex
if other is NaT:
result = self._nat_new(box=False)
else:
raise TypeError("cannot subtract a datelike from a TimedeltaIndex")
return DatetimeIndex(result, name=self.name, copy=False)
def _format_native_types(self, na_rep=u('NaT'),
date_format=None, **kwargs):
from pandas.io.formats.format import Timedelta64Formatter
return Timedelta64Formatter(values=self,
nat_rep=na_rep,
justify='all').get_result()
def _get_field(self, m):
values = self.asi8
hasnans = self.hasnans
if hasnans:
result = np.empty(len(self), dtype='float64')
mask = self._isnan
imask = ~mask
result.flat[imask] = np.array(
[getattr(Timedelta(val), m) for val in values[imask]])
result[mask] = np.nan
else:
result = np.array([getattr(Timedelta(val), m)
for val in values], dtype='int64')
return Index(result, name=self.name)
@property
def days(self):
""" Number of days for each element. """
return self._get_field('days')
@property
def seconds(self):
""" Number of seconds (>= 0 and less than 1 day) for each element. """
return self._get_field('seconds')
@property
def microseconds(self):
"""
Number of microseconds (>= 0 and less than 1 second) for each
element. """
return self._get_field('microseconds')
@property
def nanoseconds(self):
"""
Number of nanoseconds (>= 0 and less than 1 microsecond) for each
element.
"""
return self._get_field('nanoseconds')
@property
def components(self):
"""
Return a dataframe of the components (days, hours, minutes,
seconds, milliseconds, microseconds, nanoseconds) of the Timedeltas.
Returns
-------
a DataFrame
"""
from pandas import DataFrame
columns = ['days', 'hours', 'minutes', 'seconds',
'milliseconds', 'microseconds', 'nanoseconds']
hasnans = self.hasnans
if hasnans:
def f(x):
if isna(x):
return [np.nan] * len(columns)
return x.components
else:
def f(x):
return x.components
result = DataFrame([f(x) for x in self])
result.columns = columns
if not hasnans:
result = result.astype('int64')
return result
def total_seconds(self):
"""
Total duration of each element expressed in seconds.
.. versionadded:: 0.17.0
"""
return Index(self._maybe_mask_results(1e-9 * self.asi8),
name=self.name)
def to_pytimedelta(self):
"""
Return TimedeltaIndex as object ndarray of datetime.timedelta objects
Returns
-------
timedeltas : ndarray
"""
return libts.ints_to_pytimedelta(self.asi8)
@Appender(_index_shared_docs['astype'])
def astype(self, dtype, copy=True):
dtype = np.dtype(dtype)
if is_object_dtype(dtype):
return self.asobject
elif is_timedelta64_ns_dtype(dtype):
if copy is True:
return self.copy()
return self
elif is_timedelta64_dtype(dtype):
# return an index (essentially this is division)
result = self.values.astype(dtype, copy=copy)
if self.hasnans:
return Index(self._maybe_mask_results(result,
convert='float64'),
name=self.name)
return Index(result.astype('i8'), name=self.name)
elif is_integer_dtype(dtype):
return Index(self.values.astype('i8', copy=copy), dtype='i8',
name=self.name)
raise ValueError('Cannot cast TimedeltaIndex to dtype %s' % dtype)
def union(self, other):
"""
Specialized union for TimedeltaIndex objects. If combining
overlapping ranges with the same DateOffset, this will be much
faster than Index.union
Parameters
----------
other : TimedeltaIndex or array-like
Returns
-------
y : Index or TimedeltaIndex
"""
self._assert_can_do_setop(other)
if not isinstance(other, TimedeltaIndex):
try:
other = TimedeltaIndex(other)
except (TypeError, ValueError):
pass
this, other = self, other
if this._can_fast_union(other):
return this._fast_union(other)
else:
result = Index.union(this, other)
if isinstance(result, TimedeltaIndex):
if result.freq is None:
result.freq = to_offset(result.inferred_freq)
return result
def join(self, other, how='left', level=None, return_indexers=False,
sort=False):
"""
See Index.join
"""
if _is_convertible_to_index(other):
try:
other = TimedeltaIndex(other)
except (TypeError, ValueError):
pass
return Index.join(self, other, how=how, level=level,
return_indexers=return_indexers,
sort=sort)
def _wrap_joined_index(self, joined, other):
name = self.name if self.name == other.name else None
if (isinstance(other, TimedeltaIndex) and self.freq == other.freq and
self._can_fast_union(other)):
joined = self._shallow_copy(joined, name=name)
return joined
else:
return self._simple_new(joined, name)
def _can_fast_union(self, other):
if not isinstance(other, TimedeltaIndex):
return False
freq = self.freq
if freq is None or freq != other.freq:
return False
if not self.is_monotonic or not other.is_monotonic:
return False
if len(self) == 0 or len(other) == 0:
return True
# to make our life easier, "sort" the two ranges
if self[0] <= other[0]:
left, right = self, other
else:
left, right = other, self
right_start = right[0]
left_end = left[-1]
# Only need to "adjoin", not overlap
return (right_start == left_end + freq) or right_start in left
def _fast_union(self, other):
if len(other) == 0:
return self.view(type(self))
if len(self) == 0:
return other.view(type(self))
# to make our life easier, "sort" the two ranges
if self[0] <= other[0]:
left, right = self, other
else:
left, right = other, self
left_end = left[-1]
right_end = right[-1]
# concatenate
if left_end < right_end:
loc = right.searchsorted(left_end, side='right')
right_chunk = right.values[loc:]
dates = _concat._concat_compat((left.values, right_chunk))
return self._shallow_copy(dates)
else:
return left
def _wrap_union_result(self, other, result):
name = self.name if self.name == other.name else None
return self._simple_new(result, name=name, freq=None)
def intersection(self, other):
"""
Specialized intersection for TimedeltaIndex objects. May be much faster
than Index.intersection
Parameters
----------
other : TimedeltaIndex or array-like
Returns
-------
y : Index or TimedeltaIndex
"""
self._assert_can_do_setop(other)
if not isinstance(other, TimedeltaIndex):
try:
other = TimedeltaIndex(other)
except (TypeError, ValueError):
pass
result = Index.intersection(self, other)
return result
if len(self) == 0:
return self
if len(other) == 0:
return other
# to make our life easier, "sort" the two ranges
if self[0] <= other[0]:
left, right = self, other
else:
left, right = other, self
end = min(left[-1], right[-1])
start = right[0]
if end < start:
return type(self)(data=[])
else:
lslice = slice(*left.slice_locs(start, end))
left_chunk = left.values[lslice]
return self._shallow_copy(left_chunk)
def _maybe_promote(self, other):
if other.inferred_type == 'timedelta':
other = TimedeltaIndex(other)
return self, other
def get_value(self, series, key):
"""
Fast lookup of value from 1-dimensional ndarray. Only use this if you
know what you're doing
"""
if _is_convertible_to_td(key):
key = Timedelta(key)
return self.get_value_maybe_box(series, key)
try:
return _maybe_box(self, Index.get_value(self, series, key),
series, key)
except KeyError:
try:
loc = self._get_string_slice(key)
return series[loc]
except (TypeError, ValueError, KeyError):
pass
try:
return self.get_value_maybe_box(series, key)
except (TypeError, ValueError, KeyError):
raise KeyError(key)
def get_value_maybe_box(self, series, key):
if not isinstance(key, Timedelta):
key = Timedelta(key)
values = self._engine.get_value(_values_from_object(series), key)
return _maybe_box(self, values, series, key)
def get_loc(self, key, method=None, tolerance=None):
"""
Get integer location for requested label
Returns
-------
loc : int
"""
if is_list_like(key):
raise TypeError
if isna(key):
key = NaT
if tolerance is not None:
# try converting tolerance now, so errors don't get swallowed by
# the try/except clauses below
tolerance = self._convert_tolerance(tolerance)
if _is_convertible_to_td(key):
key = Timedelta(key)
return Index.get_loc(self, key, method, tolerance)
try:
return Index.get_loc(self, key, method, tolerance)
except (KeyError, ValueError, TypeError):
try:
return self._get_string_slice(key)
except (TypeError, KeyError, ValueError):
pass
try:
stamp = Timedelta(key)
return Index.get_loc(self, stamp, method, tolerance)
except (KeyError, ValueError):
raise KeyError(key)
def _maybe_cast_slice_bound(self, label, side, kind):
"""
If label is a string, cast it to timedelta according to resolution.
Parameters
----------
label : object
side : {'left', 'right'}
kind : {'ix', 'loc', 'getitem'}
Returns
-------
label : object
"""
assert kind in ['ix', 'loc', 'getitem', None]
if isinstance(label, compat.string_types):
parsed = _coerce_scalar_to_timedelta_type(label, box=True)
lbound = parsed.round(parsed.resolution)
if side == 'left':
return lbound
else:
return (lbound + to_offset(parsed.resolution) -
Timedelta(1, 'ns'))
elif is_integer(label) or is_float(label):
self._invalid_indexer('slice', label)
return label
def _get_string_slice(self, key, use_lhs=True, use_rhs=True):
freq = getattr(self, 'freqstr',
getattr(self, 'inferred_freq', None))
if is_integer(key) or is_float(key) or key is NaT:
self._invalid_indexer('slice', key)
loc = self._partial_td_slice(key, freq, use_lhs=use_lhs,
use_rhs=use_rhs)
return loc
def _partial_td_slice(self, key, freq, use_lhs=True, use_rhs=True):
# given a key, try to figure out a location for a partial slice
if not isinstance(key, compat.string_types):
return key
raise NotImplementedError
# TODO(wesm): dead code
# parsed = _coerce_scalar_to_timedelta_type(key, box=True)
# is_monotonic = self.is_monotonic
# # figure out the resolution of the passed td
# # and round to it
# # t1 = parsed.round(reso)
# t2 = t1 + to_offset(parsed.resolution) - Timedelta(1, 'ns')
# stamps = self.asi8
# if is_monotonic:
# # we are out of range
# if (len(stamps) and ((use_lhs and t1.value < stamps[0] and
# t2.value < stamps[0]) or
# ((use_rhs and t1.value > stamps[-1] and
# t2.value > stamps[-1])))):
# raise KeyError
# # a monotonic (sorted) series can be sliced
# left = (stamps.searchsorted(t1.value, side='left')
# if use_lhs else None)
# right = (stamps.searchsorted(t2.value, side='right')
# if use_rhs else None)
# return slice(left, right)
# lhs_mask = (stamps >= t1.value) if use_lhs else True
# rhs_mask = (stamps <= t2.value) if use_rhs else True
# # try to find a the dates
# return (lhs_mask & rhs_mask).nonzero()[0]
@Substitution(klass='TimedeltaIndex')
@Appender(_shared_docs['searchsorted'])
@deprecate_kwarg(old_arg_name='key', new_arg_name='value')
def searchsorted(self, value, side='left', sorter=None):
if isinstance(value, (np.ndarray, Index)):
value = np.array(value, dtype=_TD_DTYPE, copy=False)
else:
value = _to_m8(value)
return self.values.searchsorted(value, side=side, sorter=sorter)
def is_type_compatible(self, typ):
return typ == self.inferred_type or typ == 'timedelta'
@property
def inferred_type(self):
return 'timedelta64'
@property
def dtype(self):
return _TD_DTYPE
@property
def is_all_dates(self):
return True
def insert(self, loc, item):
"""
Make new Index inserting new item at location
Parameters
----------
loc : int
item : object
if not either a Timedelta or a string parseable as one, the returned
Index dtype will be object rather than timedelta64.
Returns
-------
new_index : Index
"""
# try to convert if possible
if _is_convertible_to_td(item):
try:
item = Timedelta(item)
except:
pass
freq = None
if isinstance(item, (Timedelta, libts.NaTType)):
# check freq can be preserved on edge cases
if self.freq is not None:
if ((loc == 0 or loc == -len(self)) and
item + self.freq == self[0]):
freq = self.freq
elif (loc == len(self)) and item - self.freq == self[-1]:
freq = self.freq
item = _to_m8(item)
try:
new_tds = np.concatenate((self[:loc].asi8, [item.view(np.int64)],
self[loc:].asi8))
return TimedeltaIndex(new_tds, name=self.name, freq=freq)
except (AttributeError, TypeError):
# fall back to object index
if isinstance(item, compat.string_types):
return self.asobject.insert(loc, item)
raise TypeError(
"cannot insert TimedeltaIndex with incompatible label")
def delete(self, loc):
"""
Make a new TimedeltaIndex with passed location(s) deleted.
Parameters
----------
loc: int, slice or array of ints
Indicate which sub-arrays to remove.
Returns
-------
new_index : TimedeltaIndex
"""
new_tds = np.delete(self.asi8, loc)
freq = 'infer'
if is_integer(loc):
if loc in (0, -len(self), -1, len(self) - 1):
freq = self.freq
else:
if is_list_like(loc):
loc = lib.maybe_indices_to_slice(
_ensure_int64(np.array(loc)), len(self))
if isinstance(loc, slice) and loc.step in (1, None):
if (loc.start in (0, None) or loc.stop in (len(self), None)):
freq = self.freq
return TimedeltaIndex(new_tds, name=self.name, freq=freq)
TimedeltaIndex._add_numeric_methods()
TimedeltaIndex._add_logical_methods_disabled()
TimedeltaIndex._add_datetimelike_methods()
def _is_convertible_to_index(other):
"""
return a boolean whether I can attempt conversion to a TimedeltaIndex
"""
if isinstance(other, TimedeltaIndex):
return True
elif (len(other) > 0 and
other.inferred_type not in ('floating', 'mixed-integer', 'integer',
'mixed-integer-float', 'mixed')):
return True
return False
def _is_convertible_to_td(key):
return isinstance(key, (DateOffset, timedelta, Timedelta,
np.timedelta64, compat.string_types))
def _to_m8(key):
"""
Timedelta-like => np.timedelta64 (m8[ns])
"""
if not isinstance(key, Timedelta):
# this also converts strings
key = Timedelta(key)
# return a type that can be compared
return np.int64(key.value).view(_TD_DTYPE)
def _generate_regular_range(start, end, periods, offset):
stride = offset.nanos
if periods is None:
b = Timedelta(start).value
e = Timedelta(end).value
e += stride - e % stride
elif start is not None:
b = Timedelta(start).value
e = b + periods * stride
elif end is not None:
e = Timedelta(end).value + stride
b = e - periods * stride
else:
raise ValueError("at least 'start' or 'end' should be specified "
"if a 'period' is given.")
data = np.arange(b, e, stride, dtype=np.int64)
data = TimedeltaIndex._simple_new(data, None)
return data
def timedelta_range(start=None, end=None, periods=None, freq='D',
name=None, closed=None):
"""
Return a fixed frequency TimedeltaIndex, with day as the default
frequency
Parameters
----------
start : string or timedelta-like, default None
Left bound for generating timedeltas
end : string or timedelta-like, default None
Right bound for generating timedeltas
periods : integer, default None
Number of periods to generate
freq : string or DateOffset, default 'D' (calendar daily)
Frequency strings can have multiples, e.g. '5H'
name : string, default None
Name of the resulting TimedeltaIndex
closed : string, default None
Make the interval closed with respect to the given frequency to
the 'left', 'right', or both sides (None)
Returns
-------
rng : TimedeltaIndex
Notes
-----
Of the three parameters: ``start``, ``end``, and ``periods``, exactly two
must be specified.
To learn more about the frequency strings, please see `this link
<http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.
Examples
--------
>>> pd.timedelta_range(start='1 day', periods=4)
TimedeltaIndex(['1 days', '2 days', '3 days', '4 days'],
dtype='timedelta64[ns]', freq='D')
The ``closed`` parameter specifies which endpoint is included. The default
behavior is to include both endpoints.
>>> pd.timedelta_range(start='1 day', periods=4, closed='right')
TimedeltaIndex(['2 days', '3 days', '4 days'],
dtype='timedelta64[ns]', freq='D')
The ``freq`` parameter specifies the frequency of the TimedeltaIndex.
Only fixed frequencies can be passed, non-fixed frequencies such as
'M' (month end) will raise.
>>> pd.timedelta_range(start='1 day', end='2 days', freq='6H')
TimedeltaIndex(['1 days 00:00:00', '1 days 06:00:00', '1 days 12:00:00',
'1 days 18:00:00', '2 days 00:00:00'],
dtype='timedelta64[ns]', freq='6H')
"""
return TimedeltaIndex(start=start, end=end, periods=periods,
freq=freq, name=name, closed=closed)
| bsd-3-clause |
georgid/sms-tools | lectures/6-Harmonic-model/plots-code/spectral-peaks.py | 2 | 1160 | import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import hamming, triang, blackmanharris
import math
import sys, os, functools, time
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import dftModel as DFT
import utilFunctions as UF
(fs, x) = UF.wavread('../../../sounds/oboe-A4.wav')
N = 512*2
M = 511
t = -60
w = np.hamming(M)
start = .8*fs
hN = N/2
hM = (M+1)/2
x1 = x[start:start+M]
mX, pX = DFT.dftAnal(x1, w, N)
ploc = UF.peakDetection(mX, hN, t)
iploc, ipmag, ipphase = UF.peakInterp(mX, pX, ploc)
pmag = mX[ploc]
freqaxis = fs*np.arange(N/2)/float(N)
plt.figure(1, figsize=(9, 6))
plt.subplot (2,1,1)
plt.plot(freqaxis,mX,'r', lw=1.5)
plt.axis([0,7000,-80,max(mX)+1])
plt.plot(fs * iploc / N, ipmag, marker='x', color='b', linestyle='', markeredgewidth=1.5)
plt.title('mX + peaks (oboe-A4.wav)')
plt.subplot (2,1,2)
plt.plot(freqaxis,pX,'c', lw=1.5)
plt.axis([0,7000,min(pX),10])
plt.plot(fs * iploc / N, ipphase, marker='x', color='b', linestyle='', markeredgewidth=1.5)
plt.title('pX + peaks')
plt.tight_layout()
plt.savefig('spectral-peaks.png')
plt.show()
| agpl-3.0 |
numpy/datetime | numpy/core/code_generators/ufunc_docstrings.py | 5 | 83888 | # Docstrings for generated ufuncs
docdict = {}
def get(name):
return docdict.get(name)
def add_newdoc(place, name, doc):
docdict['.'.join((place, name))] = doc
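# Editor's note (hedged): the entries collected in ``docdict`` are keyed as
# 'numpy.core.umath.<ufunc name>' and looked up via ``get`` by numpy's code
# generators at build time, so each generated ufunc carries the docstring
# defined in this file.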
add_newdoc('numpy.core.umath', 'absolute',
"""
Calculate the absolute value element-wise.
Parameters
----------
x : array_like
Input array.
Returns
-------
res : ndarray
An ndarray containing the absolute value of
each element in `x`. For complex input, ``a + ib``, the
absolute value is :math:`\\sqrt{ a^2 + b^2 }`.
Examples
--------
>>> x = np.array([-1.2, 1.2])
>>> np.absolute(x)
array([ 1.2, 1.2])
>>> np.absolute(1.2 + 1j)
1.5620499351813308
Plot the function over ``[-10, 10]``:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-10, 10, 101)
>>> plt.plot(x, np.absolute(x))
>>> plt.show()
Plot the function over the complex plane:
>>> xx = x + 1j * x[:, np.newaxis]
>>> plt.imshow(np.abs(xx), extent=[-10, 10, -10, 10])
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'add',
"""
Add arguments element-wise.
Parameters
----------
x1, x2 : array_like
The arrays to be added.
Returns
-------
y : {ndarray, scalar}
The sum of `x1` and `x2`, element-wise. Returns scalar if
both `x1` and `x2` are scalars.
Notes
-----
Equivalent to `x1` + `x2` in terms of array broadcasting.
Examples
--------
>>> np.add(1.0, 4.0)
5.0
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> np.add(x1, x2)
array([[ 0., 2., 4.],
[ 3., 5., 7.],
[ 6., 8., 10.]])
""")
add_newdoc('numpy.core.umath', 'arccos',
"""
Trigonometric inverse cosine, element-wise.
The inverse of `cos` so that, if ``y = cos(x)``, then ``x = arccos(y)``.
Parameters
----------
x : array_like
`x`-coordinate on the unit circle.
For real arguments, the domain is [-1, 1].
out : ndarray, optional
Array of the same shape as `a`, to store results in. See
`doc.ufuncs` (Section "Output arguments") for more details.
Returns
-------
angle : ndarray
The angle of the ray intersecting the unit circle at the given
`x`-coordinate in radians [0, pi]. If `x` is a scalar then a
scalar is returned, otherwise an array of the same shape as `x`
is returned.
See Also
--------
cos, arctan, arcsin, emath.arccos
Notes
-----
`arccos` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that `cos(z) = x`. The convention is to return the
angle `z` whose real part lies in `[0, pi]`.
For real-valued input data types, `arccos` always returns real output.
For each value that cannot be expressed as a real number or infinity, it
yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arccos` is a complex analytical function that
has branch cuts `[-inf, -1]` and `[1, inf]` and is continuous from above
on the former and from below on the latter.
The inverse `cos` is also known as `acos` or cos^-1.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 79. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Inverse trigonometric function",
http://en.wikipedia.org/wiki/Inverse_trigonometric_function
Examples
--------
We expect the arccos of 1 to be 0, and of -1 to be pi:
>>> np.arccos([1, -1])
array([ 0. , 3.14159265])
Plot arccos:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-1, 1, num=100)
>>> plt.plot(x, np.arccos(x))
>>> plt.axis('tight')
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'arccosh',
"""
Inverse hyperbolic cosine, elementwise.
Parameters
----------
x : array_like
Input array.
out : ndarray, optional
Array of the same shape as `x`, to store results in.
See `doc.ufuncs` (Section "Output arguments") for details.
Returns
-------
y : ndarray
Array of the same shape as `x`.
See Also
--------
cosh, arcsinh, sinh, arctanh, tanh
Notes
-----
`arccosh` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that `cosh(z) = x`. The convention is to return the
`z` whose imaginary part lies in `[-pi, pi]` and the real part in
``[0, inf]``.
For real-valued input data types, `arccosh` always returns real output.
For each value that cannot be expressed as a real number or infinity, it
yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arccosh` is a complex analytical function that
has a branch cut `[-inf, 1]` and is continuous from above on it.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 86. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Inverse hyperbolic function",
http://en.wikipedia.org/wiki/Arccosh
Examples
--------
>>> np.arccosh([np.e, 10.0])
array([ 1.65745445, 2.99322285])
>>> np.arccosh(1)
0.0
""")
add_newdoc('numpy.core.umath', 'arcsin',
"""
Inverse sine elementwise.
Parameters
----------
x : array_like
`y`-coordinate on the unit circle.
out : ndarray, optional
Array of the same shape as `x`, to store results in. See
`doc.ufuncs` (Section "Output arguments") for more details.
Returns
-------
angle : ndarray
The angle of the ray intersecting the unit circle at the given
`y`-coordinate in radians ``[-pi/2, pi/2]``. If `x` is a scalar then
a scalar is returned, otherwise an array is returned.
See Also
--------
sin, cos, arccos, tan, arctan, arctan2, emath.arcsin
Notes
-----
`arcsin` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that `sin(z) = x`. The convention is to return the
angle `z` whose real part lies in `[-pi/2, pi/2]`.
For real-valued input data types, `arcsin` always returns real output.
For each value that cannot be expressed as a real number or infinity, it
yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arcsin` is a complex analytical function that
has branch cuts `[-inf, -1]` and `[1, inf]` and is continuous from above
on the former and from below on the latter.
The inverse sine is also known as `asin` or ``sin^-1``.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 79. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Inverse trigonometric function",
http://en.wikipedia.org/wiki/Inverse_trigonometric_function
Examples
--------
>>> np.arcsin(1) # pi/2
1.5707963267948966
>>> np.arcsin(-1) # -pi/2
-1.5707963267948966
>>> np.arcsin(0)
0.0
""")
add_newdoc('numpy.core.umath', 'arcsinh',
"""
Inverse hyperbolic sine elementwise.
Parameters
----------
x : array_like
Input array.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See `doc.ufuncs`.
Returns
-------
out : ndarray
Array of the same shape as `x`.
Notes
-----
`arcsinh` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that `sinh(z) = x`. The convention is to return the
`z` whose imaginary part lies in `[-pi/2, pi/2]`.
For real-valued input data types, `arcsinh` always returns real output.
For each value that cannot be expressed as a real number or infinity, it
returns ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arcsinh` is a complex analytical function that
has branch cuts `[1j, infj]` and `[-1j, -infj]` and is continuous from
the right on the former and from the left on the latter.
The inverse hyperbolic sine is also known as `asinh` or ``sinh^-1``.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 86. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Inverse hyperbolic function",
http://en.wikipedia.org/wiki/Arcsinh
Examples
--------
>>> np.arcsinh(np.array([np.e, 10.0]))
array([ 1.72538256, 2.99822295])
""")
add_newdoc('numpy.core.umath', 'arctan',
"""
Trigonometric inverse tangent, element-wise.
The inverse of tan, so that if ``y = tan(x)`` then
``x = arctan(y)``.
Parameters
----------
x : array_like
Input values. `arctan` is applied to each element of `x`.
Returns
-------
out : ndarray
Out has the same shape as `x`. Its real part is
in ``[-pi/2, pi/2]``. It is a scalar if `x` is a scalar.
See Also
--------
arctan2 : Calculate the arctan of y/x.
Notes
-----
`arctan` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that `tan(z) = x`. The convention is to return the
angle `z` whose real part lies in `[-pi/2, pi/2]`.
For real-valued input data types, `arctan` always returns real output.
For each value that cannot be expressed as a real number or infinity, it
yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arctan` is a complex analytical function that
has branch cuts `[1j, infj]` and `[-1j, -infj]` and is continuous from the
left on the former and from the right on the latter.
The inverse tangent is also known as `atan` or ``tan^-1``.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 79. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Inverse trigonometric function",
http://en.wikipedia.org/wiki/Arctan
Examples
--------
We expect the arctan of 0 to be 0, and of 1 to be :math:`\\pi/4`:
>>> np.arctan([0, 1])
array([ 0. , 0.78539816])
>>> np.pi/4
0.78539816339744828
Plot arctan:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-10, 10)
>>> plt.plot(x, np.arctan(x))
>>> plt.axis('tight')
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'arctan2',
"""
Elementwise arc tangent of ``x1/x2`` choosing the quadrant correctly.
The quadrant (ie. branch) is chosen so that ``arctan2(x1, x2)``
is the signed angle in radians between the line segments
``(0,0) - (1,0)`` and ``(0,0) - (x2,x1)``. This function is defined
also for `x2` = 0.
`arctan2` is not defined for complex-valued arguments.
Parameters
----------
x1 : array_like, real-valued
y-coordinates.
x2 : array_like, real-valued
x-coordinates. `x2` must be broadcastable to match the shape of `x1`,
or vice versa.
Returns
-------
angle : ndarray
Array of angles in radians, in the range ``[-pi, pi]``.
See Also
--------
arctan, tan
Notes
-----
`arctan2` is identical to the `atan2` function of the underlying
C library. The following special values are defined in the C standard [2]:
====== ====== ================
`x1` `x2` `arctan2(x1,x2)`
====== ====== ================
+/- 0 +0 +/- 0
+/- 0 -0 +/- pi
> 0 +/-inf +0 / +pi
< 0 +/-inf -0 / -pi
+/-inf +inf +/- (pi/4)
+/-inf -inf +/- (3*pi/4)
====== ====== ================
Note that +0 and -0 are distinct floating point numbers.
References
----------
.. [1] Wikipedia, "atan2",
http://en.wikipedia.org/wiki/Atan2
.. [2] ISO/IEC standard 9899:1999, "Programming language C", 1999.
Examples
--------
Consider four points in different quadrants:
>>> x = np.array([-1, +1, +1, -1])
>>> y = np.array([-1, -1, +1, +1])
>>> np.arctan2(y, x) * 180 / np.pi
array([-135., -45., 45., 135.])
Note the order of the parameters. `arctan2` is defined also when `x2` = 0
and at several other special points, obtaining values in
the range ``[-pi, pi]``:
>>> np.arctan2([1., -1.], [0., 0.])
array([ 1.57079633, -1.57079633])
>>> np.arctan2([0., 0., np.inf], [+0., -0., np.inf])
array([ 0. , 3.14159265, 0.78539816])
""")
add_newdoc('numpy.core.umath', '_arg',
"""
DO NOT USE, ONLY FOR TESTING
""")
add_newdoc('numpy.core.umath', 'arctanh',
"""
Inverse hyperbolic tangent elementwise.
Parameters
----------
x : array_like
Input array.
Returns
-------
out : ndarray
Array of the same shape as `x`.
See Also
--------
emath.arctanh
Notes
-----
`arctanh` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that `tanh(z) = x`. The convention is to return the
`z` whose imaginary part lies in `[-pi/2, pi/2]`.
For real-valued input data types, `arctanh` always returns real output.
For each value that cannot be expressed as a real number or infinity, it
yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arctanh` is a complex analytical function that
has branch cuts `[-1, -inf]` and `[1, inf]` and is continuous from
above on the former and from below on the latter.
The inverse hyperbolic tangent is also known as `atanh` or ``tanh^-1``.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 86. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Inverse hyperbolic function",
http://en.wikipedia.org/wiki/Arctanh
Examples
--------
>>> np.arctanh([0, -0.5])
array([ 0. , -0.54930614])
""")
add_newdoc('numpy.core.umath', 'bitwise_and',
"""
Compute the bit-wise AND of two arrays element-wise.
Computes the bit-wise AND of the underlying binary representation of
the integers in the input arrays. This ufunc implements the C/Python
operator ``&``.
Parameters
----------
x1, x2 : array_like
Only integer types are handled (including booleans).
Returns
-------
out : array_like
Result.
See Also
--------
logical_and
bitwise_or
bitwise_xor
binary_repr :
Return the binary representation of the input number as a string.
Examples
--------
The number 13 is represented by ``00001101``. Likewise, 17 is
represented by ``00010001``. The bit-wise AND of 13 and 17 is
therefore ``00000001``, or 1:
>>> np.bitwise_and(13, 17)
1
>>> np.bitwise_and(14, 13)
12
>>> np.binary_repr(12)
'1100'
>>> np.bitwise_and([14,3], 13)
array([12, 1])
>>> np.bitwise_and([11,7], [4,25])
array([0, 1])
>>> np.bitwise_and(np.array([2,5,255]), np.array([3,14,16]))
array([ 2, 4, 16])
>>> np.bitwise_and([True, True], [False, True])
array([False, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'bitwise_or',
"""
Compute the bit-wise OR of two arrays element-wise.
Computes the bit-wise OR of the underlying binary representation of
the integers in the input arrays. This ufunc implements the C/Python
operator ``|``.
Parameters
----------
x1, x2 : array_like
Only integer types are handled (including booleans).
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
out : array_like
Result.
See Also
--------
logical_or
bitwise_and
bitwise_xor
binary_repr :
Return the binary representation of the input number as a string.
Examples
--------
The number 13 has the binary representation ``00001101``. Likewise,
16 is represented by ``00010000``. The bit-wise OR of 13 and 16 is
then ``00011101``, or 29:
>>> np.bitwise_or(13, 16)
29
>>> np.binary_repr(29)
'11101'
>>> np.bitwise_or(32, 2)
34
>>> np.bitwise_or([33, 4], 1)
array([33, 5])
>>> np.bitwise_or([33, 4], [1, 2])
array([33, 6])
>>> np.bitwise_or(np.array([2, 5, 255]), np.array([4, 4, 4]))
array([ 6, 5, 255])
>>> np.array([2, 5, 255]) | np.array([4, 4, 4])
array([ 6, 5, 255])
>>> np.bitwise_or(np.array([2, 5, 255, 2147483647], dtype=np.int32),
...               np.array([4, 4, 4, 2147483647], dtype=np.int32))
array([ 6, 5, 255, 2147483647])
>>> np.bitwise_or([True, True], [False, True])
array([ True, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'bitwise_xor',
"""
Compute the bit-wise XOR of two arrays element-wise.
Computes the bit-wise XOR of the underlying binary representation of
the integers in the input arrays. This ufunc implements the C/Python
operator ``^``.
Parameters
----------
x1, x2 : array_like
Only integer types are handled (including booleans).
Returns
-------
out : array_like
Result.
See Also
--------
logical_xor
bitwise_and
bitwise_or
binary_repr :
Return the binary representation of the input number as a string.
Examples
--------
The number 13 is represented by ``00001101``. Likewise, 17 is
represented by ``00010001``. The bit-wise XOR of 13 and 17 is
therefore ``00011100``, or 28:
>>> np.bitwise_xor(13, 17)
28
>>> np.binary_repr(28)
'11100'
>>> np.bitwise_xor(31, 5)
26
>>> np.bitwise_xor([31,3], 5)
array([26, 6])
>>> np.bitwise_xor([31,3], [5,6])
array([26, 5])
>>> np.bitwise_xor([True, True], [False, True])
array([ True, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'ceil',
"""
Return the ceiling of the input, element-wise.
The ceil of the scalar `x` is the smallest integer `i`, such that
`i >= x`. It is often denoted as :math:`\\lceil x \\rceil`.
Parameters
----------
x : array_like
Input data.
Returns
-------
y : {ndarray, scalar}
The ceiling of each element in `x`, with `float` dtype.
See Also
--------
floor, trunc, rint
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.ceil(a)
array([-1., -1., -0., 1., 2., 2., 2.])
""")
add_newdoc('numpy.core.umath', 'trunc',
"""
Return the truncated value of the input, element-wise.
The truncated value of the scalar `x` is the nearest integer `i` which
is closer to zero than `x` is. In short, the fractional part of the
signed number `x` is discarded.
Parameters
----------
x : array_like
Input data.
Returns
-------
y : {ndarray, scalar}
The truncated value of each element in `x`.
See Also
--------
ceil, floor, rint
Notes
-----
.. versionadded:: 1.3.0
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.trunc(a)
array([-1., -1., -0., 0., 1., 1., 2.])
""")
add_newdoc('numpy.core.umath', 'conjugate',
"""
Return the complex conjugate, element-wise.
The complex conjugate of a complex number is obtained by changing the
sign of its imaginary part.
Parameters
----------
x : array_like
Input value.
Returns
-------
y : ndarray
The complex conjugate of `x`, with the same dtype as `x`.
Examples
--------
>>> np.conjugate(1+2j)
(1-2j)
>>> x = np.eye(2) + 1j * np.eye(2)
>>> np.conjugate(x)
array([[ 1.-1.j, 0.-0.j],
[ 0.-0.j, 1.-1.j]])
""")
add_newdoc('numpy.core.umath', 'cos',
"""
Cosine elementwise.
Parameters
----------
x : array_like
Input array in radians.
out : ndarray, optional
Output array of same shape as `x`.
Returns
-------
y : ndarray
The corresponding cosine values.
Raises
------
ValueError: invalid return array shape
if `out` is provided and `out.shape` != `x.shape` (See Examples)
Notes
-----
If `out` is provided, the function writes the result into it,
and returns a reference to `out`. (See Examples)
References
----------
M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions.
New York, NY: Dover, 1972.
Examples
--------
>>> np.cos(np.array([0, np.pi/2, np.pi]))
array([ 1.00000000e+00, 6.12303177e-17, -1.00000000e+00])
>>>
>>> # Example of providing the optional output parameter
>>> out1 = np.array([0], dtype='d')
>>> out2 = np.cos([0.1], out1)
>>> out2 is out1
True
>>>
>>> # Example of ValueError due to provision of shape mis-matched `out`
>>> np.cos(np.zeros((3,3)),np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: invalid return array shape
""")
add_newdoc('numpy.core.umath', 'cosh',
"""
Hyperbolic cosine, element-wise.
Equivalent to ``1/2 * (np.exp(x) + np.exp(-x))`` and ``np.cos(1j*x)``.
Parameters
----------
x : array_like
Input array.
Returns
-------
out : ndarray
Output array of same shape as `x`.
Examples
--------
>>> np.cosh(0)
1.0
The hyperbolic cosine describes the shape of a hanging cable:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-4, 4, 1000)
>>> plt.plot(x, np.cosh(x))
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'degrees',
"""
Convert angles from radians to degrees.
Parameters
----------
x : array_like
Input array in radians.
out : ndarray, optional
Output array of same shape as x.
Returns
-------
y : ndarray of floats
The corresponding degree values.
See Also
--------
rad2deg : equivalent function
Examples
--------
Convert a radian array to degrees
>>> rad = np.arange(12.)*np.pi/6
>>> np.degrees(rad)
array([ 0., 30., 60., 90., 120., 150., 180., 210., 240.,
270., 300., 330.])
>>> out = np.zeros((rad.shape))
>>> r = np.degrees(rad, out)
>>> np.all(r == out)
True
""")
add_newdoc('numpy.core.umath', 'rad2deg',
"""
Convert angles from radians to degrees.
Parameters
----------
x : array_like
Angle in radians.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
y : ndarray
The corresponding angle in degrees.
See Also
--------
deg2rad : Convert angles from degrees to radians.
unwrap : Remove large jumps in angle by wrapping.
Notes
-----
.. versionadded:: 1.3.0
rad2deg(x) is ``180 * x / pi``.
Examples
--------
>>> np.rad2deg(np.pi/2)
90.0
""")
add_newdoc('numpy.core.umath', 'divide',
"""
Divide arguments element-wise.
Parameters
----------
x1 : array_like
Dividend array.
x2 : array_like
Divisor array.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
y : {ndarray, scalar}
The quotient `x1/x2`, element-wise. Returns a scalar if
both `x1` and `x2` are scalars.
See Also
--------
seterr : Set whether to raise or warn on overflow, underflow and division
by zero.
Notes
-----
Equivalent to `x1` / `x2` in terms of array-broadcasting.
Behavior on division by zero can be changed using `seterr`.
When both `x1` and `x2` are of an integer type, `divide` will return
integers and throw away the fractional part. Moreover, division by zero
always yields zero in integer arithmetic.
Examples
--------
>>> np.divide(2.0, 4.0)
0.5
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> np.divide(x1, x2)
array([[ NaN, 1. , 1. ],
[ Inf, 4. , 2.5],
[ Inf, 7. , 4. ]])
Note the behavior with integer types:
>>> np.divide(2, 4)
0
>>> np.divide(2, 4.)
0.5
Division by zero always yields zero in integer arithmetic, and does not
raise an exception or a warning:
>>> np.divide(np.array([0, 1], dtype=int), np.array([0, 0], dtype=int))
array([0, 0])
Division by zero can, however, be caught using `seterr`:
>>> old_err_state = np.seterr(divide='raise')
>>> np.divide(1, 0)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
FloatingPointError: divide by zero encountered in divide
>>> ignored_states = np.seterr(**old_err_state)
>>> np.divide(1, 0)
0
""")
add_newdoc('numpy.core.umath', 'equal',
"""
Return (x1 == x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays of the same shape.
Returns
-------
out : {ndarray, bool}
Output array of bools, or a single bool if x1 and x2 are scalars.
See Also
--------
not_equal, greater_equal, less_equal, greater, less
Examples
--------
>>> np.equal([0, 1, 3], np.arange(3))
array([ True, True, False], dtype=bool)
What is compared are values, not types. So an int (1) and an array of
length one can evaluate as True:
>>> np.equal(1, np.ones(1))
array([ True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'exp',
"""
Calculate the exponential of all elements in the input array.
Parameters
----------
x : array_like
Input values.
Returns
-------
out : ndarray
Output array, element-wise exponential of `x`.
See Also
--------
expm1 : Calculate ``exp(x) - 1`` for all elements in the array.
exp2 : Calculate ``2**x`` for all elements in the array.
Notes
-----
The irrational number ``e`` is also known as Euler's number. It is
approximately 2.718281, and is the base of the natural logarithm,
``ln`` (this means that, if :math:`x = \\ln y = \\log_e y`,
then :math:`e^x = y`). For real input, ``exp(x)`` is always positive.
For complex arguments, ``x = a + ib``, we can write
:math:`e^x = e^a e^{ib}`. The first term, :math:`e^a`, is already
known (it is the real argument, described above). The second term,
:math:`e^{ib}`, is :math:`\\cos b + i \\sin b`, a function with magnitude
1 and a periodic phase.
References
----------
.. [1] Wikipedia, "Exponential function",
http://en.wikipedia.org/wiki/Exponential_function
.. [2] M. Abramovitz and I. A. Stegun, "Handbook of Mathematical Functions
with Formulas, Graphs, and Mathematical Tables," Dover, 1964, p. 69,
http://www.math.sfu.ca/~cbm/aands/page_69.htm
Examples
--------
Plot the magnitude and phase of ``exp(x)`` in the complex plane:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-2*np.pi, 2*np.pi, 100)
>>> xx = x + 1j * x[:, np.newaxis] # a + ib over complex plane
>>> out = np.exp(xx)
>>> plt.subplot(121)
>>> plt.imshow(np.abs(out),
... extent=[-2*np.pi, 2*np.pi, -2*np.pi, 2*np.pi])
>>> plt.title('Magnitude of exp(x)')
>>> plt.subplot(122)
>>> plt.imshow(np.angle(out),
... extent=[-2*np.pi, 2*np.pi, -2*np.pi, 2*np.pi])
>>> plt.title('Phase (angle) of exp(x)')
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'exp2',
"""
Calculate `2**p` for all `p` in the input array.
Parameters
----------
x : array_like
Input values.
out : ndarray, optional
Array to insert results into.
Returns
-------
out : ndarray
Element-wise 2 to the power `x`.
See Also
--------
exp : Calculate ``e**x`` for all elements in the array.
Notes
-----
.. versionadded:: 1.3.0
Examples
--------
>>> np.exp2([2, 3])
array([ 4., 8.])
""")
add_newdoc('numpy.core.umath', 'expm1',
"""
Calculate ``exp(x) - 1`` for all elements in the array.
Parameters
----------
x : array_like
Input values.
Returns
-------
out : ndarray
Element-wise exponential minus one: ``out = exp(x) - 1``.
See Also
--------
log1p : ``log(1 + x)``, the inverse of expm1.
Notes
-----
This function provides greater precision than the formula ``exp(x) - 1``
for small values of ``x``.
Examples
--------
The true value of ``exp(1e-10) - 1`` is ``1.00000000005e-10`` to
about 32 significant digits. This example shows the superiority of
expm1 in this case.
>>> np.expm1(1e-10)
1.00000000005e-10
>>> np.exp(1e-10) - 1
1.000000082740371e-10
""")
add_newdoc('numpy.core.umath', 'fabs',
"""
Compute the absolute values elementwise.
This function returns the absolute values (positive magnitude) of the data
in `x`. Complex values are not handled, use `absolute` to find the
absolute values of complex data.
Parameters
----------
x : array_like
The array of numbers for which the absolute values are required. If
`x` is a scalar, the result `y` will also be a scalar.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
y : {ndarray, scalar}
The absolute values of `x`, the returned values are always floats.
See Also
--------
absolute : Absolute values including `complex` types.
Examples
--------
>>> np.fabs(-1)
1.0
>>> np.fabs([-1.2, 1.2])
array([ 1.2, 1.2])
""")
add_newdoc('numpy.core.umath', 'floor',
"""
Return the floor of the input, element-wise.
The floor of the scalar `x` is the largest integer `i`, such that
`i <= x`. It is often denoted as :math:`\\lfloor x \\rfloor`.
Parameters
----------
x : array_like
Input data.
Returns
-------
y : {ndarray, scalar}
The floor of each element in `x`.
See Also
--------
ceil, trunc, rint
Notes
-----
Some spreadsheet programs calculate the "floor-towards-zero", in other
words ``floor(-2.5) == -2``. NumPy, however, uses a definition of
`floor` such that ``floor(-2.5) == -3``.
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.floor(a)
array([-2., -2., -1., 0., 1., 1., 2.])
""")
add_newdoc('numpy.core.umath', 'floor_divide',
"""
Return the largest integer smaller or equal to the division of the inputs.
Parameters
----------
x1 : array_like
Numerator.
x2 : array_like
Denominator.
Returns
-------
y : ndarray
y = floor(`x1`/`x2`)
See Also
--------
divide : Standard division.
floor : Round a number to the nearest integer toward minus infinity.
ceil : Round a number to the nearest integer toward infinity.
Examples
--------
>>> np.floor_divide(7,3)
2
>>> np.floor_divide([1., 2., 3., 4.], 2.5)
array([ 0., 0., 1., 1.])
""")
add_newdoc('numpy.core.umath', 'fmod',
"""
Return the element-wise remainder of division.
This is the NumPy implementation of the C library function ``fmod``; it
should not be confused with the Python modulus operator ``x1 % x2``
(see `remainder`).
Parameters
----------
x1 : array_like
Dividend.
x2 : array_like
Divisor.
Returns
-------
y : array_like
The remainder of the division of `x1` by `x2`.
See Also
--------
remainder : Modulo operation where the quotient is `floor(x1/x2)`.
divide
Notes
-----
The result of the modulo operation for negative dividend and divisors is
bound by conventions. In `fmod`, the sign of the remainder is the sign of
the dividend. In `remainder`, the sign of the result is the sign of the
divisor.
Examples
--------
>>> np.fmod([-3, -2, -1, 1, 2, 3], 2)
array([-1, 0, -1, 1, 0, 1])
>>> np.remainder([-3, -2, -1, 1, 2, 3], 2)
array([1, 0, 1, 1, 0, 1])
>>> np.fmod([5, 3], [2, 2.])
array([ 1., 1.])
>>> a = np.arange(-3, 3).reshape(3, 2)
>>> a
array([[-3, -2],
[-1, 0],
[ 1, 2]])
>>> np.fmod(a, [2,2])
array([[-1, 0],
[-1, 0],
[ 1, 0]])
""")
add_newdoc('numpy.core.umath', 'greater',
"""
Return (x1 > x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays.
Returns
-------
Out : {ndarray, bool}
Output array of bools, or a single bool if `x1` and `x2` are scalars.
See Also
--------
greater_equal, less, less_equal, equal, not_equal
Examples
--------
>>> np.greater([4,2],[2,2])
array([ True, False], dtype=bool)
If the inputs are ndarrays, then np.greater is equivalent to '>'.
>>> a = np.array([4,2])
>>> b = np.array([2,2])
>>> a > b
array([ True, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'greater_equal',
"""
Return (x1 >= x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays.
Returns
-------
out : {ndarray, bool}
Output array of bools, or a single bool if x1 and x2 are scalars.
See Also
--------
greater, less, less_equal, equal, not_equal
Examples
--------
>>> np.greater_equal([4, 2, 1], [2, 2, 2])
array([ True, True, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'hypot',
"""
Given the "legs" of a right triangle, return its hypotenuse.
Equivalent to ``sqrt(x1**2 + x2**2)``, element-wise. If `x1` or
`x2` is scalar_like (i.e., unambiguously cast-able to a scalar type),
it is broadcast for use with each element of the other argument.
(See Examples)
Parameters
----------
x1, x2 : array_like
Leg of the triangle(s).
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
z : ndarray
The hypotenuse of the triangle(s).
Examples
--------
>>> np.hypot(3*np.ones((3, 3)), 4*np.ones((3, 3)))
array([[ 5., 5., 5.],
[ 5., 5., 5.],
[ 5., 5., 5.]])
Example showing broadcast of scalar_like argument:
>>> np.hypot(3*np.ones((3, 3)), [4])
array([[ 5., 5., 5.],
[ 5., 5., 5.],
[ 5., 5., 5.]])
""")
add_newdoc('numpy.core.umath', 'invert',
"""
Compute bit-wise inversion, or bit-wise NOT, element-wise.
Computes the bit-wise NOT of the underlying binary representation of
the integers in the input arrays. This ufunc implements the C/Python
operator ``~``.
For signed integer inputs, the two's complement is returned.
In a two's-complement system negative numbers are represented by the two's
complement of the absolute value. This is the most common method of
representing signed integers on computers [1]_. An N-bit two's-complement
system can represent every integer in the range
:math:`-2^{N-1}` to :math:`+2^{N-1}-1`.
Parameters
----------
x1 : array_like
Only integer types are handled (including booleans).
Returns
-------
out : array_like
Result.
See Also
--------
bitwise_and, bitwise_or, bitwise_xor
logical_not
binary_repr :
Return the binary representation of the input number as a string.
Notes
-----
`bitwise_not` is an alias for `invert`:
>>> np.bitwise_not is np.invert
True
References
----------
.. [1] Wikipedia, "Two's complement",
http://en.wikipedia.org/wiki/Two's_complement
Examples
--------
We've seen that 13 is represented by ``00001101``.
The invert or bit-wise NOT of 13 is then:
>>> np.invert(np.array([13], dtype=np.uint8))
array([242], dtype=uint8)
>>> np.binary_repr(13, width=8)
'00001101'
>>> np.binary_repr(242, width=8)
'11110010'
The result depends on the bit-width:
>>> np.invert(np.array([13], dtype=np.uint16))
array([65522], dtype=uint16)
>>> np.binary_repr(13, width=16)
'0000000000001101'
>>> np.binary_repr(65522, width=16)
'1111111111110010'
When using signed integer types the result is the two's complement of
the result for the unsigned type:
>>> np.invert(np.array([13], dtype=np.int8))
array([-14], dtype=int8)
>>> np.binary_repr(-14, width=8)
'11110010'
Booleans are accepted as well:
>>> np.invert(np.array([True, False]))
array([False, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'isfinite',
"""
Test element-wise for finiteness (neither infinity nor Not a Number).
The result is returned as a boolean array.
Parameters
----------
x : array_like
Input values.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See `doc.ufuncs`.
Returns
-------
y : ndarray, bool
For scalar input, the result is a new boolean with value True
if the input is finite; otherwise the value is False (input is
either positive infinity, negative infinity or Not a Number).
For array input, the result is a boolean array with the same
dimensions as the input and the values are True if the corresponding
element of the input is finite; otherwise the values are False (element
is either positive infinity, negative infinity or Not a Number).
See Also
--------
isinf, isneginf, isposinf, isnan
Notes
-----
Not a Number, positive infinity and negative infinity are considered
to be non-finite.
NumPy uses the IEEE Standard for Binary Floating-Point Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity,
that positive infinity is not equivalent to negative infinity, and that
infinity is equivalent to positive infinity.
Errors result if the second argument is also supplied when `x` is a scalar
input, or if first and second arguments have different shapes.
Examples
--------
>>> np.isfinite(1)
True
>>> np.isfinite(0)
True
>>> np.isfinite(np.nan)
False
>>> np.isfinite(np.inf)
False
>>> np.isfinite(np.NINF)
False
>>> np.isfinite([np.log(-1.),1.,np.log(0)])
array([False, True, False], dtype=bool)
>>> x = np.array([-np.inf, 0., np.inf])
>>> y = np.array([2, 2, 2])
>>> np.isfinite(x, y)
array([0, 1, 0])
>>> y
array([0, 1, 0])
""")
add_newdoc('numpy.core.umath', 'isinf',
"""
Test element-wise for positive or negative infinity, return result as bool
array.
Parameters
----------
x : array_like
Input values
y : array_like, optional
An array with the same shape as `x` to store the result.
Returns
-------
y : {ndarray, bool}
For scalar input, the result is a new boolean with value True
if the input is positive or negative infinity; otherwise the value
is False.
For array input, the result is a boolean array with the same
dimensions as the input and the values are True if the corresponding
element of the input is positive or negative infinity; otherwise the
values are False. If a second argument is supplied the result is
stored there. If the type of that array is a numeric type the result
is represented as zeros and ones, if the type is boolean then as
False and True. The return value `y` is then a reference to that array.
See Also
--------
isneginf, isposinf, isnan, isfinite
Notes
-----
NumPy uses the IEEE Standard for Binary Floating-Point Arithmetic
(IEEE 754).
Errors result if second argument is also supplied with scalar input or
if first and second arguments have different shapes.
Examples
--------
>>> np.isinf(np.inf)
True
>>> np.isinf(np.nan)
False
>>> np.isinf(np.NINF)
True
>>> np.isinf([np.inf, -np.inf, 1.0, np.nan])
array([ True, True, False, False], dtype=bool)
>>> x = np.array([-np.inf, 0., np.inf])
>>> y = np.array([2, 2, 2])
>>> np.isinf(x, y)
array([1, 0, 1])
>>> y
array([1, 0, 1])
""")
add_newdoc('numpy.core.umath', 'isnan',
"""
Test element-wise for Not a Number (NaN), return result as a bool array.
Parameters
----------
x : array_like
Input array.
Returns
-------
y : {ndarray, bool}
For scalar input, the result is a new boolean with value True
if the input is NaN; otherwise the value is False.
For array input, the result is a boolean array with the same
dimensions as the input and the values are True if the corresponding
element of the input is NaN; otherwise the values are False.
See Also
--------
isinf, isneginf, isposinf, isfinite
Notes
-----
NumPy uses the IEEE Standard for Binary Floating-Point Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity.
Examples
--------
>>> np.isnan(np.nan)
True
>>> np.isnan(np.inf)
False
>>> np.isnan([np.log(-1.),1.,np.log(0)])
array([ True, False, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'left_shift',
"""
Shift the bits of an integer to the left.
Bits are shifted to the left by appending `x2` 0s at the right of `x1`.
Since the internal representation of numbers is in binary format, this
operation is equivalent to multiplying `x1` by ``2**x2``.
Parameters
----------
x1 : array_like of integer type
Input values.
x2 : array_like of integer type
Number of zeros to append to `x1`. Has to be non-negative.
Returns
-------
out : array of integer type
Return `x1` with bits shifted `x2` times to the left.
See Also
--------
right_shift : Shift the bits of an integer to the right.
binary_repr : Return the binary representation of the input number
as a string.
Examples
--------
>>> np.binary_repr(5)
'101'
>>> np.left_shift(5, 2)
20
>>> np.binary_repr(20)
'10100'
>>> np.left_shift(5, [1,2,3])
array([10, 20, 40])
""")
add_newdoc('numpy.core.umath', 'less',
"""
Return (x1 < x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays.
Returns
-------
Out : ndarray of bools
Output array of bools, or a single bool if `x1` and `x2` are scalars.
See Also
--------
less_equal, greater, greater_equal, equal, not_equal
Examples
--------
>>> np.less([1, 2], [2, 2])
array([ True, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'less_equal',
"""
Return (x1 <= x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays.
Returns
-------
Out : {ndarray, bool}
Output array of bools, or a single bool if `x1` and `x2` are scalars.
See Also
--------
less, greater_equal, greater, equal, not_equal
Examples
--------
>>> np.less_equal([1, 2, 3], [2, 2, 2])
array([ True, True, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'log',
"""
Natural logarithm, element-wise.
The natural logarithm `log` is the inverse of the exponential function,
so that `log(exp(x)) = x`. The natural logarithm is logarithm in base `e`.
Parameters
----------
x : array_like
Input value.
Returns
-------
y : ndarray
The natural logarithm of `x`, element-wise.
See Also
--------
log10, log2, log1p, emath.log
Notes
-----
Logarithm is a multivalued function: for each `x` there is an infinite
number of `z` such that `exp(z) = x`. The convention is to return the `z`
whose imaginary part lies in `[-pi, pi]`.
For real-valued input data types, `log` always returns real output. For
each value that cannot be expressed as a real number or infinity, it
yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `log` is a complex analytical function that
has a branch cut `[-inf, 0]` and is continuous from above on it. `log`
handles the floating-point negative zero as an infinitesimal negative
number, conforming to the C99 standard.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 67. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Logarithm". http://en.wikipedia.org/wiki/Logarithm
Examples
--------
>>> np.log([1, np.e, np.e**2, 0])
array([ 0., 1., 2., -Inf])
""")
add_newdoc('numpy.core.umath', 'log10',
"""
Return the base 10 logarithm of the input array, element-wise.
Parameters
----------
x : array_like
Input values.
Returns
-------
y : ndarray
The logarithm to the base 10 of `x`, element-wise. NaNs are
returned where x is negative.
See Also
--------
emath.log10
Notes
-----
Logarithm is a multivalued function: for each `x` there is an infinite
number of `z` such that `10**z = x`. The convention is to return the `z`
whose imaginary part lies in `[-pi, pi]`.
For real-valued input data types, `log10` always returns real output. For
each value that cannot be expressed as a real number or infinity, it
yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `log10` is a complex analytical function that
has a branch cut `[-inf, 0]` and is continuous from above on it. `log10`
handles the floating-point negative zero as an infinitesimal negative
number, conforming to the C99 standard.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 67. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Logarithm". http://en.wikipedia.org/wiki/Logarithm
Examples
--------
>>> np.log10([1e-15, -3.])
array([-15., NaN])
""")
add_newdoc('numpy.core.umath', 'log2',
"""
Base-2 logarithm of `x`.
Parameters
----------
x : array_like
Input values.
Returns
-------
y : ndarray
Base-2 logarithm of `x`.
See Also
--------
log, log10, log1p, emath.log2
Notes
-----
.. versionadded:: 1.3.0
Logarithm is a multivalued function: for each `x` there is an infinite
number of `z` such that `2**z = x`. The convention is to return the `z`
whose imaginary part lies in `[-pi, pi]`.
For real-valued input data types, `log2` always returns real output. For
each value that cannot be expressed as a real number or infinity, it
yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `log2` is a complex analytical function that
has a branch cut `[-inf, 0]` and is continuous from above on it. `log2`
handles the floating-point negative zero as an infinitesimal negative
number, conforming to the C99 standard.
Examples
--------
>>> x = np.array([0, 1, 2, 2**4])
>>> np.log2(x)
array([-Inf, 0., 1., 4.])
>>> xi = np.array([0+1.j, 1, 2+0.j, 4.j])
>>> np.log2(xi)
array([ 0.+2.26618007j, 0.+0.j , 1.+0.j , 2.+2.26618007j])
""")
add_newdoc('numpy.core.umath', 'logaddexp',
"""
Logarithm of the sum of exponentiations of the inputs.
Calculates ``log(exp(x1) + exp(x2))``. This function is useful in
statistics where the calculated probabilities of events may be so small
as to exceed the range of normal floating point numbers. In such cases
the logarithm of the calculated probability is stored. This function
allows adding probabilities stored in such a fashion.
Parameters
----------
x1, x2 : array_like
Input values.
Returns
-------
result : ndarray
Logarithm of ``exp(x1) + exp(x2)``.
See Also
--------
logaddexp2: Logarithm of the sum of exponentiations of inputs in base-2.
Notes
-----
.. versionadded:: 1.3.0
Examples
--------
>>> prob1 = np.log(1e-50)
>>> prob2 = np.log(2.5e-50)
>>> prob12 = np.logaddexp(prob1, prob2)
>>> prob12
-113.87649168120691
>>> np.exp(prob12)
3.5000000000000057e-50
""")
add_newdoc('numpy.core.umath', 'logaddexp2',
"""
Logarithm of the sum of exponentiations of the inputs in base-2.
Calculates ``log2(2**x1 + 2**x2)``. This function is useful in machine
learning when the calculated probabilities of events may be so small
as to exceed the range of normal floating point numbers. In such cases
the base-2 logarithm of the calculated probability can be used instead.
This function allows adding probabilities stored in such a fashion.
Parameters
----------
x1, x2 : array_like
Input values.
out : ndarray, optional
Array to store results in.
Returns
-------
result : ndarray
Base-2 logarithm of ``2**x1 + 2**x2``.
See Also
--------
logaddexp: Logarithm of the sum of exponentiations of the inputs.
Notes
-----
.. versionadded:: 1.3.0
Examples
--------
>>> prob1 = np.log2(1e-50)
>>> prob2 = np.log2(2.5e-50)
>>> prob12 = np.logaddexp2(prob1, prob2)
>>> prob1, prob2, prob12
(-166.09640474436813, -164.77447664948076, -164.28904982231052)
>>> 2**prob12
3.4999999999999914e-50
""")
add_newdoc('numpy.core.umath', 'log1p',
"""
Return the natural logarithm of one plus the input array, element-wise.
Calculates ``log(1 + x)``.
Parameters
----------
x : array_like
Input values.
Returns
-------
y : ndarray
Natural logarithm of `1 + x`, element-wise.
See Also
--------
expm1 : ``exp(x) - 1``, the inverse of `log1p`.
Notes
-----
For real-valued input, `log1p` is accurate also for `x` so small
that `1 + x == 1` in floating-point accuracy.
Logarithm is a multivalued function: for each `x` there is an infinite
number of `z` such that `exp(z) = 1 + x`. The convention is to return
the `z` whose imaginary part lies in `[-pi, pi]`.
For real-valued input data types, `log1p` always returns real output. For
each value that cannot be expressed as a real number or infinity, it
yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `log1p` is a complex analytical function that
has a branch cut `[-inf, -1]` and is continuous from above on it. `log1p`
handles the floating-point negative zero as an infinitesimal negative
number, conforming to the C99 standard.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 67. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Logarithm". http://en.wikipedia.org/wiki/Logarithm
Examples
--------
>>> np.log1p(1e-99)
1e-99
>>> np.log(1 + 1e-99)
0.0
""")
add_newdoc('numpy.core.umath', 'logical_and',
"""
Compute the truth value of x1 AND x2 elementwise.
Parameters
----------
x1, x2 : array_like
Input arrays. `x1` and `x2` must be of the same shape.
Returns
-------
y : {ndarray, bool}
Boolean result with the same shape as `x1` and `x2` of the logical
AND operation on corresponding elements of `x1` and `x2`.
See Also
--------
logical_or, logical_not, logical_xor
bitwise_and
Examples
--------
>>> np.logical_and(True, False)
False
>>> np.logical_and([True, False], [False, False])
array([False, False], dtype=bool)
>>> x = np.arange(5)
>>> np.logical_and(x>1, x<4)
array([False, False, True, True, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'logical_not',
"""
Compute the truth value of NOT x elementwise.
Parameters
----------
x : array_like
Logical NOT is applied to the elements of `x`.
Returns
-------
y : {ndarray, bool}
Boolean result with the same shape as `x` of the NOT operation
on elements of `x`.
See Also
--------
logical_and, logical_or, logical_xor
Examples
--------
>>> np.logical_not(3)
False
>>> np.logical_not([True, False, 0, 1])
array([False, True, True, False], dtype=bool)
>>> x = np.arange(5)
>>> np.logical_not(x<3)
array([False, False, False, True, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'logical_or',
"""
Compute the truth value of x1 OR x2 elementwise.
Parameters
----------
x1, x2 : array_like
Logical OR is applied to the elements of `x1` and `x2`.
They have to be of the same shape.
Returns
-------
y : {ndarray, bool}
Boolean result with the same shape as `x1` and `x2` of the logical
OR operation on elements of `x1` and `x2`.
See Also
--------
logical_and, logical_not, logical_xor
bitwise_or
Examples
--------
>>> np.logical_or(True, False)
True
>>> np.logical_or([True, False], [False, False])
array([ True, False], dtype=bool)
>>> x = np.arange(5)
>>> np.logical_or(x < 1, x > 3)
array([ True, False, False, False, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'logical_xor',
"""
Compute the truth value of x1 XOR x2 elementwise.
Parameters
----------
x1, x2 : array_like
Logical XOR is applied to the elements of `x1` and `x2`.
They have to be of the same shape.
Returns
-------
y : {ndarray, bool}
Boolean result with the same shape as `x1` and `x2` of the logical
XOR operation on elements of `x1` and `x2`.
See Also
--------
logical_and, logical_or, logical_not
bitwise_xor
Examples
--------
>>> np.logical_xor(True, False)
True
>>> np.logical_xor([True, True, False, False], [True, False, True, False])
array([False, True, True, False], dtype=bool)
>>> x = np.arange(5)
>>> np.logical_xor(x < 1, x > 3)
array([ True, False, False, False, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'maximum',
"""
Element-wise maximum of array elements.
Compare two arrays and returns a new array containing
the element-wise maxima. If one of the elements being
compared is a nan, then that element is returned. If
both elements are nans then the first is returned. The
latter distinction is important for complex nans,
which are defined as at least one of the real or
imaginary parts being a nan. The net effect is that
nans are propagated.
Parameters
----------
x1, x2 : array_like
The arrays holding the elements to be compared. They must have
the same shape, or shapes that can be broadcast to a single shape.
Returns
-------
y : {ndarray, scalar}
The maximum of `x1` and `x2`, element-wise. Returns scalar if
both `x1` and `x2` are scalars.
See Also
--------
minimum :
element-wise minimum
fmax :
element-wise maximum that ignores nans unless both inputs are nans.
fmin :
element-wise minimum that ignores nans unless both inputs are nans.
Notes
-----
Equivalent to ``np.where(x1 > x2, x1, x2)`` but faster and does proper
broadcasting.
Examples
--------
>>> np.maximum([2, 3, 4], [1, 5, 2])
array([2, 5, 4])
>>> np.maximum(np.eye(2), [0.5, 2])
array([[ 1. , 2. ],
[ 0.5, 2. ]])
>>> np.maximum([np.nan, 0, np.nan], [0, np.nan, np.nan])
array([ NaN, NaN, NaN])
>>> np.maximum(np.Inf, 1)
inf
""")
add_newdoc('numpy.core.umath', 'minimum',
"""
Element-wise minimum of array elements.
Compare two arrays and returns a new array containing the element-wise
minima. If one of the elements being compared is a nan, then that element
is returned. If both elements are nans then the first is returned. The
latter distinction is important for complex nans, which are defined as at
least one of the real or imaginary parts being a nan. The net effect is
that nans are propagated.
Parameters
----------
x1, x2 : array_like
The arrays holding the elements to be compared. They must have
the same shape, or shapes that can be broadcast to a single shape.
Returns
-------
y : {ndarray, scalar}
The minimum of `x1` and `x2`, element-wise. Returns scalar if
both `x1` and `x2` are scalars.
See Also
--------
maximum :
element-wise maximum that propagates nans.
fmax :
element-wise maximum that ignores nans unless both inputs are nans.
fmin :
element-wise minimum that ignores nans unless both inputs are nans.
Notes
-----
The minimum is equivalent to ``np.where(x1 <= x2, x1, x2)`` when neither
x1 nor x2 are nans, but it is faster and does proper broadcasting.
Examples
--------
>>> np.minimum([2, 3, 4], [1, 5, 2])
array([1, 3, 2])
>>> np.minimum(np.eye(2), [0.5, 2]) # broadcasting
array([[ 0.5, 0. ],
[ 0. , 1. ]])
>>> np.minimum([np.nan, 0, np.nan],[0, np.nan, np.nan])
array([ NaN, NaN, NaN])
""")
add_newdoc('numpy.core.umath', 'fmax',
"""
Element-wise maximum of array elements.
Compare two arrays and returns a new array containing the element-wise
maxima. If one of the elements being compared is a nan, then the non-nan
element is returned. If both elements are nans then the first is returned.
The latter distinction is important for complex nans, which are defined as
at least one of the real or imaginary parts being a nan. The net effect is
that nans are ignored when possible.
Parameters
----------
x1, x2 : array_like
The arrays holding the elements to be compared. They must have
the same shape.
Returns
-------
y : {ndarray, scalar}
The maximum of `x1` and `x2`, element-wise. Returns scalar if
both `x1` and `x2` are scalars.
See Also
--------
fmin :
element-wise minimum that ignores nans unless both inputs are nans.
maximum :
element-wise maximum that propagates nans.
minimum :
element-wise minimum that propagates nans.
Notes
-----
.. versionadded:: 1.3.0
The fmax is equivalent to ``np.where(x1 >= x2, x1, x2)`` when neither
x1 nor x2 are nans, but it is faster and does proper broadcasting.
Examples
--------
>>> np.fmax([2, 3, 4], [1, 5, 2])
array([2, 5, 4])
>>> np.fmax(np.eye(2), [0.5, 2])
array([[ 1. , 2. ],
[ 0.5, 2. ]])
>>> np.fmax([np.nan, 0, np.nan],[0, np.nan, np.nan])
array([ 0., 0., NaN])
""")
add_newdoc('numpy.core.umath', 'fmin',
"""
fmin(x1, x2[, out])
Element-wise minimum of array elements.
Compare two arrays and returns a new array containing the element-wise
minima. If one of the elements being compared is a nan, then the non-nan
element is returned. If both elements are nans then the first is returned.
The latter distinction is important for complex nans, which are defined as
at least one of the real or imaginary parts being a nan. The net effect is
that nans are ignored when possible.
Parameters
----------
x1, x2 : array_like
The arrays holding the elements to be compared. They must have
the same shape.
Returns
-------
y : {ndarray, scalar}
The minimum of `x1` and `x2`, element-wise. Returns scalar if
both `x1` and `x2` are scalars.
See Also
--------
fmax :
element-wise maximum that ignores nans unless both inputs are nans.
maximum :
element-wise maximum that propagates nans.
minimum :
element-wise minimum that propagates nans.
Notes
-----
.. versionadded:: 1.3.0
The fmin is equivalent to ``np.where(x1 <= x2, x1, x2)`` when neither
x1 nor x2 are nans, but it is faster and does proper broadcasting.
Examples
--------
>>> np.fmin([2, 3, 4], [1, 5, 2])
array([1, 3, 2])
>>> np.fmin(np.eye(2), [0.5, 2])
array([[ 0.5,  0. ],
       [ 0. ,  1. ]])
>>> np.fmin([np.nan, 0, np.nan],[0, np.nan, np.nan])
array([ 0., 0., NaN])
""")
add_newdoc('numpy.core.umath', 'modf',
"""
Return the fractional and integral parts of an array, element-wise.
The fractional and integral parts are negative if the given number is
negative.
Parameters
----------
x : array_like
Input array.
Returns
-------
y1 : ndarray
Fractional part of `x`.
y2 : ndarray
Integral part of `x`.
Notes
-----
For integer input the return values are floats.
Examples
--------
>>> np.modf([0, 3.5])
(array([ 0. , 0.5]), array([ 0., 3.]))
>>> np.modf(-0.5)
(-0.5, -0)
""")
add_newdoc('numpy.core.umath', 'multiply',
"""
Multiply arguments element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays to be multiplied.
Returns
-------
y : ndarray
The product of `x1` and `x2`, element-wise. Returns a scalar if
both `x1` and `x2` are scalars.
Notes
-----
Equivalent to `x1` * `x2` in terms of array broadcasting.
Examples
--------
>>> np.multiply(2.0, 4.0)
8.0
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> np.multiply(x1, x2)
array([[ 0., 1., 4.],
[ 0., 4., 10.],
[ 0., 7., 16.]])
""")
add_newdoc('numpy.core.umath', 'negative',
"""
Returns an array with the negative of each element of the original array.
Parameters
----------
x : {array_like, scalar}
Input array.
Returns
-------
y : {ndarray, scalar}
Returned array or scalar `y=-x`.
Examples
--------
>>> np.negative([1.,-1.])
array([-1., 1.])
""")
add_newdoc('numpy.core.umath', 'not_equal',
"""
Return (x1 != x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays.
out : ndarray, optional
A placeholder the same shape as `x1` to store the result.
See `doc.ufuncs` (Section "Output arguments") for more details.
Returns
-------
not_equal : ndarray bool, scalar bool
For each element in `x1, x2`, return True if `x1` is not equal
to `x2` and False otherwise.
See Also
--------
equal, greater, greater_equal, less, less_equal
Examples
--------
>>> np.not_equal([1.,2.], [1., 3.])
array([False, True], dtype=bool)
>>> np.not_equal([1, 2], [[1, 3],[1, 4]])
array([[False, True],
[False, True]], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'ones_like',
"""
Returns an array of ones with the same shape and type as a given array.
Equivalent to ``np.ones(a.shape, dtype=a.dtype)``.
Please refer to the documentation for `zeros_like`.
See Also
--------
zeros_like
Examples
--------
>>> a = np.array([[1, 2, 3], [4, 5, 6]])
>>> np.ones_like(a)
array([[1, 1, 1],
[1, 1, 1]])
""")
add_newdoc('numpy.core.umath', 'power',
"""
Returns element-wise base array raised to power from second array.
Raise each base in `x1` to the power of the exponents in `x2`. This
requires that `x1` and `x2` be broadcastable to the same shape.
Parameters
----------
x1 : array_like
The bases.
x2 : array_like
The exponents.
Returns
-------
y : ndarray
The bases in `x1` raised to the exponents in `x2`.
Examples
--------
Cube each element in a list.
>>> x1 = list(range(6))
>>> x1
[0, 1, 2, 3, 4, 5]
>>> np.power(x1, 3)
array([ 0, 1, 8, 27, 64, 125])
Raise the bases to different exponents.
>>> x2 = [1.0, 2.0, 3.0, 3.0, 2.0, 1.0]
>>> np.power(x1, x2)
array([ 0., 1., 8., 27., 16., 5.])
The effect of broadcasting.
>>> x2 = np.array([[1, 2, 3, 3, 2, 1], [1, 2, 3, 3, 2, 1]])
>>> x2
array([[1, 2, 3, 3, 2, 1],
[1, 2, 3, 3, 2, 1]])
>>> np.power(x1, x2)
array([[ 0, 1, 8, 27, 16, 5],
[ 0, 1, 8, 27, 16, 5]])
""")
add_newdoc('numpy.core.umath', 'radians',
"""
Convert angles from degrees to radians.
Parameters
----------
x : array_like
Input array in degrees.
out : ndarray, optional
Output array of same shape as x.
Returns
-------
y : ndarray
The corresponding radian values.
See Also
--------
deg2rad : equivalent function
Examples
--------
Convert a degree array to radians
>>> deg = np.arange(12.) * 30.
>>> np.radians(deg)
array([ 0. , 0.52359878, 1.04719755, 1.57079633, 2.0943951 ,
2.61799388, 3.14159265, 3.66519143, 4.1887902 , 4.71238898,
5.23598776, 5.75958653])
>>> out = np.zeros((deg.shape))
>>> ret = np.radians(deg, out)
>>> ret is out
True
""")
add_newdoc('numpy.core.umath', 'deg2rad',
"""
Convert angles from degrees to radians.
Parameters
----------
x : array_like
Angles in degrees.
Returns
-------
y : ndarray
The corresponding angle in radians.
See Also
--------
rad2deg : Convert angles from radians to degrees.
unwrap : Remove large jumps in angle by wrapping.
Notes
-----
.. versionadded:: 1.3.0
``deg2rad(x)`` is ``x * pi / 180``.
Examples
--------
>>> np.deg2rad(180)
3.1415926535897931
""")
add_newdoc('numpy.core.umath', 'reciprocal',
"""
Return the reciprocal of the argument, element-wise.
Calculates ``1/x``.
Parameters
----------
x : array_like
Input array.
Returns
-------
y : ndarray
Return array.
Notes
-----
.. note::
This function is not designed to work with integers.
For integer arguments with absolute value larger than 1 the result is
always zero because of the way Python handles integer division.
For integer zero the result is an overflow.
Examples
--------
>>> np.reciprocal(2.)
0.5
>>> np.reciprocal([1, 2., 3.33])
array([ 1. , 0.5 , 0.3003003])
""")
add_newdoc('numpy.core.umath', 'remainder',
"""
Return element-wise remainder of division.
Computes ``x1 - floor(x1 / x2) * x2``.
Parameters
----------
x1 : array_like
Dividend array.
x2 : array_like
Divisor array.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
y : ndarray
The remainder of the quotient ``x1/x2``, element-wise. Returns a scalar
if both `x1` and `x2` are scalars.
See Also
--------
divide, floor
Notes
-----
Returns 0 when `x2` is 0 and both `x1` and `x2` are (arrays of) integers.
Examples
--------
>>> np.remainder([4, 7], [2, 3])
array([0, 1])
>>> np.remainder(np.arange(7), 5)
array([0, 1, 2, 3, 4, 0, 1])
""")
add_newdoc('numpy.core.umath', 'right_shift',
"""
Shift the bits of an integer to the right.
Bits are shifted to the right by removing `x2` bits at the right of `x1`.
Since the internal representation of numbers is in binary format, this
operation is equivalent to dividing `x1` by ``2**x2``.
Parameters
----------
x1 : array_like, int
Input values.
x2 : array_like, int
Number of bits to remove at the right of `x1`.
Returns
-------
out : ndarray, int
Return `x1` with bits shifted `x2` times to the right.
See Also
--------
left_shift : Shift the bits of an integer to the left.
binary_repr : Return the binary representation of the input number
as a string.
Examples
--------
>>> np.binary_repr(10)
'1010'
>>> np.right_shift(10, 1)
5
>>> np.binary_repr(5)
'101'
>>> np.right_shift(10, [1,2,3])
array([5, 2, 1])
""")
add_newdoc('numpy.core.umath', 'rint',
"""
Round elements of the array to the nearest integer.
Parameters
----------
x : array_like
Input array.
Returns
-------
out : {ndarray, scalar}
Output array is same shape and type as `x`.
See Also
--------
ceil, floor, trunc
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.rint(a)
array([-2., -2., -0., 0., 2., 2., 2.])
""")
add_newdoc('numpy.core.umath', 'sign',
"""
Returns an element-wise indication of the sign of a number.
The `sign` function returns ``-1 if x < 0, 0 if x==0, 1 if x > 0``.
Parameters
----------
x : array_like
Input values.
Returns
-------
y : ndarray
The sign of `x`.
Examples
--------
>>> np.sign([-5., 4.5])
array([-1., 1.])
>>> np.sign(0)
0
""")
add_newdoc('numpy.core.umath', 'signbit',
"""
Returns element-wise True where signbit is set (less than zero).
Parameters
----------
x : array_like
The input value(s).
out : ndarray, optional
Array into which the output is placed. Its type is preserved
and it must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
out : array_like, bool
Output array.
Examples
--------
>>> np.signbit(-1.2)
True
>>> np.signbit(np.array([1, -2.3, 2.1]))
array([False, True, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'copysign',
"""
Change the sign of x1 to that of x2, element-wise.
If both arguments are arrays or sequences, they have to be of the same
length. If `x2` is a scalar, its sign will be copied to all elements of
`x1`.
Parameters
----------
x1 : array_like
Values to change the sign of.
x2 : array_like
The sign of `x2` is copied to `x1`.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
out : array_like
The values of `x1` with the sign of `x2`.
Examples
--------
>>> np.copysign(1.3, -1)
-1.3
>>> 1/np.copysign(0, 1)
inf
>>> 1/np.copysign(0, -1)
-inf
>>> np.copysign([-1, 0, 1], -1.1)
array([-1., -0., -1.])
>>> np.copysign([-1, 0, 1], np.arange(3)-1)
array([-1., 0., 1.])
""")
add_newdoc('numpy.core.umath', 'nextafter',
"""
Return the next representable floating-point value after x1 in the direction
of x2 element-wise.
Parameters
----------
x1 : array_like
Values to find the next representable value of.
x2 : array_like
The direction where to look for the next representable value of `x1`.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See `doc.ufuncs`.
Returns
-------
out : array_like
The next representable values of `x1` in the direction of `x2`.
Examples
--------
>>> eps = np.finfo(np.float64).eps
>>> np.nextafter(1, 2) == eps + 1
True
>>> np.nextafter([1, 2], [2, 1]) == [eps + 1, 2 - eps]
array([ True, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'spacing',
"""
Return the distance between x and the nearest adjacent number.
Parameters
----------
x : array_like
Values to find the spacing of.
Returns
-------
out : array_like
The spacing of values of `x`.
Notes
-----
It can be considered as a generalization of EPS:
``spacing(np.float64(1)) == np.finfo(np.float64).eps``, and there
should not be any representable number between ``x + spacing(x)`` and
x for any finite x.
Spacing of +- inf and nan is nan.
Examples
--------
>>> np.spacing(1) == np.finfo(np.float64).eps
True
""")
add_newdoc('numpy.core.umath', 'sin',
"""
Trigonometric sine, element-wise.
Parameters
----------
x : array_like
Angle, in radians (:math:`2 \\pi` rad equals 360 degrees).
Returns
-------
y : array_like
The sine of each element of x.
See Also
--------
arcsin, sinh, cos
Notes
-----
The sine is one of the fundamental functions of trigonometry
(the mathematical study of triangles). Consider a circle of radius
1 centered on the origin. A ray comes in from the :math:`+x` axis,
makes an angle at the origin (measured counter-clockwise from that
axis), and departs from the origin. The :math:`y` coordinate of
the outgoing ray's intersection with the unit circle is the sine
of that angle. It ranges from -1 for :math:`x=3\\pi / 2` to
+1 for :math:`\\pi / 2.` The function has zeroes where the angle is
a multiple of :math:`\\pi`. Sines of angles between :math:`\\pi` and
:math:`2\\pi` are negative. The numerous properties of the sine and
related functions are included in any standard trigonometry text.
Examples
--------
Print sine of one angle:
>>> np.sin(np.pi/2.)
1.0
Print sines of an array of angles given in degrees:
>>> np.sin(np.array((0., 30., 45., 60., 90.)) * np.pi / 180. )
array([ 0. , 0.5 , 0.70710678, 0.8660254 , 1. ])
Plot the sine function:
>>> import matplotlib.pylab as plt
>>> x = np.linspace(-np.pi, np.pi, 201)
>>> plt.plot(x, np.sin(x))
>>> plt.xlabel('Angle [rad]')
>>> plt.ylabel('sin(x)')
>>> plt.axis('tight')
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'sinh',
"""
Hyperbolic sine, element-wise.
Equivalent to ``1/2 * (np.exp(x) - np.exp(-x))`` or
``-1j * np.sin(1j*x)``.
Parameters
----------
x : array_like
Input array.
out : ndarray, optional
Output array of same shape as `x`.
Returns
-------
y : ndarray
The corresponding hyperbolic sine values.
Raises
------
ValueError: invalid return array shape
if `out` is provided and `out.shape` != `x.shape` (See Examples)
Notes
-----
If `out` is provided, the function writes the result into it,
and returns a reference to `out`. (See Examples)
References
----------
M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions.
New York, NY: Dover, 1972, pg. 83.
Examples
--------
>>> np.sinh(0)
0.0
>>> np.sinh(np.pi*1j/2)
1j
>>> np.sinh(np.pi*1j) # (exact value is 0)
1.2246063538223773e-016j
>>> # Discrepancy due to vagaries of floating point arithmetic.
>>> # Example of providing the optional output parameter
>>> out1 = np.array([0], dtype='d')
>>> out2 = np.sinh([0.1], out1)
>>> out2 is out1
True
>>> # Example of ValueError due to provision of shape mis-matched `out`
>>> np.sinh(np.zeros((3,3)),np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: invalid return array shape
""")
add_newdoc('numpy.core.umath', 'sqrt',
"""
Return the positive square-root of an array, element-wise.
Parameters
----------
x : array_like
The square root of each element in this array is calculated.
Returns
-------
y : ndarray
An array of the same shape as `x`, containing the square-root of
each element in `x`. If any element in `x`
is complex, a complex array is returned. If all of the elements
of `x` are real, negative elements return numpy.nan elements.
See Also
--------
numpy.lib.scimath.sqrt
A version which returns complex numbers when given negative reals.
Notes
-----
`sqrt` has a branch cut ``[-inf, 0)`` and is continuous from above on it.
Examples
--------
>>> np.sqrt([1,4,9])
array([ 1., 2., 3.])
>>> np.sqrt([4, -1, -3+4J])
array([ 2.+0.j, 0.+1.j, 1.+2.j])
>>> np.sqrt([4, -1, np.inf])
array([ 2., NaN, Inf])
""")
add_newdoc('numpy.core.umath', 'square',
"""
Return the element-wise square of the input.
Parameters
----------
x : array_like
Input data.
Returns
-------
out : ndarray
Element-wise `x*x`, of the same shape and dtype as `x`.
Returns scalar if `x` is a scalar.
See Also
--------
numpy.linalg.matrix_power
sqrt
power
Examples
--------
>>> np.square([-1j, 1])
array([-1.-0.j, 1.+0.j])
""")
add_newdoc('numpy.core.umath', 'subtract',
"""
Subtract arguments, element-wise.
Parameters
----------
x1, x2 : array_like
The arrays to be subtracted from each other.
Returns
-------
y : ndarray
The difference of `x1` and `x2`, element-wise. Returns a scalar if
both `x1` and `x2` are scalars.
Notes
-----
Equivalent to ``x1 - x2`` in terms of array broadcasting.
Examples
--------
>>> np.subtract(1.0, 4.0)
-3.0
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> np.subtract(x1, x2)
array([[ 0., 0., 0.],
[ 3., 3., 3.],
[ 6., 6., 6.]])
""")
add_newdoc('numpy.core.umath', 'tan',
"""
Compute tangent element-wise.
Equivalent to ``np.sin(x)/np.cos(x)`` element-wise.
Parameters
----------
x : array_like
Input array.
out : ndarray, optional
Output array of same shape as `x`.
Returns
-------
y : ndarray
The corresponding tangent values.
Raises
------
ValueError: invalid return array shape
if `out` is provided and `out.shape` != `x.shape` (See Examples)
Notes
-----
If `out` is provided, the function writes the result into it,
and returns a reference to `out`. (See Examples)
References
----------
M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions.
New York, NY: Dover, 1972.
Examples
--------
>>> from math import pi
>>> np.tan(np.array([-pi,pi/2,pi]))
array([ 1.22460635e-16, 1.63317787e+16, -1.22460635e-16])
>>>
>>> # Example of providing the optional output parameter illustrating
>>> # that what is returned is a reference to said parameter
>>> out1 = np.array([0], dtype='d')
>>> out2 = np.cos([0.1], out1)
>>> out2 is out1
True
>>>
>>> # Example of ValueError due to provision of shape mis-matched `out`
>>> np.cos(np.zeros((3,3)),np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: invalid return array shape
""")
add_newdoc('numpy.core.umath', 'tanh',
"""
Compute hyperbolic tangent element-wise.
Equivalent to ``np.sinh(x)/np.cosh(x)`` or
``-1j * np.tan(1j*x)``.
Parameters
----------
x : array_like
Input array.
out : ndarray, optional
Output array of same shape as `x`.
Returns
-------
y : ndarray
The corresponding hyperbolic tangent values.
Raises
------
ValueError: invalid return array shape
if `out` is provided and `out.shape` != `x.shape` (See Examples)
Notes
-----
If `out` is provided, the function writes the result into it,
and returns a reference to `out`. (See Examples)
References
----------
.. [1] M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions.
New York, NY: Dover, 1972, pg. 83.
http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Hyperbolic function",
http://en.wikipedia.org/wiki/Hyperbolic_function
Examples
--------
>>> np.tanh((0, np.pi*1j, np.pi*1j/2))
array([ 0. +0.00000000e+00j, 0. -1.22460635e-16j, 0. +1.63317787e+16j])
>>> # Example of providing the optional output parameter illustrating
>>> # that what is returned is a reference to said parameter
>>> out1 = np.array([0], dtype='d')
>>> out2 = np.tanh([0.1], out1)
>>> out2 is out1
True
>>> # Example of ValueError due to provision of shape mis-matched `out`
>>> np.tanh(np.zeros((3,3)),np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: invalid return array shape
""")
add_newdoc('numpy.core.umath', 'true_divide',
"""
Returns a true division of the inputs, element-wise.
Instead of the Python traditional 'floor division', this returns a true
division. True division adjusts the output type to present the best
answer, regardless of input types.
Parameters
----------
x1 : array_like
Dividend array.
x2 : array_like
Divisor array.
Returns
-------
out : ndarray
Result is scalar if both inputs are scalar, ndarray otherwise.
Notes
-----
The floor division operator ``//`` was added in Python 2.2 making ``//``
and ``/`` equivalent operators. The default floor division operation of
``/`` can be replaced by true division with
``from __future__ import division``.
In Python 3.0, ``//`` is the floor division operator and ``/`` the
true division operator. The ``true_divide(x1, x2)`` function is
equivalent to true division in Python.
Examples
--------
>>> x = np.arange(5)
>>> np.true_divide(x, 4)
array([ 0. , 0.25, 0.5 , 0.75, 1. ])
>>> x/4
array([0, 0, 0, 0, 1])
>>> x//4
array([0, 0, 0, 0, 1])
>>> from __future__ import division
>>> x/4
array([ 0. , 0.25, 0.5 , 0.75, 1. ])
>>> x//4
array([0, 0, 0, 0, 1])
""")
| bsd-3-clause |
Mark-Ko/data-science-from-scratch | code/introduction.py | 48 | 8085 | from __future__ import division
##########################
# #
# FINDING KEY CONNECTORS #
# #
##########################
users = [
{ "id": 0, "name": "Hero" },
{ "id": 1, "name": "Dunn" },
{ "id": 2, "name": "Sue" },
{ "id": 3, "name": "Chi" },
{ "id": 4, "name": "Thor" },
{ "id": 5, "name": "Clive" },
{ "id": 6, "name": "Hicks" },
{ "id": 7, "name": "Devin" },
{ "id": 8, "name": "Kate" },
{ "id": 9, "name": "Klein" },
{ "id": 10, "name": "Jen" }
]
friendships = [(0, 1), (0, 2), (1, 2), (1, 3), (2, 3), (3, 4),
(4, 5), (5, 6), (5, 7), (6, 8), (7, 8), (8, 9)]
# first give each user an empty list
for user in users:
user["friends"] = []
# and then populate the lists with friendships
for i, j in friendships:
# this works because users[i] is the user whose id is i
users[i]["friends"].append(users[j]) # add i as a friend of j
users[j]["friends"].append(users[i]) # add j as a friend of i
def number_of_friends(user):
"""how many friends does _user_ have?"""
return len(user["friends"]) # length of friend_ids list
total_connections = sum(number_of_friends(user)
for user in users) # 24
num_users = len(users)
avg_connections = total_connections / num_users # 2.4
################################
# #
# DATA SCIENTISTS YOU MAY KNOW #
# #
################################
def friends_of_friend_ids_bad(user):
# "foaf" is short for "friend of a friend"
return [foaf["id"]
for friend in user["friends"] # for each of user's friends
for foaf in friend["friends"]] # get each of _their_ friends
from collections import Counter # not loaded by default
def not_the_same(user, other_user):
"""two users are not the same if they have different ids"""
return user["id"] != other_user["id"]
def not_friends(user, other_user):
"""other_user is not a friend if he's not in user["friends"];
that is, if he's not_the_same as all the people in user["friends"]"""
return all(not_the_same(friend, other_user)
for friend in user["friends"])
def friends_of_friend_ids(user):
return Counter(foaf["id"]
for friend in user["friends"] # for each of my friends
for foaf in friend["friends"] # count *their* friends
if not_the_same(user, foaf) # who aren't me
and not_friends(user, foaf)) # and aren't my friends
print friends_of_friend_ids(users[3]) # Counter({0: 2, 5: 1})
interests = [
(0, "Hadoop"), (0, "Big Data"), (0, "HBase"), (0, "Java"),
(0, "Spark"), (0, "Storm"), (0, "Cassandra"),
(1, "NoSQL"), (1, "MongoDB"), (1, "Cassandra"), (1, "HBase"),
(1, "Postgres"), (2, "Python"), (2, "scikit-learn"), (2, "scipy"),
(2, "numpy"), (2, "statsmodels"), (2, "pandas"), (3, "R"), (3, "Python"),
(3, "statistics"), (3, "regression"), (3, "probability"),
(4, "machine learning"), (4, "regression"), (4, "decision trees"),
(4, "libsvm"), (5, "Python"), (5, "R"), (5, "Java"), (5, "C++"),
(5, "Haskell"), (5, "programming languages"), (6, "statistics"),
(6, "probability"), (6, "mathematics"), (6, "theory"),
(7, "machine learning"), (7, "scikit-learn"), (7, "Mahout"),
(7, "neural networks"), (8, "neural networks"), (8, "deep learning"),
(8, "Big Data"), (8, "artificial intelligence"), (9, "Hadoop"),
(9, "Java"), (9, "MapReduce"), (9, "Big Data")
]
def data_scientists_who_like(target_interest):
return [user_id
for user_id, user_interest in interests
if user_interest == target_interest]
from collections import defaultdict
# keys are interests, values are lists of user_ids with that interest
user_ids_by_interest = defaultdict(list)
for user_id, interest in interests:
user_ids_by_interest[interest].append(user_id)
# keys are user_ids, values are lists of interests for that user_id
interests_by_user_id = defaultdict(list)
for user_id, interest in interests:
interests_by_user_id[user_id].append(interest)
def most_common_interests_with(user_id):
return Counter(interested_user_id
for interest in interests_by_user_id[user_id]
for interested_user_id in user_ids_by_interest[interest]
if interested_user_id != user_id)
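# Example (sketch, not from the original text): most_common_interests_with(0)
# returns Counter({9: 3, 1: 2, 8: 1, 5: 1}), since user 9 shares Hadoop,
# Big Data and Java with user 0.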
###########################
# #
# SALARIES AND EXPERIENCE #
# #
###########################
salaries_and_tenures = [(83000, 8.7), (88000, 8.1),
(48000, 0.7), (76000, 6),
(69000, 6.5), (76000, 7.5),
(60000, 2.5), (83000, 10),
(48000, 1.9), (63000, 4.2)]
def make_chart_salaries_by_tenure(plt):
tenures = [tenure for salary, tenure in salaries_and_tenures]
salaries = [salary for salary, tenure in salaries_and_tenures]
plt.scatter(tenures, salaries)
plt.xlabel("Years Experience")
plt.ylabel("Salary")
plt.show()
# keys are years
# values are the salaries for each tenure
salary_by_tenure = defaultdict(list)
for salary, tenure in salaries_and_tenures:
salary_by_tenure[tenure].append(salary)
average_salary_by_tenure = {
tenure : sum(salaries) / len(salaries)
for tenure, salaries in salary_by_tenure.items()
}
def tenure_bucket(tenure):
if tenure < 2: return "less than two"
elif tenure < 5: return "between two and five"
else: return "more than five"
salary_by_tenure_bucket = defaultdict(list)
for salary, tenure in salaries_and_tenures:
bucket = tenure_bucket(tenure)
salary_by_tenure_bucket[bucket].append(salary)
average_salary_by_bucket = {
tenure_bucket : sum(salaries) / len(salaries)
for tenure_bucket, salaries in salary_by_tenure_bucket.iteritems()
}
#################
# #
# PAID_ACCOUNTS #
# #
#################
def predict_paid_or_unpaid(years_experience):
if years_experience < 3.0: return "paid"
elif years_experience < 8.5: return "unpaid"
else: return "paid"
######################
# #
# TOPICS OF INTEREST #
# #
######################
words_and_counts = Counter(word
for user, interest in interests
for word in interest.lower().split())
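# For this interests list the most common words are 'big', 'data', 'java',
# 'python' and 'learning', each appearing three times.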
if __name__ == "__main__":
print
print "######################"
print "#"
print "# FINDING KEY CONNECTORS"
print "#"
print "######################"
print
print "total connections", total_connections
print "number of users", num_users
print "average connections", total_connections / num_users
print
# create a list (user_id, number_of_friends)
num_friends_by_id = [(user["id"], number_of_friends(user))
for user in users]
print "users sorted by number of friends:"
print sorted(num_friends_by_id,
key=lambda (user_id, num_friends): num_friends, # by number of friends
reverse=True) # largest to smallest
print
print "######################"
print "#"
print "# DATA SCIENTISTS YOU MAY KNOW"
print "#"
print "######################"
print
print "friends of friends bad for user 0:", friends_of_friend_ids_bad(users[0])
print "friends of friends for user 3:", friends_of_friend_ids(users[3])
print
print "######################"
print "#"
print "# SALARIES AND TENURES"
print "#"
print "######################"
print
print "average salary by tenure", average_salary_by_tenure
print "average salary by tenure bucket", average_salary_by_bucket
print
print "######################"
print "#"
print "# MOST COMMON WORDS"
print "#"
print "######################"
print
for word, count in words_and_counts.most_common():
if count > 1:
print word, count | unlicense |
mehdidc/scikit-learn | examples/linear_model/plot_sgd_separating_hyperplane.py | 260 | 1219 | """
=========================================
SGD: Maximum margin separating hyperplane
=========================================
Plot the maximum margin separating hyperplane within a two-class
separable dataset using a linear Support Vector Machine classifier
trained using SGD.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import SGDClassifier
from sklearn.datasets.samples_generator import make_blobs
# we create 50 separable points
X, Y = make_blobs(n_samples=50, centers=2, random_state=0, cluster_std=0.60)
# fit the model
clf = SGDClassifier(loss="hinge", alpha=0.01, n_iter=200, fit_intercept=True)
clf.fit(X, Y)
# plot the line, the points, and the nearest vectors to the plane
xx = np.linspace(-1, 5, 10)
yy = np.linspace(-1, 5, 10)
X1, X2 = np.meshgrid(xx, yy)
Z = np.empty(X1.shape)
for (i, j), val in np.ndenumerate(X1):
x1 = val
x2 = X2[i, j]
p = clf.decision_function([x1, x2])
Z[i, j] = p[0]
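# Z[i, j] holds the decision-function value at grid point (x1, x2): its sign
# gives the predicted class and its magnitude grows with the distance from the
# separating hyperplane, so the 0-level contour drawn below is the hyperplane.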
levels = [-1.0, 0.0, 1.0]
linestyles = ['dashed', 'solid', 'dashed']
colors = 'k'
plt.contour(X1, X2, Z, levels, colors=colors, linestyles=linestyles)
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.axis('tight')
plt.show()
| bsd-3-clause |
Tasignotas/topographica_mirror | topo/command/pylabplot.py | 1 | 26241 | """
Line-based and matrix-based plotting commands using MatPlotLib.
Before importing this file, you will probably want to do something
like:
from matplotlib import rcParams
rcParams['backend']='TkAgg'
to select a backend, or else select an appropriate one in your
matplotlib.rc file (if any). There are many backends available for
different GUI or non-GUI uses.
"""
try:
from matplotlib import pylab as plt
except ImportError:
import param
param.Parameterized(name=__name__).warning("Could not import matplotlib; module will not be useable.")
from topo.command import ImportErrorRaisingFakeModule
plt = ImportErrorRaisingFakeModule("matplotlib") # pyflakes:ignore (try/except import)
import param
import numpy as np
from numpy.fft.fftpack import fft2
from numpy.fft.helper import fftshift
import topo
from topo.base.sheet import Sheet
from topo.base.arrayutil import wrap
from topo.plotting.plot import make_template_plot
from param import ParameterizedFunction, normalize_path
from param.parameterized import ParamOverrides
from holoviews import Overlay
from holoviews.plotting import OverlayPlot, GridLayout, CurvePlot
from topo.command import Command
class PylabPlotCommand(Command):
"""Parameterized command for plotting using Matplotlib/Pylab."""
file_dpi = param.Number(
default=100.0,bounds=(0,None),softbounds=(0,1000),doc="""
Default DPI when rendering to a bitmap.
The nominal size * the dpi gives the final image size in pixels.
E.g.: 4"x4" image * 80 dpi ==> 320x320 pixel image.""")
file_format = param.String(default="png",doc="""
Which image format to use when saving images.
The output can be png, ps, pdf, svg, or any other format
supported by Matplotlib.""")
# JABALERT: Should replace this with a filename_format and
# associated parameters, as in PlotGroupSaver.
# Also should probably allow interactive display to be controlled
# separately from the filename, to make things work more similarly
# with and without a GUI.
filename = param.String(default=None,doc="""
Optional base of the filename to use when saving images;
if None the plot will be displayed interactively.
The actual name is constructed from the filename base plus the
suffix plus the current simulator time plus the file_format.""")
filename_suffix = param.String(default="",doc="""
Optional suffix to be used for disambiguation of the filename.""")
title = param.String(default=None,doc="""
Optional title to be used when displaying the plot interactively.""")
display_window = param.Boolean(default=True, doc="""
Whether to open a display window containing the plot when
Topographica is running in a non-batch mode.""")
__abstract = True
def _set_windowtitle(self,title):
"""
Helper function to set the title (if not None) of this PyLab plot window.
"""
# At the moment, PyLab does not offer a window-manager-independent
# means for controlling the window title, so what we do is to try
# what should work with Tkinter, and then suppress all errors. That
# way we should be ok when rendering to a file-based backend, but
# will get nice titles in Tk windows. If other toolkits are in use,
# the title can be set here using a similar try/except mechanism, or
# else there can be a switch based on the backend type.
if title is not None:
try:
manager = plt.get_current_fig_manager()
manager.window.title(title)
except:
pass
def _generate_figure(self,p):
"""
Helper function to display a figure on screen or save to a file.
p should be a ParamOverrides instance containing the current
set of parameters.
"""
plt.show._needmain=False
if p.filename is not None:
# JABALERT: need to reformat this as for other plots
fullname=p.filename+p.filename_suffix+str(topo.sim.time())+"."+p.file_format
plt.savefig(normalize_path(fullname), dpi=p.file_dpi)
elif p.display_window:
self._set_windowtitle(p.title)
plt.show()
else:
plt.close(plt.gcf())
class vectorplot(PylabPlotCommand):
"""
Simple line plotting for any vector or list of numbers.
Intended for interactive debugging or analyzing from the command
prompt. See MatPlotLib's pylab functions to create more elaborate
or customized plots; this is just a simple example.
An optional string can be supplied as a title for the figure, if
desired. At present, this is only used for the window, not the
actual body of the figure (and will thus not appear when the
figure is saved).
The style argument allows different line/linespoints style for
the plot: 'r-' for red solid line, 'bx' for blue x-marks, etc.
See http://matplotlib.sourceforge.net/matplotlib.pylab.html#-plot
for more possibilities.
The label argument can be used to identify the line in a figure legend.
Ordinarily, the x value for each point on the line is the index of
that point in the vec array, but a explicit list of xvalues can be
supplied; it should be the same length as vec.
Execution of multiple vectorplot() commands with different styles
will result in all those styles overlaid on a single plot window.
"""
# JABALERT: All but the first two arguments should probably be Parameters
def __call__(self,vec,xvalues=None,style='-',label=None,**params):
p=ParamOverrides(self,params)
fig = plt.figure()
if xvalues is not None:
plt.plot(xvalues, vec, style, label=label)
else:
plt.plot(vec, style, label=label)
plt.grid(True)
self._generate_figure(p)
return fig
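# Minimal usage sketch (arbitrary values, parameters as defined above):
#   vectorplot([0.2, 0.5, 0.3], style='r-', label='demo', title='demo plot')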
class matrixplot(PylabPlotCommand):
"""
Simple plotting for any matrix as a bitmap with axes.
Like MatLab's imagesc, scales the values to fit in the range 0 to 1.0.
Intended for interactive debugging or analyzing from the command
prompt. See MatPlotLib's pylab functions to create more elaborate
or customized plots; this is just a simple example.
"""
plot_type = param.Callable(default=plt.gray,doc="""
Matplotlib command to generate the plot, e.g. plt.gray or plt.hsv.""")
extent = param.Parameter(default=None,doc="""
Subregion of the matrix to plot, as a tuple (l,b,r,t).""")
# JABALERT: All but the first two should probably be Parameters
def __call__(self, mat, aspect=None, colorbar=True, **params):
p = ParamOverrides(self, params)
fig = plt.figure(figsize=(5, 5))
p.plot_type()
# Swap lbrt to lrbt to match pylab
if p.extent is None:
extent = None
else:
(l, b, r, t) = p.extent
extent = (l, r, b, t)
plt.imshow(mat, interpolation='nearest', aspect=aspect, extent=extent)
if colorbar and (mat.min() != mat.max()): plt.colorbar()
self._generate_figure(p)
return fig
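# Minimal usage sketch (arbitrary data):
#   matrixplot(np.random.rand(20, 20), colorbar=True, title='random matrix')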
class matrixplot3d(PylabPlotCommand):
"""
Simple plotting for any matrix as a 3D wireframe with axes.
Uses Matplotlib's beta-quality features for 3D plotting. These
usually work fine for wireframe plots, although they don't always
format the axis labels properly, and do not support removal of
hidden lines. Note that often the plot can be rotated within the
window to make such problems go away, and then the best result can
be saved if needed.
Other than the default "wireframe", the type can be "contour" to
get a contour plot, or "surface" to get a solid surface plot, but
surface plots currently fail in many cases, e.g. for small
matrices.
If you have trouble, you can try matrixplot3d_gnuplot instead.
"""
def __call__(self, mat, type="wireframe", **params):
p = ParamOverrides(self, params)
from mpl_toolkits.mplot3d import axes3d
fig = plt.figure()
ax = axes3d.Axes3D(fig)
# Construct matrices for r and c values
rn, cn = mat.shape
c = np.outer(np.ones(rn), np.arange(cn * 1.0))
r = np.outer(np.arange(rn * 1.0), np.ones(cn))
if type == "wireframe":
ax.plot_wireframe(r, c, mat)
elif type == "surface":
# Sometimes fails for no obvious reason
ax.plot_surface(r, c, mat)
elif type == "contour":
# Works but not usually very useful
ax.contour3D(r, c, mat)
else:
raise ValueError("Unknown plot type " + str(type))
ax.set_xlabel('R')
ax.set_ylabel('C')
ax.set_zlabel('Value')
self._generate_figure(p)
class matrixplot3dx3(PylabPlotCommand):
"""
Plot three matching matrices x,y,z as a 3D wireframe with axes.
See matrixplot3d for caveats and description; this plot is the
same but instead of using implicit r,c values of the matrix, allows
them to be specified directly, thus plotting a series of 3D points.
"""
def __call__(self,x,y,z,labels=["X","Y","Z"],type="wireframe",**params):
p = ParamOverrides(self, params)
from mpl_toolkits.mplot3d import axes3d
fig = plt.figure()
ax = axes3d.Axes3D(fig)
if type == "wireframe":
ax.plot_wireframe(x, y, z)
elif type == "surface":
ax.plot_surface(x, y, z)
elif type == "contour":
ax.contour3D(x, y, z)
else:
raise ValueError("Unknown plot type " + str(type))
ax.set_xlabel(labels[0])
ax.set_ylabel(labels[1])
ax.set_zlabel(labels[2])
self._generate_figure(p)
return fig
class histogramplot(PylabPlotCommand):
"""
Compute and plot the histogram of the supplied data.
See help(plt.hist) for help on the histogram function itself.
If given, colors is an iterable collection of matplotlib.colors
(see help (matplotlib.colors) ) specifying the bar colors.
Example use:
histogramplot([1,1,1,2,2,3,4,5],title='hist',colors='rgb',bins=3,normed=1)
"""
# JABALERT: All but the first two arguments should probably be Parameters
def __call__(self, data, colors=None, **params):
p = ParamOverrides(self, params, allow_extra_keywords=True)
fig = plt.figure(figsize=(4, 2))
n, bins, bars = plt.hist(data, **(p.extra_keywords()))
# if len(bars)!=len(colors), any extra bars won't have their
# colors changed, or any extra colors will be ignored.
if colors: [bar.set_fc(color) for bar, color in zip(bars, colors)]
self._generate_figure(p)
return fig
class gradientplot(matrixplot):
"""
Compute and show the gradient plot of the supplied data.
Translated from Octave code originally written by Yoonsuck Choe.
If the data is specified to be cyclic, negative differences will
be wrapped into the range specified (1.0 by default).
"""
# JABALERT: All but the first two arguments should probably be Parameters
def __call__(self, data, cyclic_range=1.0, **params):
p = ParamOverrides(self, params)
r, c = data.shape
dx = np.diff(data, 1, axis=1)[0:r - 1, 0:c - 1]
dy = np.diff(data, 1, axis=0)[0:r - 1, 0:c - 1]
if cyclic_range is not None: # Wrap into the specified range
# Convert negative differences to an equivalent positive value
dx = wrap(0, cyclic_range, dx)
dy = wrap(0, cyclic_range, dy)
#
# Make it increase as gradient reaches the halfway point,
# and decrease from there
dx = 0.5 * cyclic_range - np.abs(dx - 0.5 * cyclic_range)
dy = 0.5 * cyclic_range - np.abs(dy - 0.5 * cyclic_range)
return super(gradientplot, self).__call__(np.sqrt(dx*dx + dy*dy), **p)
class fftplot(matrixplot):
"""
Compute and show the 2D Fast Fourier Transform (FFT) of the supplied data.
Example:: fftplot(topo.sim["V1"].views.Maps["OrientationPreference"].data,filename="out")
"""
def __call__(self, data, **params):
p = ParamOverrides(self, params)
fft_plot = 1 - np.abs(fftshift(fft2(data - 0.5, s=None, axes=(-2, -1))))
return super(fftplot, self).__call__(fft_plot, **p)
class autocorrelationplot(matrixplot):
"""
Compute and show the 2D autocorrelation of the supplied data.
Requires the external SciPy package.
Example:: autocorrelationplot(topo.sim["V1"].views.Maps["OrientationPreference"].data,filename="out")
"""
plot_type = param.Callable(default=plt.autumn)
def __call__(self, data, **params):
p = ParamOverrides(self, params)
import scipy.signal
mat = scipy.signal.correlate2d(data, data)
return super(autocorrelationplot, self).__call__(mat, **p)
class activityplot(matrixplot):
"""
Plots the activity in a sheet with axis labels in Sheet (not matrix) coordinates.
Same as matrixplot, but only for matrices associated with a Sheet.
By default plots the Sheet's activity, but any other matrix of the
same size may be supplied for plotting in these coordinates instead.
"""
def __call__(self, sheet, mat=None, **params):
p = ParamOverrides(self, params)
if p.extent is None: p.extent = sheet.bounds.aarect().lbrt()
if mat is None: mat = sheet.activity
return super(activityplot, self).__call__(mat, **p)
class xy_grid(PylabPlotCommand):
"""
By default, plot the x and y coordinate preferences as a grid.
"""
axis = param.Parameter(default=[-0.5,0.5,-0.5,0.5],doc="""
Four-element list of the plot bounds, i.e. [xmin, xmax, ymin, ymax].""")
skip = param.Integer(default=1,bounds=[1,None],softbounds=[1,10],doc="""
Plot every skipth line in each direction.
E.g. skip=4 means to keep only every fourth horizontal line
and every fourth vertical line, except that the first and last
are always included. The default is to include all data points.""")
x = param.Array(doc="Numpy array of x positions in the grid.")
y = param.Array(doc= "Numpy array of y positions in the grid." )
def __call__(self, **params):
p = ParamOverrides(self, params)
fig = plt.figure(figsize=(5, 5))
# This one-liner works in Octave, but in matplotlib it
# results in lines that are all connected across rows and columns,
# so here we plot each line separately:
# plt.plot(x,y,"k-",transpose(x),transpose(y),"k-")
# Here, the "k-" means plot in black using solid lines;
# see matplotlib for more info.
isint = plt.isinteractive() # Temporarily make non-interactive for
# plotting
plt.ioff()
for r, c in zip(p.y[::p.skip], p.x[::p.skip]):
plt.plot(c, r, "k-")
for r, c in zip(np.transpose(p.y)[::p.skip],np.transpose(p.x)[::p.skip]):
plt.plot(c, r, "k-")
# Force last line avoid leaving cells open
if p.skip != 1:
plt.plot(p.x[-1], p.y[-1], "k-")
plt.plot(np.transpose(p.x)[-1], np.transpose(p.y)[-1], "k-")
plt.xlabel('x')
plt.ylabel('y')
# Currently sets the input range arbitrarily; should presumably figure out
# what the actual possible range is for this simulation (which would presumably
# be the maximum size of any GeneratorSheet?).
plt.axis(p.axis)
if isint: plt.ion()
self._generate_figure(p)
return fig
class topographic_grid(xy_grid):
"""
By default, plot the XPreference and YPreference preferences for all
Sheets for which they are defined, using MatPlotLib.
If sheet_views other than XPreference and YPreference are desired,
the names of these can be passed in as arguments.
"""
xsheet_view_name = param.String(default='XPreference',doc="""
Name of the Matrix holding the X position locations.""")
ysheet_view_name = param.String(default='YPreference',doc="""
Name of the Matrix holding the Y position locations.""")
# Disable and hide parameters inherited from the base class
x = param.Array(constant=True, precedence=-1)
y = param.Array(constant=True, precedence=-1)
def __call__(self, **params):
p = ParamOverrides(self, params)
for sheet in topo.sim.objects(Sheet).values():
if ((p.xsheet_view_name in sheet.views.Maps) and
(p.ysheet_view_name in sheet.views.Maps)):
x = sheet.views.Maps[p.xsheet_view_name].last.data
y = sheet.views.Maps[p.ysheet_view_name].last.data
filename_suffix = "_" + sheet.name
title = 'Topographic mapping to ' + sheet.name + ' at time ' \
+ topo.sim.timestr()
super(topographic_grid, self).__call__(x=x, y=y, title=title,
filename_suffix=filename_suffix)
class overlaid_plot(PylabPlotCommand):
"""
Use matplotlib to make a plot combining a bitmap and line-based
overlays for a single plot template and sheet.
"""
plot_template = param.Dict(default={'Hue': 'OrientationPreference'}, doc="""
Template for the underlying bitmap plot.""")
overlay = param.List(default=[('contours', 'OcularPreference', 0.5, 'black'),
('arrows', 'DirectionPreference',
'DirectionSelectivity', 'white')],
doc="""
List of overlaid plots, where each list item may be a 4-tuple
specifying either a contour line or a field of arrows::
('contours',map-name,contour-value,line-color)
('arrows',arrow-location-map-name,arrow-size-map-name,arrow-color)
Any number or combination of contours and arrows may be supplied.""")
normalize = param.Boolean(default='Individually', doc="""
Type of normalization, if any, to use. Options include 'None',
'Individually', and 'AllTogether'. See
topo.plotting.plotgroup.TemplatePlotGroup.normalize for more
details.""")
sheet = param.ClassSelector(class_=topo.base.sheet.Sheet, doc="""
The sheet from which sheetViews are to be obtained for plotting.""")
def __call__(self, **params):
p=ParamOverrides(self,params)
name=p.plot_template.keys().pop(0)
plot=make_template_plot(p.plot_template,
p.sheet.views.Maps, p.sheet.xdensity,p.sheet.bounds,
p.normalize,name=p.plot_template[name])
fig = plt.figure(figsize=(5,5))
if plot:
bitmap=plot.bitmap
isint=plt.isinteractive() # Temporarily make non-interactive for plotting
plt.ioff() # Turn interactive mode off
plt.imshow(bitmap.image,origin='lower',interpolation='nearest')
plt.axis('off')
for (t,pref,sel,c) in p.overlay:
v = plt.flipud(p.sheet.views.Maps[pref].view()[0])
if (t=='contours'):
plt.contour(v,[sel,sel],colors=c,linewidths=2)
if (t=='arrows'):
s = plt.flipud(p.sheet.views.Maps[sel].view()[0])
scale = int(np.ceil(np.log10(len(v))))
X = np.array([x for x in xrange(len(v)/scale)])
v_sc = np.zeros((len(v)/scale,len(v)/scale))
s_sc = np.zeros((len(v)/scale,len(v)/scale))
for i in X:
for j in X:
v_sc[i][j] = v[scale*i][scale*j]
s_sc[i][j] = s[scale*i][scale*j]
plt.quiver(scale*X, scale*X, -np.cos(2*np.pi*v_sc)*s_sc,
-np.sin(2*np.pi*v_sc)*s_sc, color=c,
edgecolors=c, minshaft=3, linewidths=1)
p.title='%s overlaid with %s at time %s' %(plot.name,pref,topo.sim.timestr())
if isint: plt.ion()
p.filename_suffix="_"+p.sheet.name
self._generate_figure(p)
return fig
class overlaid_plots(overlaid_plot):
"""
Use matplotlib to make a plot combining a bitmap and line-based overlays.
"""
plot_template = param.List(default=[{'Hue':'OrientationPreference'}],doc="""
Template for the underlying bitmap plot.""")
# Disable and hide parameters inherited from the base class
sheet = param.ClassSelector(class_=topo.base.sheet.Sheet, constant=True, precedence=-1)
def __call__(self,**params):
p=ParamOverrides(self,params)
for template in p.plot_template:
for sheet in topo.sim.objects(Sheet).values():
if getattr(sheet, "measure_maps", False):
super(overlaid_plots, self).__call__(sheet=sheet, plot_template=template,
overlay=p.overlay, normalize=p.normalize)
class tuning_curve(PylabPlotCommand):
"""
Plot a tuning curve for a feature, such as orientation, contrast, or size.
The curve datapoints are collected from the curve_dict for
the units at the specified coordinates in the specified sheet
(where the units and sheet may be set by a GUI, using
topo.analysis.featureresponses.UnitCurveCommand.sheet and
topo.analysis.featureresponses.UnitCurveCommand.coords,
or by hand).
"""
center = param.Boolean(default=True, doc="""
Centers the tuning curve around the maximally responding feature.""")
coords = param.List(default=[(0 , 0)], doc="""
List of coordinates of units to measure.""")
group_by = param.List(default=['Contrast'], doc="""
Feature dimensions for which curves are overlaid.""")
legend = param.Boolean(default=True, doc="""
Whether or not to include a legend in the plot.""")
relative_labels = param.Boolean(default=False, doc="""
Relabel the x-axis with values relative to the preferred.""")
sheet = param.ObjectSelector(default=None, doc="""
Name of the sheet to use in measurements.""")
x_axis = param.String(default='', doc="""
Feature to plot on the x axis of the tuning curve""")
# Disable and hide parameters inherited from the base class
coord = param.NumericTuple(constant=True, precedence=-1)
def __call__(self, **params):
p = ParamOverrides(self, params, allow_extra_keywords=True)
x_axis = p.x_axis.capitalize()
vmap = p.sheet.views.Curves[x_axis.capitalize()+"Tuning"]
time = vmap.dim_range('Time')[1]
curves = []
if vmap.dimension_labels[0] == 'X':
for coord in p.coords:
x, y = coord
current_map = vmap[x, y, time, :, :, :]
curve_map = current_map.sample(X=x, Y=y).collate(p.x_axis.capitalize())
curves.append(curve_map.overlay(p.group_by))
else:
current_map = vmap[time, :, :, :]
curve_map = current_map.sample(p.coords).collate(p.x_axis.capitalize())
overlaid_curves = curve_map.overlay(p.group_by)
if not isinstance(curves, GridLayout): curves = [overlaid_curves]
figs = []
for coord, curve in zip(p.coords,curves):
fig = plt.figure()
ax = plt.subplot(111)
plot = OverlayPlot if isinstance(curve.last, Overlay) else CurvePlot
plot(curve, center=p.center, relative_labels=p.relative_labels,
show_legend=p.legend)(ax)
self._generate_figure(p, fig)
figs.append((coord, fig))
return figs
def _generate_figure(self, p, fig):
"""
Helper function to display a figure on screen or save to a file.
p should be a ParamOverrides instance containing the current
set of parameters.
"""
plt.show._needmain=False
if p.filename is not None:
# JABALERT: need to reformat this as for other plots
fullname=p.filename+p.filename_suffix+str(topo.sim.time())+"."+p.file_format
fig.savefig(normalize_path(fullname), dpi=p.file_dpi)
elif p.display_window:
self._set_windowtitle(p.title)
fig.show()
else:
plt.close(fig)
cyclic_tuning_curve = tuning_curve
def cyclic_unit_tuning_curve(coord=(0, 0), **kwargs):
return tuning_curve(coords=[coord], **kwargs)[0]
def plot_cfproj_mapping(dest,proj='Afferent',style='b-'):
"""
Given a CF sheet receiving a CFProjection, plot
the mapping of the dests CF centers on the src sheet.
"""
if isinstance(dest,str):
from topo import sim
dest = sim[dest]
plot_coord_mapping(dest.projections()[proj].coord_mapper,
dest,style=style)
# JABALERT: not sure whether this is currently used
def plot_coord_mapping(mapper,sheet,style='b-'):
"""
Plot a coordinate mapping for a sheet.
Given a CoordinateMapperFn (as for a CFProjection) and a sheet
of the projection, plot a grid showing where the sheet's units
are mapped.
"""
from pylab import plot,hold,ishold
xs = sheet.sheet_rows()
ys = sheet.sheet_cols()
hold_on = ishold()
if not hold_on:
plot()
hold(True)
for y in ys:
pts = [mapper(x,y) for x in xs]
plot([u for u,v in pts],
[v for u,v in pts],
style)
for x in xs:
pts = [mapper(x,y) for y in ys]
plot([u for u,v in pts],
[v for u,v in pts],
style)
hold(hold_on)
import types
__all__ = list(set([k for k,v in locals().items()
if isinstance(v,types.FunctionType) or
(isinstance(v,type) and issubclass(v,ParameterizedFunction))
and not v.__name__.startswith('_')]))
| bsd-3-clause |
xidus/ted | ted/sdss/__init__.py | 1 | 15412 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Version: Sat 27 Apr 2013
# Initial build.
#
"""
The purpose of this module is to provide helper functions for
downloading and handling SDSS imaging data for the Stripe 82 survey.
TODO
----
X Make a csv-file with all unique frames. (bash script does this, but header has to be moved from the bottom to the top afterwards.)
X Make a list of the number of recorded frames that covers each SN. (How did I make it before?)
X Make CAS_get_fields() check how far the download is,
by looking at what has already been downloaded.
* Automate making a list of the relative paths to the downloaded images
* <del>Make CAS_get_field_single() check if the file already exists</del>
"""
import os
import requests
import numpy as np
import pandas as pd
from .. import env
from .. import _pkg_home_dir
from ..parse import ra2deg, dec2deg
_path_data = env.paths.get('data')
_path_das = env.paths.get('das')
_path_cas = env.paths.get('cas')
_path_sql = env.paths.get('sql')
_proxies = env.proxies
# Add locations of snlists
env.files['snlist_902'] = os.path.join(
_pkg_home_dir, 'sdss', 'data', 'snlist_confirmed.csv')
env.files['snlist_1030'] = os.path.join(
_pkg_home_dir, 'sdss', 'data', 'snlist_confirmed_updated.csv')
"""
Content
-------
__Functions:__
* iscoadded
* URL_exists
* load_SNe_candidate_list
* merge_sne_lists
* sql_fill_table_SNe
"""
###############################################################################
def iscoadded(run):
"""Test a single run number."""
return run in (106, 206)
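# Runs 106 and 206 are, presumably, the run numbers assigned to the two
# co-added Stripe 82 strips; any other run is treated as single-epoch data.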
###############################################################################
def URI_exists(uri):
return requests.head(uri).status_code in (200, 302)
###############################################################################
def load_SNe_candidate_list():
"""
Loads the SNe list from the CSV-file.
Column titles:
--------------
SDSS SN Id : string
Type : string
IAUC Name : string
Right Ascension (hh:mm:ss) : float
Declination(dd:mm:ss) : float
Redshift : float
Peak MJD (approx) : float
Returns
-------
pd.DataFrame object with the SNe
"""
ifname = env.files.get('snlist')
if not os.path.exists(ifname):
# get_snlist()
merge_sne_lists()
return pd.read_csv(ifname, sep=';')
###############################################################################
def merge_sne_lists():
"""
Reads in the two lists of SNe candidates and merges them into one
bigger list (it turns out). This list is then having an extra column
added with a special flag, and the final list is saved as .csv
"""
fn_snlist_sdss_org = env.files.get('snlist_902')
fn_snlist_SNII_upd = env.files.get('snlist_1030')
if not (
os.path.isfile(fn_snlist_sdss_org) and
os.path.isfile(fn_snlist_SNII_upd)):
raise SystemExit('''\
Source files do not exist!
Please add them manually first.''')
ofn_snlists_merged = env.files.get('snlist')
def col_count(df):
# names = ['SDSS_id', 'SN_type', 'IAUC_id', 'redshift', 'Peak_MJD']
# tests = ['', '', '', np.nan, np.nan]
names = ['SN_type', 'IAUC_id', 'redshift', 'Peak_MJD']
tests = ['', '', np.nan, np.nan]
counts = [(df_right[name].values != test).sum() for name, test in zip(names, tests)]
print ('{: >10s} ' * len(names)).format(*names)
print ('{: >10d} ' * len(names)).format(*counts)
print ''
# String to value or NaN (hence the name valornan)
snid_sortable = lambda SDSS_id: 'SN{:0>5d}'.format(int(SDSS_id[2:]))
s2valornan = lambda s: s or np.nan
conv = dict(SDSS_id=snid_sortable, Ra=ra2deg, Dec=dec2deg,
redshift=s2valornan, Peak_MJD=s2valornan)
df_left = pd.read_csv(fn_snlist_sdss_org, sep=';', converters=conv)
df_right = pd.read_csv(fn_snlist_SNII_upd, sep=';', converters=conv)
SDSS_id_left = list(df_left.SDSS_id)
SDSS_id_right = list(df_right.SDSS_id)
SDSS_ids = np.array(SDSS_id_left + SDSS_id_right)
SNids = np.unique(SDSS_ids)
SNids.sort()
columns = list(df_left.columns)
# print columns
# raise SystemExit
ncols = len(columns) - 1
df = pd.DataFrame(columns=columns)
tests = [''] * 2 + [np.nan] * 4
# raise SystemExit
for SNid in SNids:
# print SNid,
lix = (df_left.SDSS_id == SNid)
rix = (df_right.SDSS_id == SNid)
ls = lix.sum()
rs = rix.sum()
# First, do both sides have the SDSS_id?
# If not, just add the given one to the DataFrame.
if ls and rs:
# print 'is a duplicate ...'
row_left = df_left[lix]
row_right = df_right[rix]
values = row_left.copy()
# print values.values[0, 0], values.shape
# raise SystemExit
test_left = [(row_left[col].values[0] != test) for col, test in zip(columns, tests)]
test_right = [(row_right[col].values[0] != test) for col, test in zip(columns, tests)]
for i in range(1, ncols):
col = columns[i]
if test_left[i] and test_right[i]:
# Both have valid values
# Choose the value from the newest list
values.values[0, i] = row_right.values[0, i]
elif test_right[i]:
values.values[0, i] = row_right.values[0, i]
else:
values.values[0, i] = row_left.values[0, i]
df = df.append(values)
elif rs:
# print 'is unique (right) ...'
df = df.append(df_right[rix])
elif ls:
# print 'is unique (left) ...'
df = df.append(df_left[lix])
df.sort_index(by='SDSS_id', ascending=True)
"""Check for duplicate coordinates"""
report = ''
# Check for duplicate coordinate pairs
coords = np.array([])
for i in range(df.shape[0]):
coords = np.append(coords, '{:014.9f}_{:014.9f}'.format(
df.Ra.values[i], df.Dec.values[i])
)
ucoords, indices = np.unique(coords, return_inverse=True)
report += 'Number of list entries: {: >4d}\n'.format(df.shape[0])
report += 'Number of unique entries: {: >4d}\n'.format(ucoords.size)
# print 'Number of unique entry IDs: {: >4d}'.format(np.unique(snlist.SDSS_id.values).size)
# `indices` is an array of the same length as `coords`
# An entry in `indices` is itself an index for an entry in `ucoords`,
# i.e. `coords[i]` is `ucoords[indices[i]]`, `coords.size == indices.size`
# Thus repeated entries in `indices` means that there were more than one
# entry in `coords` which held this value; in this case a coordinate pair.
duplicates = []
for ix in np.unique(indices):
if (indices == ix).sum() > 1:
# There is a duplicate of this coordinate
# Save the index for `ucoords`
duplicates.append(ix)
# Now we have the indices for the entries in `ucoords` whose values in
# `coords` appear more than once. Let's retrieve the corresponding indices
# for these values in `coords`.
coord_indices = []
for ix in duplicates:
report += '\n'
for i, uc in enumerate(ucoords[indices[indices == ix]]):
if i == 0:
# We only need to search for the indices of the duplicates
# from one of the duplicates.
coord_indices.append((coords == uc).nonzero()[0])
# Report the actual coordinate strings for evry duplicate so that a
# visual inspection can also verify that they are in fact congruent
report += '{}'.format(uc)
report += '\nIndices of `ucoords`: {}'.format(duplicates)
report += '\nIndices of `coords`: {}'.format(repr(coord_indices))
report += '\n'
report += 'Entries from snlist:'
for cices in coord_indices:
report += '\n'
report += '{}'.format(df.iloc[cices])
# Selection of the entry in the list which gets to stay.
# I choose the first occurrence, since the list is at this point already
# sorted after SDSS_id which increases with time.
# For this selection to be fair, the duplicate coordinates have to also
# refer to the same object.
# How do I know this?
# I looked at the output of the previous for-loop, and each pair of entries
# of the three duplicate coordinates had an estimated Modified Julian Date
# Peak time which were separated in time by an interval that is of the
# same order as the timespan in which a supernova is typically visible.
# It is interesting that the survey found at least two peak dates for what
# I, at least for now, assume is the same object. or two very closely
# located objects. I do not know what the odds of spotting two different
# events along the same line of sight and coming from two separate galaxies
# within this relatively short amount of time; and considering three such
# events within such a short list seems even less plausible.
# POP the last of the two (or in principle more) entries from the list,
# before saving it.
# OR simply let Pandas do ALL the above work and also remove the duplicates.
# report is still useful for the report, i.e. with the argument above.
df.drop_duplicates(cols=['Ra', 'Dec'], inplace=True)
with open(env.files.get('log_snlist'), 'w+') as fsock:
fsock.write(report)
if 0:
"""Add flag"""
# ADDING A FLAG
# Check if confirmed by IAUC, i.e. that it has an ID.
confirmed = (df['IAUC_id'].values != np.nan)
flags = np.zeros_like(df['IAUC_id'].values).astype(str)
flags[confirmed] = 'C'
flags[~confirmed] = ''
df['Flag'] = flags
print 'Confirmed SNe (has IAUC ID):', confirmed.sum()
print 'Internally confirmed SNe:', (~confirmed).sum()
df.to_csv(ofn_snlists_merged, sep=';', index=False, header=True)
###############################################################################
def sql_fill_table_SNe():
"""
Loads merged list of SNe candidates and
inserts the records into an SQLite3 db.
"""
import sqlite3
ofn_sqlite_db = env.files.get('db')
fn_create_table_sne = os.path.join(_path_sql, 'skyml_create_table_SNe.sql')
df = load_SNe_candidate_list()
# # Read SQL
with open(fn_create_table_sne, 'r') as fsock:
sql_create_table_sne = fsock.read()
# Connect to database file
with sqlite3.connect(ofn_sqlite_db) as con:
cur = con.cursor()
cur.execute('DROP TABLE IF EXISTS Supernovae')
cur.execute(sql_create_table_sne)
sql_insert = '''\
INSERT INTO Supernovae
(SDSS_id, SN_type, IAUC_id, Ra, Dec, redshift, Peak_MJD)
VALUES
(?, ?, ?, ?, ?, ?, ?)
'''
# cur.executemany(sql_insert, df.values)
for i, row in enumerate(df.values):
cur.execute(sql_insert, row)
con.commit()
# df.to_sql(name='Supernovae', con=con, if_exists='replace')
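# Usage sketch: sql_fill_table_SNe() drops and recreates the Supernovae table
# in the SQLite database at env.files['db'], pulling its rows from the merged
# candidate list (built on demand by load_SNe_candidate_list).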
###############################################################################
###############################################################################
###############################################################################
###############################################################################
###############################################################################
def get_snlist():
"""NOT IMPLEMENTED YET."""
return
# The best way to do this seems to be to grab the HTML table and manipulate it manually.
# Download the lists as HTML tables
# Edit their source codes to get .csv format.
# Use the merge function to merge the lists into the big master list.
# The name of the list is currently given by
if 0:
# List of
URI_snlist = 'http://www.sdss.org/supernova/snlist.dat'
fname = os.path.basename(URI_snlist)
# File-name path to local copy of snlist.dat
ofname = os.path.join(_path_data, fname)
# File-name path to local copy of snlist.csv (to be loaded from Pandas)
ifname = os.path.join(_path_data, os.path.splitext(fname)[0] + '.csv')
if 1 or not os.path.exists(ifname):
if not os.path.exists(ofname):
print 'Downloading {} ... '.format(URI_snlist)
response = requests.get(URI_snlist)
print 'HTTP response: {} ...'.format(response.status_code)
print 'Saving file {} ...'.format(ofname)
with open(ofname, 'w+') as fsock:
fsock.write(response.content)
else:
print 'Downloaded file {} found ...'.format(fname)
print 'Loading {} to create csv-file ...'.format(fname)
with open(ofname, 'r') as fsock:
sncontent = fsock.read()
print 'Replacing spaces entries with semicolons ...'
sncontent = sncontent.replace(' ', ';').replace('---', '')
print 'Modifying column names ...'
snlist = sncontent.split('\n')
snlist[0] = 'SDSS_id;SN_type;IAUC_id;Ra;Dec;redshift;Peak_MJD'
if 0:
# Debug
ncols = np.zeros(len(snlist))
for i, line in enumerate(snlist):
ncols[i] = len(line.split(';'))
print i + 1, ncols[i]
for i, cnt in enumerate(np.unique(ncols)):
print cnt, (ncols == cnt).sum()
print 'Saving file {} ...'.format(ifname)
with open(ifname, 'w+') as fsock:
fsock.write('\n'.join(snlist))
# return pd.read_csv(
# # ofname, sep=';', skiprows=2, converters=dict(Ra=ra2deg, Dec=dec2deg)
# ifname, sep=';', converters=dict(Ra=ra2deg, Dec=dec2deg)
# )
###############################################################################
###############################################################################
###############################################################################
###############################################################################
###############################################################################
def wcs_world_order(w):
"""
Prints out the return-order of world coordinates,
when using the wcs.WCS.wcs_pix2world() method.
Parameters
----------
w : wcs.WCS object, required
the wcs.WCS object created from a given header
e.g.
hdulist = fits.open('image.fits')
w = wcs.WCS(hdulist[0].header)
Returns
-------
None
References
----------
* astropy.wcs.WCS.wcs_pix2world.__doc__
"""
lat_order = w.wcs.lat
lng_order = w.wcs.lng
lat_type = w.wcs.lattyp
lng_type = w.wcs.lngtyp
world_order = {}
if lat_order < lng_order:
world_order['first'] = ['lat', lat_type]
world_order['last'] = ['lng', lng_type]
else:
world_order['first'] = ['lng', lng_type]
world_order['last'] = ['lat', lat_type]
for order, coord_info in world_order.iteritems():
print '{: <5s}: {} / {}'.format(order, *coord_info)
| bsd-3-clause |
wlamond/scikit-learn | examples/covariance/plot_covariance_estimation.py | 99 | 5074 | """
=======================================================================
Shrinkage covariance estimation: LedoitWolf vs OAS and max-likelihood
=======================================================================
When working with covariance estimation, the usual approach is to use
a maximum likelihood estimator, such as the
:class:`sklearn.covariance.EmpiricalCovariance`. It is unbiased, i.e. it
converges to the true (population) covariance when given many
observations. However, it can also be beneficial to regularize it, in
order to reduce its variance; this, in turn, introduces some bias. This
example illustrates the simple regularization used in
:ref:`shrunk_covariance` estimators. In particular, it focuses on how to
set the amount of regularization, i.e. how to choose the bias-variance
trade-off.
Here we compare 3 approaches:
* Setting the parameter by cross-validating the likelihood on three folds
according to a grid of potential shrinkage parameters.
* A closed formula proposed by Ledoit and Wolf to compute
the asymptotically optimal regularization parameter (minimizing an MSE
criterion), yielding the :class:`sklearn.covariance.LedoitWolf`
covariance estimate.
* An improvement of the Ledoit-Wolf shrinkage, the
:class:`sklearn.covariance.OAS`, proposed by Chen et al. Its
convergence is significantly better under the assumption that the data
are Gaussian, in particular for small samples.
To quantify estimation error, we plot the likelihood of unseen data for
different values of the shrinkage parameter. We also show the choices by
cross-validation, or with the LedoitWolf and OAS estimates.
Note that the maximum likelihood estimate corresponds to no shrinkage,
and thus performs poorly. The Ledoit-Wolf estimate performs really well,
as it is close to the optimal and is not computationally costly. In this
example, the OAS estimate is a bit further away. Interestingly, both
approaches outperform cross-validation, which is significantly more
computationally costly.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg
from sklearn.covariance import LedoitWolf, OAS, ShrunkCovariance, \
log_likelihood, empirical_covariance
from sklearn.model_selection import GridSearchCV
###############################################################################
# Generate sample data
n_features, n_samples = 40, 20
np.random.seed(42)
base_X_train = np.random.normal(size=(n_samples, n_features))
base_X_test = np.random.normal(size=(n_samples, n_features))
# Color samples
coloring_matrix = np.random.normal(size=(n_features, n_features))
X_train = np.dot(base_X_train, coloring_matrix)
X_test = np.dot(base_X_test, coloring_matrix)
###############################################################################
# Compute the likelihood on test data
# spanning a range of possible shrinkage coefficient values
shrinkages = np.logspace(-2, 0, 30)
negative_logliks = [-ShrunkCovariance(shrinkage=s).fit(X_train).score(X_test)
for s in shrinkages]
# under the ground-truth model, which we would not have access to in real
# settings
real_cov = np.dot(coloring_matrix.T, coloring_matrix)
emp_cov = empirical_covariance(X_train)
loglik_real = -log_likelihood(emp_cov, linalg.inv(real_cov))
###############################################################################
# Compare different approaches to setting the parameter
# GridSearch for an optimal shrinkage coefficient
tuned_parameters = [{'shrinkage': shrinkages}]
cv = GridSearchCV(ShrunkCovariance(), tuned_parameters)
cv.fit(X_train)
# Ledoit-Wolf optimal shrinkage coefficient estimate
lw = LedoitWolf()
loglik_lw = lw.fit(X_train).score(X_test)
# OAS coefficient estimate
oa = OAS()
loglik_oa = oa.fit(X_train).score(X_test)
###############################################################################
# Plot results
fig = plt.figure()
plt.title("Regularized covariance: likelihood and shrinkage coefficient")
plt.xlabel('Regularization parameter: shrinkage coefficient')
plt.ylabel('Error: negative log-likelihood on test data')
# range shrinkage curve
plt.loglog(shrinkages, negative_logliks, label="Negative log-likelihood")
plt.plot(plt.xlim(), 2 * [loglik_real], '--r',
label="Real covariance likelihood")
# adjust view
lik_max = np.amax(negative_logliks)
lik_min = np.amin(negative_logliks)
ymin = lik_min - 6. * np.log((plt.ylim()[1] - plt.ylim()[0]))
ymax = lik_max + 10. * np.log(lik_max - lik_min)
xmin = shrinkages[0]
xmax = shrinkages[-1]
# LW likelihood
plt.vlines(lw.shrinkage_, ymin, -loglik_lw, color='magenta',
linewidth=3, label='Ledoit-Wolf estimate')
# OAS likelihood
plt.vlines(oa.shrinkage_, ymin, -loglik_oa, color='purple',
linewidth=3, label='OAS estimate')
# best CV estimator likelihood
plt.vlines(cv.best_estimator_.shrinkage, ymin,
-cv.best_estimator_.score(X_test), color='cyan',
linewidth=3, label='Cross-validation best estimate')
plt.ylim(ymin, ymax)
plt.xlim(xmin, xmax)
plt.legend()
plt.show()
| bsd-3-clause |
alphacsc/alphacsc | benchmarks/1D_vs_multi_plot.py | 1 | 4169 | """Benchmark multiple channels vs a single channel for dictionary recovery.
This script plots the results saved by the script 1D_vs_multi_run.py, which
should be run beforehand.
"""
import os
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(
'Compare dictionary retrieval capabilities with univariate and '
'multichannel CSC for different SNR.')
parser.add_argument('--file-name', type=str,
default='figures/rank1_snr.pkl',
help='Name of the result file to plot from.')
parser.add_argument('--pdf', action='store_true',
help='Output pdf figures for final version.')
args = parser.parse_args()
file_name = args.file_name
if not os.path.exists(file_name):
        raise FileNotFoundError("Could not find result file '{}'. Make sure "
                                "to run 1D_vs_multi_run.py before using this "
                                "script.".format(file_name))
extension = "pdf" if args.pdf else "png"
# Load the results
all_results_df = pd.read_pickle(file_name)
# Setup the figure
fontsize = 14
figsize = (6, 3.4)
mpl.rc('mathtext', fontset='cm')
fig = plt.figure(figsize=figsize)
normalize = mcolors.LogNorm(vmin=1, vmax=50)
colormap = plt.cm.get_cmap('viridis')
############################################################
# Plot recovery score against the noise level for different
# channel numbers
############################################################
span_n_channels = all_results_df.run_n_channels.unique()
span_sigma = all_results_df.sigma.unique()
for n_channels in span_n_channels:
curve = []
results_n_channel = all_results_df[
all_results_df['run_n_channels'] == n_channels]
for sigma in span_sigma:
results = results_n_channel[results_n_channel['sigma'] == sigma]
results = results.groupby(['random_state']).min()
curve += [results.score.mean()]
color = colormap(normalize(n_channels))
plt.loglog(span_sigma, curve, color=color,
label="$P={}$".format(n_channels))
plt.legend(loc=2, fontsize=fontsize)
plt.ylabel(r"score($\widehat v$)", fontsize=fontsize)
plt.xlabel(r"Noise level $\eta$", fontsize=fontsize)
plt.tight_layout()
plt.savefig(file_name.replace("pkl", extension), dpi=150)
##############################################################
# For each channel number, plot the recovered atoms compared
# to the initial ones.
##############################################################
sig = all_results_df.sigma.unique()[5]
print("eta = {:.2e}".format(sig))
for P in span_n_channels:
if P == 1:
continue
plt.figure(figsize=figsize)
res_sig = all_results_df[all_results_df.sigma == sig]
lines = []
for n_channels, color in [(1, 'C0'), (P, 'C1')]:
res = res_sig[res_sig.run_n_channels == n_channels]
i0 = res.score.idxmin()
uv_hat = res.uv_hat[i0]
print("[P={}] Best lmbd {}".format(n_channels, res.reg[i0]))
uv = res.uv[i0]
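            # Added comment: the learned dictionary is only identified up to
            # sign and atom permutation, so the correlation matrix ``s``
            # between true and estimated atoms is used below to pick the atom
            # ordering and flip signs before plotting them together.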
s = np.dot(uv[:, -64:], uv_hat[:, -64:].T)
if np.trace(abs(s)) >= np.trace(abs(s)[::-1]):
uv_hat *= np.sign(np.diag(s))[:, None]
else:
uv_hat *= np.sign(np.diag(s[::-1]))[:, None]
ll = plt.plot(uv_hat[:, -64:].T, color=color, label=n_channels)
lines += [ll[0]]
ll = plt.plot(uv[:, -64:].T, "k--", label="GT")
lines += [ll[0]]
plt.legend(lines, ['$P=1$', '$P={}$'.format(P), "GT"], loc=8,
fontsize=fontsize, ncol=3, columnspacing=.5)
plt.xlabel("Times", fontsize=fontsize)
plt.ylabel("Atoms", fontsize=fontsize)
plt.tight_layout()
plt.savefig(file_name.replace(".pkl", "_uv_hat_P{}.{}").format(
P, extension), dpi=150)
| bsd-3-clause |
jmschrei/scikit-learn | benchmarks/bench_glmnet.py | 297 | 3848 | """
To run this, you'll need to have the following installed:
* glmnet-python
* scikit-learn (of course)
This script runs two benchmarks.
First, we fix a training set and increase the number of
samples. Then we plot the computation time as function of
the number of samples.
In the second benchmark, we increase the number of dimensions of the
training set. Then we plot the computation time as function of
the number of dimensions.
In both cases, only 10% of the features are informative.
"""
import numpy as np
import gc
from time import time
from sklearn.datasets.samples_generator import make_regression
alpha = 0.1
# alpha = 0.01
def rmse(a, b):
return np.sqrt(np.mean((a - b) ** 2))
def bench(factory, X, Y, X_test, Y_test, ref_coef):
gc.collect()
# start time
tstart = time()
clf = factory(alpha=alpha).fit(X, Y)
delta = (time() - tstart)
# stop time
print("duration: %0.3fs" % delta)
print("rmse: %f" % rmse(Y_test, clf.predict(X_test)))
print("mean coef abs diff: %f" % abs(ref_coef - clf.coef_.ravel()).mean())
return delta
if __name__ == '__main__':
from glmnet.elastic_net import Lasso as GlmnetLasso
from sklearn.linear_model import Lasso as ScikitLasso
# Delayed import of pylab
import pylab as pl
scikit_results = []
glmnet_results = []
n = 20
step = 500
n_features = 1000
    n_informative = n_features // 10
n_test_samples = 1000
for i in range(1, n + 1):
print('==================')
print('Iteration %s of %s' % (i, n))
print('==================')
X, Y, coef_ = make_regression(
n_samples=(i * step) + n_test_samples, n_features=n_features,
noise=0.1, n_informative=n_informative, coef=True)
X_test = X[-n_test_samples:]
Y_test = Y[-n_test_samples:]
X = X[:(i * step)]
Y = Y[:(i * step)]
print("benchmarking scikit-learn: ")
scikit_results.append(bench(ScikitLasso, X, Y, X_test, Y_test, coef_))
print("benchmarking glmnet: ")
glmnet_results.append(bench(GlmnetLasso, X, Y, X_test, Y_test, coef_))
pl.clf()
xx = range(0, n * step, step)
pl.title('Lasso regression on sample dataset (%d features)' % n_features)
pl.plot(xx, scikit_results, 'b-', label='scikit-learn')
pl.plot(xx, glmnet_results, 'r-', label='glmnet')
pl.legend()
pl.xlabel('number of samples to classify')
pl.ylabel('Time (s)')
pl.show()
# now do a benchmark where the number of points is fixed
# and the variable is the number of features
scikit_results = []
glmnet_results = []
n = 20
step = 100
n_samples = 500
for i in range(1, n + 1):
print('==================')
print('Iteration %02d of %02d' % (i, n))
print('==================')
n_features = i * step
        n_informative = n_features // 10
X, Y, coef_ = make_regression(
n_samples=(i * step) + n_test_samples, n_features=n_features,
noise=0.1, n_informative=n_informative, coef=True)
X_test = X[-n_test_samples:]
Y_test = Y[-n_test_samples:]
X = X[:n_samples]
Y = Y[:n_samples]
print("benchmarking scikit-learn: ")
scikit_results.append(bench(ScikitLasso, X, Y, X_test, Y_test, coef_))
print("benchmarking glmnet: ")
glmnet_results.append(bench(GlmnetLasso, X, Y, X_test, Y_test, coef_))
xx = np.arange(100, 100 + n * step, step)
pl.figure('scikit-learn vs. glmnet benchmark results')
pl.title('Regression in high dimensional spaces (%d samples)' % n_samples)
pl.plot(xx, scikit_results, 'b-', label='scikit-learn')
pl.plot(xx, glmnet_results, 'r-', label='glmnet')
pl.legend()
pl.xlabel('number of features')
pl.ylabel('Time (s)')
pl.axis('tight')
pl.show()
| bsd-3-clause |
mendax-grip/cfdemUtilities | vonKarmanSingh/drag.py | 2 | 4513 | # This program analyses the X and Y drag coefficient (drag and lift) from the cylinder immersed
# boundary test cases
# It can be compared visually afterward to experimental data
# Currently it is not generic and can only load 2 data sets; loading more would make the plot unreadable anyway
#
# USAGE : python drag.py ./FOLDERWHEREDATA-1-IS [./FOLDERWHEREDATA-2-IS]
#
# Author : Bruno Blais
#Python imports
#----------------
import os
import sys
import numpy
import time
import scipy
import matplotlib.pyplot as plt
import re
#----------------
#TODO
# - Make everything in a single loop instead
#********************************
# OPTIONS AND USER PARAMETERS
#********************************
skip=100
pdf=1
tminFFT=180.
#Figure size
plt.rcParams['figure.figsize'] = 10, 7
params = {'backend': 'ps',
'axes.labelsize': 24,
'text.fontsize': 16,
'legend.fontsize': 18,
'xtick.labelsize': 16,
'ytick.labelsize': 16,
'text.usetex': True,
}
plt.rcParams.update(params)
#======================
# MAIN
#======================
tFold= 0
#Read the log files
if (len(sys.argv)<2):
print 'Folder must be specified when running this python script'
sys.exit("Crashed because folder was not specified")
if (len(sys.argv)>3):
print 'Too many arguments, only the first two folders will be post-processed'
folder = [sys.argv[1], ' ']
if (len(sys.argv)>2):
tFold= 1
folder = [sys.argv[1], sys.argv[2]]
tx1, dx1 = numpy.loadtxt(folder[0]+'/dragX', unpack=True)
ty1, dy1 = numpy.loadtxt(folder[0]+'/dragY', unpack=True)
dx1=dx1*2
dy1=dy1*2
# Take absolute value
dx1= numpy.abs(dx1)
index = numpy.where(ty1>tminFFT)
# Manual FFT to get amplitude and frequencies right!
Fs = 1. / (tx1[2]-tx1[1]) # Sampling frequency
df = 1. / (ty1[-1]-tminFFT)
N= len(dy1[index]) # Number of points
# First normalise the amplitude with respect to the number of points
spectrum = abs(numpy.fft.fft(dy1[index])) / N
f1 = numpy.arange(0.,Fs/2.-df,df)
print "Number of point for FFT:", N
# Keep positive part of the FFT spectrum
Nf = (N)/2
spectrum1 = 2 * spectrum[0:len(f1)]
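# Added note on the normalisation above: dividing the FFT by N and doubling
# the positive-frequency half recovers (approximately) the physical amplitude
# of a periodic component. A small self-contained check of this convention
# (illustrative only, kept commented out):
#   t_chk = numpy.arange(0., 20., 1. / Fs)
#   amp_chk = 2. * numpy.abs(numpy.fft.fft(3. * numpy.sin(2. * numpy.pi * 5. * t_chk))) / len(t_chk)
#   # amp_chk peaks at ~3 near 5 Hz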
if (tFold):
tx2, dx2 = numpy.loadtxt(folder[1]+'/dragX', unpack=True)
ty2, dy2 = numpy.loadtxt(folder[1]+'/dragY', unpack=True)
dx2=dx2*2
dy2=dy2*2
index = numpy.where(ty2>tminFFT)
# Take absolute value
dx2= numpy.abs(dx2)
# Manual FFT to get amplitude and frequencies right!
Fs = 1. / (tx2[2]-tx2[1]) # Sampling frequency
df = 1. / ty2[-1]
N= len(dy2[index]) # Number of points
# First normalise the amplitude with respect to the number of points
spectrum = abs(numpy.fft.fft(dy2[index])) / N
f2 = numpy.arange(0.,Fs/2.-df,df)
# Keep positive part of the FFT spectrum
Nf = (N)/2
spectrum2 = 2 * spectrum[0:len(f2)]
# Plotting stage
axfft=plt.figure("FFT C_L")
axfftp = axfft.add_subplot(111)
plt.ylabel(' Amplitude ')
plt.xlabel('Strouhal Number ($St$)')
#plt.title('Frequency spectrum of $C_L$ ')
plt.yscale('log')
plt.xscale('log')
if (tFold ==0):
plt.plot(f1,spectrum1,linewidth=2.0)
if (tFold ==1):
plt.plot(f1,spectrum1,f2,spectrum2,'k',linewidth=2.0)
#axfftp.grid(b=True, which='minor', color='k', linestyle='--')
axfftp.grid(b=True, which='major', color='k', linestyle='--')
if (pdf): plt.savefig("./fftOnCylinder.pdf")
ax = plt.figure("Drag coefficient") #Create window
axp=ax.add_subplot(111)
plt.ylabel('$C_D$, $C_L$ ')
plt.xlabel('time [s]')
#plt.title('Drag coefficients with time for 2D Kelvin-Helmholtz ')
if (tFold ==0):
plt.plot(tx1[skip:],dx1[skip:],'-', label='$C_D$',linewidth=2.0,color='grey')
plt.plot(ty1[skip:],-dy1[skip:],'-', label='$C_L$',linewidth=2.0,color='black')
if (tFold ==1):
plt.plot(tx1[skip:],dx1[skip:],'-', label=('$C_D$-'+sys.argv[1]),linewidth=2.0)
plt.plot(ty1[skip:],-dy1[skip:],'-', label=('$C_L$-'+sys.argv[1]),linewidth=2.0)
plt.plot(tx2[skip:],dx2[skip:],'-', label=('$C_D$-'+sys.argv[2]),linewidth=2.0)
plt.plot(ty2[skip:],-dy2[skip:],'-', label=('$C_L$-'+sys.argv[2]),linewidth=2.0)
plt.legend(loc=3)
print "Averaged CD:\t", numpy.average(dx1[index])
print "Amplitude CD:\t", (numpy.max(dx1[index])-numpy.min(dx1[index]))/2
print "Amplitude CL:\t", (numpy.max(dy1[index])-numpy.min(dy1[index]))/2
print "Average CL:\t", numpy.average(dy1[index])
axp.grid(b=True, which='major', color='k', linestyle='--')
plt.axis((0,200,-1,1.5))
if (pdf): plt.savefig("./forceOnCylinder.pdf")
plt.show()
| lgpl-3.0 |
pmorerio/curriculum-dropout | mnist/analyseResults_mnist_average.py | 1 | 1600 | import cPickle
import glob
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
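# Added comment: `average` below sorts each run's accuracy curve and returns
# the mean, over runs, of the average of its `last_n` highest values, i.e. a
# rough plateau-accuracy summary for an experiment.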
def average(list_of_lists, last_n=10):
allValues = np.array(list_of_lists)
allValues = np.sort(allValues)
return np.mean(np.mean(allValues[:,-last_n:]))
fig = plt.figure()
fig.suptitle('Results' )
#colors = ['m','b','r','k','g']
colors = ['r','b','k','g']
experiments = sorted(glob.glob('./experiments/**/'))
assert len(experiments) == len(colors)
for exp, color in zip(experiments, colors):
#~ if '_small' in exp:
#~ if 'big' in exp:
#~ continue
runs=sorted(glob.glob(exp + '*accuracies.pkl'))
accTrSet_m, accValSet_m, accTestSet_m, Xentr_m = [], [], [], []
#print runs
#print len(runs)
for run in runs:
f = open(run)
accTrSet, accValSet, accTestSet, Xentr = cPickle.load(f)
accTrSet_m.append(accTrSet)
accValSet_m.append(accValSet)
accTestSet_m.append(accTestSet)
Xentr_m.append(Xentr)
sns.set(color_codes=True)
plt.subplot(3,1,1)
sns.tsplot(Xentr_m, color=color)
plt.ylabel('Xentropy')
plt.xlabel('Iterations (x100)')
plt.grid(b=True, which='both', color='0.65',linestyle='-')
plt.subplot(3,1,2)
sns.tsplot(accTrSet_m, color=color)
plt.ylabel('Acc. Training')
plt.xlabel('Iterations (x100)')
plt.grid(b=True, which='both', color='0.65',linestyle='-')
plt.subplot(3,1,3)
sns.tsplot(accTestSet_m, color=color)
plt.ylabel('Acc. Test')
plt.xlabel('Iterations (x100)')
plt.grid(b=True, which='both', color='0.65',linestyle='-')
print len(runs),"\t",exp.split('/')[-2],"\t",color, "\t",average(accValSet_m)
plt.show()
| gpl-3.0 |
altairpearl/scikit-learn | examples/linear_model/lasso_dense_vs_sparse_data.py | 348 | 1862 | """
==============================
Lasso on dense and sparse data
==============================
We show that linear_model.Lasso provides the same results for dense and sparse
data and that in the case of sparse data the speed is improved.
"""
print(__doc__)
from time import time
from scipy import sparse
from scipy import linalg
from sklearn.datasets.samples_generator import make_regression
from sklearn.linear_model import Lasso
###############################################################################
# The two Lasso implementations on Dense data
print("--- Dense matrices")
X, y = make_regression(n_samples=200, n_features=5000, random_state=0)
X_sp = sparse.coo_matrix(X)
alpha = 1
sparse_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=1000)
dense_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=1000)
t0 = time()
sparse_lasso.fit(X_sp, y)
print("Sparse Lasso done in %fs" % (time() - t0))
t0 = time()
dense_lasso.fit(X, y)
print("Dense Lasso done in %fs" % (time() - t0))
print("Distance between coefficients : %s"
% linalg.norm(sparse_lasso.coef_ - dense_lasso.coef_))
###############################################################################
# The two Lasso implementations on Sparse data
print("--- Sparse matrices")
Xs = X.copy()
Xs[Xs < 2.5] = 0.0
Xs = sparse.coo_matrix(Xs)
Xs = Xs.tocsc()
print("Matrix density : %s %%" % (Xs.nnz / float(X.size) * 100))
alpha = 0.1
sparse_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=10000)
dense_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=10000)
t0 = time()
sparse_lasso.fit(Xs, y)
print("Sparse Lasso done in %fs" % (time() - t0))
t0 = time()
dense_lasso.fit(Xs.toarray(), y)
print("Dense Lasso done in %fs" % (time() - t0))
print("Distance between coefficients : %s"
% linalg.norm(sparse_lasso.coef_ - dense_lasso.coef_))
| bsd-3-clause |
kazemakase/scikit-learn | examples/model_selection/plot_confusion_matrix.py | 244 | 2496 | """
================
Confusion matrix
================
Example of confusion matrix usage to evaluate the quality
of the output of a classifier on the iris data set. The
diagonal elements represent the number of points for which
the predicted label is equal to the true label, while
off-diagonal elements are those that are mislabeled by the
classifier. The higher the diagonal values of the confusion
matrix the better, indicating many correct predictions.
The figures show the confusion matrix with and without
normalization by class support size (number of elements
in each class). This kind of normalization can be
interesting in case of class imbalance to have a more
visual interpretation of which class is being misclassified.
Here the results are not as good as they could be because our
choice of the regularization parameter C was not the best.
In real-life applications this parameter is usually chosen
using :ref:`grid_search`.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
from sklearn.cross_validation import train_test_split
from sklearn.metrics import confusion_matrix
# import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
# Split the data into a training set and a test set
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
# Run classifier, using a model that is too regularized (C too low) to see
# the impact on the results
classifier = svm.SVC(kernel='linear', C=0.01)
y_pred = classifier.fit(X_train, y_train).predict(X_test)
def plot_confusion_matrix(cm, title='Confusion matrix', cmap=plt.cm.Blues):
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(iris.target_names))
plt.xticks(tick_marks, iris.target_names, rotation=45)
plt.yticks(tick_marks, iris.target_names)
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
# Compute confusion matrix
cm = confusion_matrix(y_test, y_pred)
np.set_printoptions(precision=2)
print('Confusion matrix, without normalization')
print(cm)
plt.figure()
plot_confusion_matrix(cm)
# Normalize the confusion matrix by row (i.e by the number of samples
# in each class)
cm_normalized = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
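# Added note: the [:, np.newaxis] turns the vector of row sums into a column
# so the division broadcasts per row; e.g. a row of counts [13, 0, 6]
# (19 true samples of that class) becomes roughly [0.68, 0.00, 0.32].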
print('Normalized confusion matrix')
print(cm_normalized)
plt.figure()
plot_confusion_matrix(cm_normalized, title='Normalized confusion matrix')
plt.show()
| bsd-3-clause |
ryfeus/lambda-packs | Tensorflow_Pandas_Numpy/source3.6/pandas/core/reshape/reshape.py | 1 | 35967 | # pylint: disable=E1101,E1103
# pylint: disable=W0703,W0622,W0613,W0201
from pandas.compat import range, text_type, zip
from pandas import compat
from functools import partial
import itertools
import numpy as np
from pandas.core.dtypes.common import (
_ensure_platform_int,
is_list_like, is_bool_dtype,
needs_i8_conversion, is_sparse, is_object_dtype)
from pandas.core.dtypes.cast import maybe_promote
from pandas.core.dtypes.missing import notna
from pandas.core.series import Series
from pandas.core.frame import DataFrame
from pandas.core.sparse.api import SparseDataFrame, SparseSeries
from pandas.core.sparse.array import SparseArray
from pandas._libs.sparse import IntIndex
from pandas.core.arrays import Categorical
from pandas.core.arrays.categorical import _factorize_from_iterable
from pandas.core.sorting import (get_group_index, get_compressed_ids,
compress_group_index, decons_obs_group_ids)
import pandas.core.algorithms as algos
from pandas._libs import algos as _algos, reshape as _reshape
from pandas.core.index import Index, MultiIndex
class _Unstacker(object):
"""
Helper class to unstack data / pivot with multi-level index
Parameters
----------
values : ndarray
Values of DataFrame to "Unstack"
index : object
Pandas ``Index``
level : int or str, default last level
Level to "unstack". Accepts a name for the level.
value_columns : Index, optional
Pandas ``Index`` or ``MultiIndex`` object if unstacking a DataFrame
fill_value : scalar, optional
Default value to fill in missing values if subgroups do not have the
same set of labels. By default, missing values will be replaced with
the default fill value for that data type, NaN for float, NaT for
    datetimelike, etc. For integer types, by default data will be converted to
float and missing values will be set to NaN.
constructor : object
Pandas ``DataFrame`` or subclass used to create unstacked
response. If None, DataFrame or SparseDataFrame will be used.
Examples
--------
>>> import pandas as pd
>>> index = pd.MultiIndex.from_tuples([('one', 'a'), ('one', 'b'),
... ('two', 'a'), ('two', 'b')])
>>> s = pd.Series(np.arange(1, 5, dtype=np.int64), index=index)
>>> s
one a 1
b 2
two a 3
b 4
dtype: int64
>>> s.unstack(level=-1)
a b
one 1 2
two 3 4
>>> s.unstack(level=0)
one two
a 1 3
b 2 4
Returns
-------
unstacked : DataFrame
"""
def __init__(self, values, index, level=-1, value_columns=None,
fill_value=None, constructor=None):
self.is_categorical = None
self.is_sparse = is_sparse(values)
if values.ndim == 1:
if isinstance(values, Categorical):
self.is_categorical = values
values = np.array(values)
elif self.is_sparse:
# XXX: Makes SparseArray *dense*, but it's supposedly
# a single column at a time, so it's "doable"
values = values.values
values = values[:, np.newaxis]
self.values = values
self.value_columns = value_columns
self.fill_value = fill_value
if constructor is None:
if self.is_sparse:
self.constructor = SparseDataFrame
else:
self.constructor = DataFrame
else:
self.constructor = constructor
if value_columns is None and values.shape[1] != 1: # pragma: no cover
raise ValueError('must pass column labels for multi-column data')
self.index = index.remove_unused_levels()
if isinstance(self.index, MultiIndex):
if index._reference_duplicate_name(level):
msg = ("Ambiguous reference to {level}. The index "
"names are not unique.".format(level=level))
raise ValueError(msg)
self.level = self.index._get_level_number(level)
# when index includes `nan`, need to lift levels/strides by 1
self.lift = 1 if -1 in self.index.labels[self.level] else 0
self.new_index_levels = list(self.index.levels)
self.new_index_names = list(self.index.names)
self.removed_name = self.new_index_names.pop(self.level)
self.removed_level = self.new_index_levels.pop(self.level)
self.removed_level_full = index.levels[self.level]
self._make_sorted_values_labels()
self._make_selectors()
def _make_sorted_values_labels(self):
v = self.level
labs = list(self.index.labels)
levs = list(self.index.levels)
to_sort = labs[:v] + labs[v + 1:] + [labs[v]]
sizes = [len(x) for x in levs[:v] + levs[v + 1:] + [levs[v]]]
comp_index, obs_ids = get_compressed_ids(to_sort, sizes)
ngroups = len(obs_ids)
indexer = _algos.groupsort_indexer(comp_index, ngroups)[0]
indexer = _ensure_platform_int(indexer)
self.sorted_values = algos.take_nd(self.values, indexer, axis=0)
self.sorted_labels = [l.take(indexer) for l in to_sort]
def _make_selectors(self):
new_levels = self.new_index_levels
# make the mask
remaining_labels = self.sorted_labels[:-1]
level_sizes = [len(x) for x in new_levels]
comp_index, obs_ids = get_compressed_ids(remaining_labels, level_sizes)
ngroups = len(obs_ids)
comp_index = _ensure_platform_int(comp_index)
stride = self.index.levshape[self.level] + self.lift
self.full_shape = ngroups, stride
selector = self.sorted_labels[-1] + stride * comp_index + self.lift
mask = np.zeros(np.prod(self.full_shape), dtype=bool)
mask.put(selector, True)
if mask.sum() < len(self.index):
raise ValueError('Index contains duplicate entries, '
'cannot reshape')
self.group_index = comp_index
self.mask = mask
self.unique_groups = obs_ids
self.compressor = comp_index.searchsorted(np.arange(ngroups))
def get_result(self):
values, _ = self.get_new_values()
columns = self.get_new_columns()
index = self.get_new_index()
# may need to coerce categoricals here
if self.is_categorical is not None:
categories = self.is_categorical.categories
ordered = self.is_categorical.ordered
values = [Categorical(values[:, i], categories=categories,
ordered=ordered)
for i in range(values.shape[-1])]
return self.constructor(values, index=index, columns=columns)
def get_new_values(self):
values = self.values
# place the values
length, width = self.full_shape
stride = values.shape[1]
result_width = width * stride
result_shape = (length, result_width)
mask = self.mask
mask_all = mask.all()
# we can simply reshape if we don't have a mask
if mask_all and len(values):
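            # (added comment) With every (group, label) cell present, the sorted
            # 2-D values can be viewed as (n_groups, n_unstacked_labels,
            # n_value_columns); swapping the last two axes and flattening keeps
            # each original value column's unstacked columns contiguous, which
            # matches the column order built in get_new_columns().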
new_values = (self.sorted_values
.reshape(length, width, stride)
.swapaxes(1, 2)
.reshape(result_shape)
)
new_mask = np.ones(result_shape, dtype=bool)
return new_values, new_mask
# if our mask is all True, then we can use our existing dtype
if mask_all:
dtype = values.dtype
new_values = np.empty(result_shape, dtype=dtype)
else:
dtype, fill_value = maybe_promote(values.dtype, self.fill_value)
new_values = np.empty(result_shape, dtype=dtype)
new_values.fill(fill_value)
new_mask = np.zeros(result_shape, dtype=bool)
name = np.dtype(dtype).name
sorted_values = self.sorted_values
# we need to convert to a basic dtype
# and possibly coerce an input to our output dtype
# e.g. ints -> floats
if needs_i8_conversion(values):
sorted_values = sorted_values.view('i8')
new_values = new_values.view('i8')
name = 'int64'
elif is_bool_dtype(values):
sorted_values = sorted_values.astype('object')
new_values = new_values.astype('object')
name = 'object'
else:
sorted_values = sorted_values.astype(name, copy=False)
# fill in our values & mask
f = getattr(_reshape, "unstack_{name}".format(name=name))
f(sorted_values,
mask.view('u1'),
stride,
length,
width,
new_values,
new_mask.view('u1'))
# reconstruct dtype if needed
if needs_i8_conversion(values):
new_values = new_values.view(values.dtype)
return new_values, new_mask
def get_new_columns(self):
if self.value_columns is None:
if self.lift == 0:
return self.removed_level
lev = self.removed_level
return lev.insert(0, lev._na_value)
stride = len(self.removed_level) + self.lift
width = len(self.value_columns)
propagator = np.repeat(np.arange(width), stride)
if isinstance(self.value_columns, MultiIndex):
new_levels = self.value_columns.levels + (self.removed_level_full,)
new_names = self.value_columns.names + (self.removed_name,)
new_labels = [lab.take(propagator)
for lab in self.value_columns.labels]
else:
new_levels = [self.value_columns, self.removed_level_full]
new_names = [self.value_columns.name, self.removed_name]
new_labels = [propagator]
# The two indices differ only if the unstacked level had unused items:
if len(self.removed_level_full) != len(self.removed_level):
# In this case, we remap the new labels to the original level:
repeater = self.removed_level_full.get_indexer(self.removed_level)
if self.lift:
repeater = np.insert(repeater, 0, -1)
else:
# Otherwise, we just use each level item exactly once:
repeater = np.arange(stride) - self.lift
# The entire level is then just a repetition of the single chunk:
new_labels.append(np.tile(repeater, width))
return MultiIndex(levels=new_levels, labels=new_labels,
names=new_names, verify_integrity=False)
def get_new_index(self):
result_labels = [lab.take(self.compressor)
for lab in self.sorted_labels[:-1]]
# construct the new index
if len(self.new_index_levels) == 1:
lev, lab = self.new_index_levels[0], result_labels[0]
if (lab == -1).any():
lev = lev.insert(len(lev), lev._na_value)
return lev.take(lab)
return MultiIndex(levels=self.new_index_levels, labels=result_labels,
names=self.new_index_names, verify_integrity=False)
def _unstack_multiple(data, clocs, fill_value=None):
if len(clocs) == 0:
return data
# NOTE: This doesn't deal with hierarchical columns yet
index = data.index
clocs = [index._get_level_number(i) for i in clocs]
rlocs = [i for i in range(index.nlevels) if i not in clocs]
clevels = [index.levels[i] for i in clocs]
clabels = [index.labels[i] for i in clocs]
cnames = [index.names[i] for i in clocs]
rlevels = [index.levels[i] for i in rlocs]
rlabels = [index.labels[i] for i in rlocs]
rnames = [index.names[i] for i in rlocs]
shape = [len(x) for x in clevels]
group_index = get_group_index(clabels, shape, sort=False, xnull=False)
comp_ids, obs_ids = compress_group_index(group_index, sort=False)
recons_labels = decons_obs_group_ids(comp_ids, obs_ids, shape, clabels,
xnull=False)
if rlocs == []:
# Everything is in clocs, so the dummy df has a regular index
dummy_index = Index(obs_ids, name='__placeholder__')
else:
dummy_index = MultiIndex(levels=rlevels + [obs_ids],
labels=rlabels + [comp_ids],
names=rnames + ['__placeholder__'],
verify_integrity=False)
if isinstance(data, Series):
dummy = data.copy()
dummy.index = dummy_index
unstacked = dummy.unstack('__placeholder__', fill_value=fill_value)
new_levels = clevels
new_names = cnames
new_labels = recons_labels
else:
if isinstance(data.columns, MultiIndex):
result = data
for i in range(len(clocs)):
val = clocs[i]
result = result.unstack(val)
clocs = [v if i > v else v - 1 for v in clocs]
return result
dummy = data.copy()
dummy.index = dummy_index
unstacked = dummy.unstack('__placeholder__', fill_value=fill_value)
if isinstance(unstacked, Series):
unstcols = unstacked.index
else:
unstcols = unstacked.columns
new_levels = [unstcols.levels[0]] + clevels
new_names = [data.columns.name] + cnames
new_labels = [unstcols.labels[0]]
for rec in recons_labels:
new_labels.append(rec.take(unstcols.labels[-1]))
new_columns = MultiIndex(levels=new_levels, labels=new_labels,
names=new_names, verify_integrity=False)
if isinstance(unstacked, Series):
unstacked.index = new_columns
else:
unstacked.columns = new_columns
return unstacked
def pivot(self, index=None, columns=None, values=None):
"""
See DataFrame.pivot
"""
if values is None:
cols = [columns] if index is None else [index, columns]
append = index is None
indexed = self.set_index(cols, append=append)
else:
if index is None:
index = self.index
else:
index = self[index]
index = MultiIndex.from_arrays([index, self[columns]])
if is_list_like(values) and not isinstance(values, tuple):
# Exclude tuple because it is seen as a single column name
indexed = self._constructor(self[values].values, index=index,
columns=values)
else:
indexed = self._constructor_sliced(self[values].values,
index=index)
return indexed.unstack(columns)
def pivot_simple(index, columns, values):
"""
Produce 'pivot' table based on 3 columns of this DataFrame.
Uses unique values from index / columns and fills with values.
Parameters
----------
index : ndarray
Labels to use to make new frame's index
columns : ndarray
Labels to use to make new frame's columns
values : ndarray
Values to use for populating new frame's values
Notes
-----
Obviously, all 3 of the input arguments must have the same length
Returns
-------
DataFrame
See also
--------
DataFrame.pivot_table : generalization of pivot that can handle
duplicate values for one index/column pair
"""
if (len(index) != len(columns)) or (len(columns) != len(values)):
raise AssertionError('Length of index, columns, and values must be the'
' same')
if len(index) == 0:
return DataFrame(index=[])
hindex = MultiIndex.from_arrays([index, columns])
series = Series(values.ravel(), index=hindex)
series = series.sort_index(level=0)
return series.unstack()
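# Rough illustration of pivot_simple (hypothetical toy data, not a doctest):
#   index   = np.array(['r1', 'r1', 'r2'])
#   columns = np.array(['a', 'b', 'a'])
#   values  = np.array([1., 2., 3.])
#   pivot_simple(index, columns, values)
#   #        a    b
#   # r1   1.0  2.0
#   # r2   3.0  NaN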
def _slow_pivot(index, columns, values):
"""
Produce 'pivot' table based on 3 columns of this DataFrame.
Uses unique values from index / columns and fills with values.
Parameters
----------
index : string or object
Column name to use to make new frame's index
columns : string or object
Column name to use to make new frame's columns
values : string or object
Column name to use for populating new frame's values
Could benefit from some Cython here.
"""
tree = {}
for i, (idx, col) in enumerate(zip(index, columns)):
if col not in tree:
tree[col] = {}
branch = tree[col]
branch[idx] = values[i]
return DataFrame(tree)
def unstack(obj, level, fill_value=None):
if isinstance(level, (tuple, list)):
if len(level) != 1:
# _unstack_multiple only handles MultiIndexes,
# and isn't needed for a single level
return _unstack_multiple(obj, level, fill_value=fill_value)
else:
level = level[0]
if isinstance(obj, DataFrame):
if isinstance(obj.index, MultiIndex):
return _unstack_frame(obj, level, fill_value=fill_value)
else:
return obj.T.stack(dropna=False)
else:
unstacker = _Unstacker(obj.values, obj.index, level=level,
fill_value=fill_value,
constructor=obj._constructor_expanddim)
return unstacker.get_result()
def _unstack_frame(obj, level, fill_value=None):
if obj._is_mixed_type:
unstacker = partial(_Unstacker, index=obj.index,
level=level, fill_value=fill_value)
blocks = obj._data.unstack(unstacker)
return obj._constructor(blocks)
else:
unstacker = _Unstacker(obj.values, obj.index, level=level,
value_columns=obj.columns,
fill_value=fill_value,
constructor=obj._constructor)
return unstacker.get_result()
def stack(frame, level=-1, dropna=True):
"""
Convert DataFrame to Series with multi-level Index. Columns become the
second level of the resulting hierarchical index
Returns
-------
stacked : Series
"""
def factorize(index):
if index.is_unique:
return index, np.arange(len(index))
codes, categories = _factorize_from_iterable(index)
return categories, codes
N, K = frame.shape
if isinstance(frame.columns, MultiIndex):
if frame.columns._reference_duplicate_name(level):
msg = ("Ambiguous reference to {level}. The column "
"names are not unique.".format(level=level))
raise ValueError(msg)
# Will also convert negative level numbers and check if out of bounds.
level_num = frame.columns._get_level_number(level)
if isinstance(frame.columns, MultiIndex):
return _stack_multi_columns(frame, level_num=level_num, dropna=dropna)
elif isinstance(frame.index, MultiIndex):
new_levels = list(frame.index.levels)
new_labels = [lab.repeat(K) for lab in frame.index.labels]
clev, clab = factorize(frame.columns)
new_levels.append(clev)
new_labels.append(np.tile(clab, N).ravel())
new_names = list(frame.index.names)
new_names.append(frame.columns.name)
new_index = MultiIndex(levels=new_levels, labels=new_labels,
names=new_names, verify_integrity=False)
else:
levels, (ilab, clab) = zip(*map(factorize, (frame.index,
frame.columns)))
labels = ilab.repeat(K), np.tile(clab, N).ravel()
new_index = MultiIndex(levels=levels, labels=labels,
names=[frame.index.name, frame.columns.name],
verify_integrity=False)
new_values = frame.values.ravel()
if dropna:
mask = notna(new_values)
new_values = new_values[mask]
new_index = new_index[mask]
return frame._constructor_sliced(new_values, index=new_index)
def stack_multiple(frame, level, dropna=True):
# If all passed levels match up to column names, no
# ambiguity about what to do
if all(lev in frame.columns.names for lev in level):
result = frame
for lev in level:
result = stack(result, lev, dropna=dropna)
# Otherwise, level numbers may change as each successive level is stacked
elif all(isinstance(lev, int) for lev in level):
# As each stack is done, the level numbers decrease, so we need
# to account for that when level is a sequence of ints
result = frame
# _get_level_number() checks level numbers are in range and converts
# negative numbers to positive
level = [frame.columns._get_level_number(lev) for lev in level]
# Can't iterate directly through level as we might need to change
# values as we go
for index in range(len(level)):
lev = level[index]
result = stack(result, lev, dropna=dropna)
# Decrement all level numbers greater than current, as these
# have now shifted down by one
updated_level = []
for other in level:
if other > lev:
updated_level.append(other - 1)
else:
updated_level.append(other)
level = updated_level
else:
raise ValueError("level should contain all level names or all level "
"numbers, not a mixture of the two.")
return result
def _stack_multi_columns(frame, level_num=-1, dropna=True):
def _convert_level_number(level_num, columns):
"""
Logic for converting the level number to something we can safely pass
to swaplevel:
We generally want to convert the level number into a level name, except
when columns do not have names, in which case we must leave as a level
number
"""
if level_num in columns.names:
return columns.names[level_num]
else:
if columns.names[level_num] is None:
return level_num
else:
return columns.names[level_num]
this = frame.copy()
# this makes life much simpler
if level_num != frame.columns.nlevels - 1:
# roll levels to put selected level at end
roll_columns = this.columns
for i in range(level_num, frame.columns.nlevels - 1):
# Need to check if the ints conflict with level names
lev1 = _convert_level_number(i, roll_columns)
lev2 = _convert_level_number(i + 1, roll_columns)
roll_columns = roll_columns.swaplevel(lev1, lev2)
this.columns = roll_columns
if not this.columns.is_lexsorted():
# Workaround the edge case where 0 is one of the column names,
# which interferes with trying to sort based on the first
# level
level_to_sort = _convert_level_number(0, this.columns)
this = this.sort_index(level=level_to_sort, axis=1)
# tuple list excluding level for grouping columns
if len(frame.columns.levels) > 2:
tuples = list(zip(*[lev.take(lab)
for lev, lab in zip(this.columns.levels[:-1],
this.columns.labels[:-1])]))
unique_groups = [key for key, _ in itertools.groupby(tuples)]
new_names = this.columns.names[:-1]
new_columns = MultiIndex.from_tuples(unique_groups, names=new_names)
else:
new_columns = unique_groups = this.columns.levels[0]
# time to ravel the values
new_data = {}
level_vals = this.columns.levels[-1]
level_labels = sorted(set(this.columns.labels[-1]))
level_vals_used = level_vals[level_labels]
levsize = len(level_labels)
drop_cols = []
for key in unique_groups:
try:
loc = this.columns.get_loc(key)
except KeyError:
drop_cols.append(key)
continue
# can make more efficient?
# we almost always return a slice
# but if unsorted can get a boolean
# indexer
if not isinstance(loc, slice):
slice_len = len(loc)
else:
slice_len = loc.stop - loc.start
if slice_len != levsize:
chunk = this.loc[:, this.columns[loc]]
chunk.columns = level_vals.take(chunk.columns.labels[-1])
value_slice = chunk.reindex(columns=level_vals_used).values
else:
if frame._is_mixed_type:
value_slice = this.loc[:, this.columns[loc]].values
else:
value_slice = this.values[:, loc]
new_data[key] = value_slice.ravel()
if len(drop_cols) > 0:
new_columns = new_columns.difference(drop_cols)
N = len(this)
if isinstance(this.index, MultiIndex):
new_levels = list(this.index.levels)
new_names = list(this.index.names)
new_labels = [lab.repeat(levsize) for lab in this.index.labels]
else:
new_levels = [this.index]
new_labels = [np.arange(N).repeat(levsize)]
new_names = [this.index.name] # something better?
new_levels.append(level_vals)
new_labels.append(np.tile(level_labels, N))
new_names.append(frame.columns.names[level_num])
new_index = MultiIndex(levels=new_levels, labels=new_labels,
names=new_names, verify_integrity=False)
result = frame._constructor(new_data, index=new_index, columns=new_columns)
# more efficient way to go about this? can do the whole masking biz but
# will only save a small amount of time...
if dropna:
result = result.dropna(axis=0, how='all')
return result
def get_dummies(data, prefix=None, prefix_sep='_', dummy_na=False,
columns=None, sparse=False, drop_first=False, dtype=None):
"""
Convert categorical variable into dummy/indicator variables
Parameters
----------
data : array-like, Series, or DataFrame
prefix : string, list of strings, or dict of strings, default None
String to append DataFrame column names.
Pass a list with length equal to the number of columns
when calling get_dummies on a DataFrame. Alternatively, `prefix`
can be a dictionary mapping column names to prefixes.
prefix_sep : string, default '_'
If appending prefix, separator/delimiter to use. Or pass a
list or dictionary as with `prefix.`
dummy_na : bool, default False
Add a column to indicate NaNs, if False NaNs are ignored.
columns : list-like, default None
Column names in the DataFrame to be encoded.
If `columns` is None then all the columns with
`object` or `category` dtype will be converted.
sparse : bool, default False
Whether the dummy columns should be sparse or not. Returns
SparseDataFrame if `data` is a Series or if all columns are included.
Otherwise returns a DataFrame with some SparseBlocks.
drop_first : bool, default False
Whether to get k-1 dummies out of k categorical levels by removing the
first level.
.. versionadded:: 0.18.0
dtype : dtype, default np.uint8
Data type for new columns. Only a single dtype is allowed.
.. versionadded:: 0.23.0
Returns
-------
dummies : DataFrame or SparseDataFrame
Examples
--------
>>> import pandas as pd
>>> s = pd.Series(list('abca'))
>>> pd.get_dummies(s)
a b c
0 1 0 0
1 0 1 0
2 0 0 1
3 1 0 0
>>> s1 = ['a', 'b', np.nan]
>>> pd.get_dummies(s1)
a b
0 1 0
1 0 1
2 0 0
>>> pd.get_dummies(s1, dummy_na=True)
a b NaN
0 1 0 0
1 0 1 0
2 0 0 1
>>> df = pd.DataFrame({'A': ['a', 'b', 'a'], 'B': ['b', 'a', 'c'],
... 'C': [1, 2, 3]})
>>> pd.get_dummies(df, prefix=['col1', 'col2'])
C col1_a col1_b col2_a col2_b col2_c
0 1 1 0 0 1 0
1 2 0 1 1 0 0
2 3 1 0 0 0 1
>>> pd.get_dummies(pd.Series(list('abcaa')))
a b c
0 1 0 0
1 0 1 0
2 0 0 1
3 1 0 0
4 1 0 0
>>> pd.get_dummies(pd.Series(list('abcaa')), drop_first=True)
b c
0 0 0
1 1 0
2 0 1
3 0 0
4 0 0
>>> pd.get_dummies(pd.Series(list('abc')), dtype=float)
a b c
0 1.0 0.0 0.0
1 0.0 1.0 0.0
2 0.0 0.0 1.0
See Also
--------
Series.str.get_dummies
"""
from pandas.core.reshape.concat import concat
from itertools import cycle
dtypes_to_encode = ['object', 'category']
if isinstance(data, DataFrame):
# determine columns being encoded
if columns is None:
data_to_encode = data.select_dtypes(
include=dtypes_to_encode)
else:
data_to_encode = data[columns]
# validate prefixes and separator to avoid silently dropping cols
def check_len(item, name):
len_msg = ("Length of '{name}' ({len_item}) did not match the "
"length of the columns being encoded ({len_enc}).")
if is_list_like(item):
if not len(item) == data_to_encode.shape[1]:
len_msg = \
len_msg.format(name=name, len_item=len(item),
len_enc=data_to_encode.shape[1])
raise ValueError(len_msg)
check_len(prefix, 'prefix')
check_len(prefix_sep, 'prefix_sep')
if isinstance(prefix, compat.string_types):
prefix = cycle([prefix])
if isinstance(prefix, dict):
prefix = [prefix[col] for col in data_to_encode.columns]
if prefix is None:
prefix = data_to_encode.columns
# validate separators
if isinstance(prefix_sep, compat.string_types):
prefix_sep = cycle([prefix_sep])
elif isinstance(prefix_sep, dict):
prefix_sep = [prefix_sep[col] for col in data_to_encode.columns]
if data_to_encode.shape == data.shape:
# Encoding the entire df, do not prepend any dropped columns
with_dummies = []
elif columns is not None:
# Encoding only cols specified in columns. Get all cols not in
# columns to prepend to result.
with_dummies = [data.drop(columns, axis=1)]
else:
# Encoding only object and category dtype columns. Get remaining
# columns to prepend to result.
with_dummies = [data.select_dtypes(exclude=dtypes_to_encode)]
for (col, pre, sep) in zip(data_to_encode.iteritems(), prefix,
prefix_sep):
# col is (column_name, column), use just column data here
dummy = _get_dummies_1d(col[1], prefix=pre, prefix_sep=sep,
dummy_na=dummy_na, sparse=sparse,
drop_first=drop_first, dtype=dtype)
with_dummies.append(dummy)
result = concat(with_dummies, axis=1)
else:
result = _get_dummies_1d(data, prefix, prefix_sep, dummy_na,
sparse=sparse,
drop_first=drop_first,
dtype=dtype)
return result
def _get_dummies_1d(data, prefix, prefix_sep='_', dummy_na=False,
sparse=False, drop_first=False, dtype=None):
# Series avoids inconsistent NaN handling
codes, levels = _factorize_from_iterable(Series(data))
if dtype is None:
dtype = np.uint8
dtype = np.dtype(dtype)
if is_object_dtype(dtype):
raise ValueError("dtype=object is not a valid dtype for get_dummies")
def get_empty_Frame(data, sparse):
if isinstance(data, Series):
index = data.index
else:
index = np.arange(len(data))
if not sparse:
return DataFrame(index=index)
else:
return SparseDataFrame(index=index, default_fill_value=0)
# if all NaN
if not dummy_na and len(levels) == 0:
return get_empty_Frame(data, sparse)
codes = codes.copy()
if dummy_na:
codes[codes == -1] = len(levels)
levels = np.append(levels, np.nan)
# if dummy_na, we just fake a nan level. drop_first will drop it again
if drop_first and len(levels) == 1:
return get_empty_Frame(data, sparse)
number_of_cols = len(levels)
if prefix is not None:
dummy_strs = [u'{prefix}{sep}{level}' if isinstance(v, text_type)
else '{prefix}{sep}{level}' for v in levels]
dummy_cols = [dummy_str.format(prefix=prefix, sep=prefix_sep, level=v)
for dummy_str, v in zip(dummy_strs, levels)]
else:
dummy_cols = levels
if isinstance(data, Series):
index = data.index
else:
index = None
if sparse:
sparse_series = {}
N = len(data)
sp_indices = [[] for _ in range(len(dummy_cols))]
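        # (added comment) sp_indices[k] will collect the row positions whose
        # factorized code equals k, i.e. the nonzero rows of the k-th dummy
        # column; they become the IntIndex of the SparseArray built below.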
for ndx, code in enumerate(codes):
if code == -1:
# Blank entries if not dummy_na and code == -1, #GH4446
continue
sp_indices[code].append(ndx)
if drop_first:
# remove first categorical level to avoid perfect collinearity
# GH12042
sp_indices = sp_indices[1:]
dummy_cols = dummy_cols[1:]
for col, ixs in zip(dummy_cols, sp_indices):
sarr = SparseArray(np.ones(len(ixs), dtype=dtype),
sparse_index=IntIndex(N, ixs), fill_value=0,
dtype=dtype)
sparse_series[col] = SparseSeries(data=sarr, index=index)
out = SparseDataFrame(sparse_series, index=index, columns=dummy_cols,
default_fill_value=0,
dtype=dtype)
return out
else:
dummy_mat = np.eye(number_of_cols, dtype=dtype).take(codes, axis=0)
if not dummy_na:
# reset NaN GH4446
dummy_mat[codes == -1] = 0
if drop_first:
# remove first GH12042
dummy_mat = dummy_mat[:, 1:]
dummy_cols = dummy_cols[1:]
return DataFrame(dummy_mat, index=index, columns=dummy_cols)
def make_axis_dummies(frame, axis='minor', transform=None):
"""
Construct 1-0 dummy variables corresponding to designated axis
labels
Parameters
----------
frame : DataFrame
axis : {'major', 'minor'}, default 'minor'
transform : function, default None
Function to apply to axis labels first. For example, to
get "day of week" dummies in a time series regression
you might call::
make_axis_dummies(panel, axis='major',
transform=lambda d: d.weekday())
Returns
-------
dummies : DataFrame
Column names taken from chosen axis
"""
numbers = {'major': 0, 'minor': 1}
num = numbers.get(axis, axis)
items = frame.index.levels[num]
labels = frame.index.labels[num]
if transform is not None:
mapped_items = items.map(transform)
labels, items = _factorize_from_iterable(mapped_items.take(labels))
values = np.eye(len(items), dtype=float)
values = values.take(labels, axis=0)
return DataFrame(values, columns=items, index=frame.index)
| mit |
srowen/spark | python/setup.py | 14 | 13273 | #!/usr/bin/env python3
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib.util
import glob
import os
import sys
from setuptools import setup
from setuptools.command.install import install
from shutil import copyfile, copytree, rmtree
try:
exec(open('pyspark/version.py').read())
except IOError:
print("Failed to load PySpark version file for packaging. You must be in Spark's python dir.",
file=sys.stderr)
sys.exit(-1)
try:
spec = importlib.util.spec_from_file_location("install", "pyspark/install.py")
install_module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(install_module)
except IOError:
print("Failed to load the installing module (pyspark/install.py) which had to be "
"packaged together.",
file=sys.stderr)
sys.exit(-1)
VERSION = __version__ # noqa
# A temporary path so we can access above the Python project root and fetch scripts and jars we need
TEMP_PATH = "deps"
SPARK_HOME = os.path.abspath("../")
# Provide guidance about how to use setup.py
incorrect_invocation_message = """
If you are installing pyspark from spark source, you must first build Spark and
run sdist.
To build Spark with maven you can run:
./build/mvn -DskipTests clean package
Building the source dist is done in the Python directory:
cd python
python setup.py sdist
pip install dist/*.tar.gz"""
# Figure out where the jars are we need to package with PySpark.
JARS_PATH = glob.glob(os.path.join(SPARK_HOME, "assembly/target/scala-*/jars/"))
if len(JARS_PATH) == 1:
JARS_PATH = JARS_PATH[0]
elif (os.path.isfile("../RELEASE") and len(glob.glob("../jars/spark*core*.jar")) == 1):
# Release mode puts the jars in a jars directory
JARS_PATH = os.path.join(SPARK_HOME, "jars")
elif len(JARS_PATH) > 1:
print("Assembly jars exist for multiple scalas ({0}), please cleanup assembly/target".format(
JARS_PATH), file=sys.stderr)
sys.exit(-1)
elif len(JARS_PATH) == 0 and not os.path.exists(TEMP_PATH):
print(incorrect_invocation_message, file=sys.stderr)
sys.exit(-1)
EXAMPLES_PATH = os.path.join(SPARK_HOME, "examples/src/main/python")
SCRIPTS_PATH = os.path.join(SPARK_HOME, "bin")
USER_SCRIPTS_PATH = os.path.join(SPARK_HOME, "sbin")
DATA_PATH = os.path.join(SPARK_HOME, "data")
LICENSES_PATH = os.path.join(SPARK_HOME, "licenses")
SCRIPTS_TARGET = os.path.join(TEMP_PATH, "bin")
USER_SCRIPTS_TARGET = os.path.join(TEMP_PATH, "sbin")
JARS_TARGET = os.path.join(TEMP_PATH, "jars")
EXAMPLES_TARGET = os.path.join(TEMP_PATH, "examples")
DATA_TARGET = os.path.join(TEMP_PATH, "data")
LICENSES_TARGET = os.path.join(TEMP_PATH, "licenses")
# Check and see if we are under the spark path in which case we need to build the symlink farm.
# This is important because we only want to build the symlink farm while under Spark otherwise we
# want to use the symlink farm. And if the symlink farm already exists while we are under Spark
# (e.g. a partially built sdist) we should error and have the user sort it out.
in_spark = (os.path.isfile("../core/src/main/scala/org/apache/spark/SparkContext.scala") or
(os.path.isfile("../RELEASE") and len(glob.glob("../jars/spark*core*.jar")) == 1))
def _supports_symlinks():
"""Check if the system supports symlinks (e.g. *nix) or not."""
return getattr(os, "symlink", None) is not None
if (in_spark):
# Construct links for setup
try:
os.mkdir(TEMP_PATH)
except:
print("Temp path for symlink to parent already exists {0}".format(TEMP_PATH),
file=sys.stderr)
sys.exit(-1)
# If you are changing the versions here, please also change ./python/pyspark/sql/pandas/utils.py
# For Arrow, you should also check ./pom.xml and ensure there are no breaking changes in the
# binary format protocol with the Java version, see ARROW_HOME/format/* for specifications.
# Also don't forget to update python/docs/source/getting_started/install.rst.
_minimum_pandas_version = "0.23.2"
_minimum_pyarrow_version = "1.0.0"
class InstallCommand(install):
# TODO(SPARK-32837) leverage pip's custom options
def run(self):
install.run(self)
# Make sure the destination is always clean.
spark_dist = os.path.join(self.install_lib, "pyspark", "spark-distribution")
rmtree(spark_dist, ignore_errors=True)
if ("PYSPARK_HADOOP_VERSION" in os.environ) or ("PYSPARK_HIVE_VERSION" in os.environ):
# Note that PYSPARK_VERSION environment is just a testing purpose.
# PYSPARK_HIVE_VERSION environment variable is also internal for now in case
# we support another version of Hive in the future.
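            # Illustrative usage (version values here are assumptions; see the
            # install documentation referenced above for the options supported
            # by this release):
            #   PYSPARK_HADOOP_VERSION=2.7 pip install pyspark-<version>.tar.gz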
spark_version, hadoop_version, hive_version = install_module.checked_versions(
os.environ.get("PYSPARK_VERSION", VERSION).lower(),
os.environ.get("PYSPARK_HADOOP_VERSION", install_module.DEFAULT_HADOOP).lower(),
os.environ.get("PYSPARK_HIVE_VERSION", install_module.DEFAULT_HIVE).lower())
if ("PYSPARK_VERSION" not in os.environ and
((install_module.DEFAULT_HADOOP, install_module.DEFAULT_HIVE) ==
(hadoop_version, hive_version))):
# Do not download and install if they are same as default.
return
install_module.install_spark(
dest=spark_dist,
spark_version=spark_version,
hadoop_version=hadoop_version,
hive_version=hive_version)
try:
# We copy the shell script to be under pyspark/python/pyspark so that the launcher scripts
# find it where expected. The rest of the files aren't copied because they are accessed
# using Python imports instead which will be resolved correctly.
try:
os.makedirs("pyspark/python/pyspark")
except OSError:
# Don't worry if the directory already exists.
pass
copyfile("pyspark/shell.py", "pyspark/python/pyspark/shell.py")
if (in_spark):
# Construct the symlink farm - this is necessary since we can't refer to the path above the
# package root and we need to copy the jars and scripts which are up above the python root.
if _supports_symlinks():
os.symlink(JARS_PATH, JARS_TARGET)
os.symlink(SCRIPTS_PATH, SCRIPTS_TARGET)
os.symlink(USER_SCRIPTS_PATH, USER_SCRIPTS_TARGET)
os.symlink(EXAMPLES_PATH, EXAMPLES_TARGET)
os.symlink(DATA_PATH, DATA_TARGET)
os.symlink(LICENSES_PATH, LICENSES_TARGET)
else:
# For windows fall back to the slower copytree
copytree(JARS_PATH, JARS_TARGET)
copytree(SCRIPTS_PATH, SCRIPTS_TARGET)
copytree(USER_SCRIPTS_PATH, USER_SCRIPTS_TARGET)
copytree(EXAMPLES_PATH, EXAMPLES_TARGET)
copytree(DATA_PATH, DATA_TARGET)
copytree(LICENSES_PATH, LICENSES_TARGET)
else:
# If we are not inside of SPARK_HOME verify we have the required symlink farm
if not os.path.exists(JARS_TARGET):
print("To build packaging must be in the python directory under the SPARK_HOME.",
file=sys.stderr)
if not os.path.isdir(SCRIPTS_TARGET):
print(incorrect_invocation_message, file=sys.stderr)
sys.exit(-1)
# Scripts directive requires a list of each script path and does not take wild cards.
script_names = os.listdir(SCRIPTS_TARGET)
scripts = list(map(lambda script: os.path.join(SCRIPTS_TARGET, script), script_names))
# We add find_spark_home.py to the bin directory we install so that pip installed PySpark
# will search for SPARK_HOME with Python.
scripts.append("pyspark/find_spark_home.py")
with open('README.md') as f:
long_description = f.read()
setup(
name='pyspark',
version=VERSION,
description='Apache Spark Python API',
long_description=long_description,
long_description_content_type="text/markdown",
author='Spark Developers',
author_email='[email protected]',
url='https://github.com/apache/spark/tree/master/python',
packages=['pyspark',
'pyspark.cloudpickle',
'pyspark.mllib',
'pyspark.mllib.linalg',
'pyspark.mllib.stat',
'pyspark.ml',
'pyspark.ml.linalg',
'pyspark.ml.param',
'pyspark.sql',
'pyspark.sql.avro',
'pyspark.sql.pandas',
'pyspark.streaming',
'pyspark.bin',
'pyspark.sbin',
'pyspark.jars',
'pyspark.pandas',
'pyspark.pandas.data_type_ops',
'pyspark.pandas.indexes',
'pyspark.pandas.missing',
'pyspark.pandas.plot',
'pyspark.pandas.spark',
'pyspark.pandas.typedef',
'pyspark.pandas.usage_logging',
'pyspark.python.pyspark',
'pyspark.python.lib',
'pyspark.data',
'pyspark.licenses',
'pyspark.resource',
'pyspark.examples.src.main.python'],
include_package_data=True,
package_dir={
'pyspark.jars': 'deps/jars',
'pyspark.bin': 'deps/bin',
'pyspark.sbin': 'deps/sbin',
'pyspark.python.lib': 'lib',
'pyspark.data': 'deps/data',
'pyspark.licenses': 'deps/licenses',
'pyspark.examples.src.main.python': 'deps/examples',
},
package_data={
'pyspark.jars': ['*.jar'],
'pyspark.bin': ['*'],
'pyspark.sbin': ['spark-config.sh', 'spark-daemon.sh',
'start-history-server.sh',
'stop-history-server.sh', ],
'pyspark.python.lib': ['*.zip'],
'pyspark.data': ['*.txt', '*.data'],
'pyspark.licenses': ['*.txt'],
'pyspark.examples.src.main.python': ['*.py', '*/*.py']},
scripts=scripts,
license='http://www.apache.org/licenses/LICENSE-2.0',
# Don't forget to update python/docs/source/getting_started/install.rst
# if you're updating the versions or dependencies.
install_requires=['py4j==0.10.9.2'],
extras_require={
'ml': ['numpy>=1.7'],
'mllib': ['numpy>=1.7'],
'sql': [
'pandas>=%s' % _minimum_pandas_version,
'pyarrow>=%s' % _minimum_pyarrow_version,
],
'pandas_on_spark': [
'pandas>=%s' % _minimum_pandas_version,
'pyarrow>=%s' % _minimum_pyarrow_version,
'numpy>=1.14',
],
},
python_requires='>=3.6',
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Typing :: Typed'],
cmdclass={
'install': InstallCommand,
},
)
finally:
    # We only clean up the symlink farm if we were in Spark; otherwise we are
    # installing rather than packaging.
    if in_spark:
        # Clean up either the symlink farm or the copied version, depending on how it was created.
if _supports_symlinks():
os.remove(os.path.join(TEMP_PATH, "jars"))
os.remove(os.path.join(TEMP_PATH, "bin"))
os.remove(os.path.join(TEMP_PATH, "sbin"))
os.remove(os.path.join(TEMP_PATH, "examples"))
os.remove(os.path.join(TEMP_PATH, "data"))
os.remove(os.path.join(TEMP_PATH, "licenses"))
else:
rmtree(os.path.join(TEMP_PATH, "jars"))
rmtree(os.path.join(TEMP_PATH, "bin"))
rmtree(os.path.join(TEMP_PATH, "sbin"))
rmtree(os.path.join(TEMP_PATH, "examples"))
rmtree(os.path.join(TEMP_PATH, "data"))
rmtree(os.path.join(TEMP_PATH, "licenses"))
os.rmdir(TEMP_PATH)
| apache-2.0 |
mhvk/astropy | astropy/modeling/functional_models.py | 2 | 90573 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Mathematical models."""
# pylint: disable=line-too-long, too-many-lines, too-many-arguments, invalid-name
import numpy as np
from astropy import units as u
from astropy.units import Quantity, UnitsError
from astropy.utils.decorators import deprecated
from .core import (Fittable1DModel, Fittable2DModel)
from .parameters import Parameter, InputParameterError
from .utils import ellipse_extent
__all__ = ['AiryDisk2D', 'Moffat1D', 'Moffat2D', 'Box1D', 'Box2D', 'Const1D',
'Const2D', 'Ellipse2D', 'Disk2D', 'Gaussian1D', 'Gaussian2D',
'Linear1D', 'Lorentz1D', 'RickerWavelet1D', 'RickerWavelet2D',
'RedshiftScaleFactor', 'Multiply', 'Planar2D', 'Scale',
'Sersic1D', 'Sersic2D', 'Shift', 'Sine1D', 'Trapezoid1D',
'TrapezoidDisk2D', 'Ring2D', 'Voigt1D', 'KingProjectedAnalytic1D',
'Exponential1D', 'Logarithmic1D']
TWOPI = 2 * np.pi
FLOAT_EPSILON = float(np.finfo(np.float32).tiny)
# Note that we define this here rather than using the value defined in
# astropy.stats to avoid importing astropy.stats every time astropy.modeling
# is loaded.
GAUSSIAN_SIGMA_TO_FWHM = 2.0 * np.sqrt(2.0 * np.log(2.0))
class Gaussian1D(Fittable1DModel):
"""
One dimensional Gaussian model.
Parameters
----------
amplitude : float or `~astropy.units.Quantity`.
Amplitude (peak value) of the Gaussian - for a normalized profile
(integrating to 1), set amplitude = 1 / (stddev * np.sqrt(2 * np.pi))
mean : float or `~astropy.units.Quantity`.
Mean of the Gaussian.
stddev : float or `~astropy.units.Quantity`.
Standard deviation of the Gaussian with FWHM = 2 * stddev * np.sqrt(2 * np.log(2)).
Notes
-----
    Input ``x``, ``mean`` and ``stddev`` must either all be provided with compatible
    units or all be unitless numbers.
Model formula:
.. math:: f(x) = A e^{- \\frac{\\left(x - x_{0}\\right)^{2}}{2 \\sigma^{2}}}
Examples
--------
>>> from astropy.modeling import models
>>> def tie_center(model):
... mean = 50 * model.stddev
... return mean
>>> tied_parameters = {'mean': tie_center}
Specify that 'mean' is a tied parameter in one of two ways:
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3,
... tied=tied_parameters)
or
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3)
>>> g1.mean.tied
False
>>> g1.mean.tied = tie_center
>>> g1.mean.tied
<function tie_center at 0x...>
Fixed parameters:
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3,
... fixed={'stddev': True})
>>> g1.stddev.fixed
True
or
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3)
>>> g1.stddev.fixed
False
>>> g1.stddev.fixed = True
>>> g1.stddev.fixed
True
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Gaussian1D
plt.figure()
s1 = Gaussian1D()
r = np.arange(-5, 5, .01)
for factor in range(1, 4):
s1.amplitude = factor
plt.plot(r, s1(r), color=str(0.25 * factor), lw=2)
plt.axis([-5, 5, -1, 4])
plt.show()
See Also
--------
Gaussian2D, Box1D, Moffat1D, Lorentz1D
"""
amplitude = Parameter(default=1, description="Amplitude (peak value) of the Gaussian")
mean = Parameter(default=0, description="Position of peak (Gaussian)")
# Ensure stddev makes sense if its bounds are not explicitly set.
# stddev must be non-zero and positive.
stddev = Parameter(default=1, bounds=(FLOAT_EPSILON, None), description="Standard deviation of the Gaussian")
def bounding_box(self, factor=5.5):
"""
Tuple defining the default ``bounding_box`` limits,
``(x_low, x_high)``
Parameters
----------
factor : float
The multiple of `stddev` used to define the limits.
The default is 5.5, corresponding to a relative error < 1e-7.
Examples
--------
>>> from astropy.modeling.models import Gaussian1D
>>> model = Gaussian1D(mean=0, stddev=2)
>>> model.bounding_box
(-11.0, 11.0)
This range can be set directly (see: `Model.bounding_box
<astropy.modeling.Model.bounding_box>`) or by using a different factor,
like:
>>> model.bounding_box = model.bounding_box(factor=2)
>>> model.bounding_box
(-4.0, 4.0)
"""
x0 = self.mean
dx = factor * self.stddev
return (x0 - dx, x0 + dx)
@property
def fwhm(self):
"""Gaussian full width at half maximum."""
return self.stddev * GAUSSIAN_SIGMA_TO_FWHM
@staticmethod
def evaluate(x, amplitude, mean, stddev):
"""
Gaussian1D model function.
"""
return amplitude * np.exp(- 0.5 * (x - mean) ** 2 / stddev ** 2)
@staticmethod
def fit_deriv(x, amplitude, mean, stddev):
"""
Gaussian1D model function derivatives.
"""
d_amplitude = np.exp(-0.5 / stddev ** 2 * (x - mean) ** 2)
d_mean = amplitude * d_amplitude * (x - mean) / stddev ** 2
d_stddev = amplitude * d_amplitude * (x - mean) ** 2 / stddev ** 3
return [d_amplitude, d_mean, d_stddev]
@property
def input_units(self):
if self.mean.unit is None:
return None
return {self.inputs[0]: self.mean.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {'mean': inputs_unit[self.inputs[0]],
'stddev': inputs_unit[self.inputs[0]],
'amplitude': outputs_unit[self.outputs[0]]}
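# Illustrative sketch (editor's addition, not part of astropy): a minimal check of the
# relations documented above -- the peak sits at ``mean``, the half maximum is reached at
# ``mean +/- fwhm/2``, and the default bounding box spans ``mean +/- 5.5*stddev``.
# The helper name ``_example_gaussian1d`` is hypothetical.
def _example_gaussian1d():
    g = Gaussian1D(amplitude=10, mean=5, stddev=0.3)
    assert np.isclose(g(5), 10)                              # peak value at the mean
    assert np.isclose(g.fwhm, GAUSSIAN_SIGMA_TO_FWHM * 0.3)  # FWHM / stddev relation
    assert np.isclose(g(5 + g.fwhm / 2), 5)                  # half maximum
    low, high = g.bounding_box                               # default factor = 5.5
    assert np.isclose(low, 5 - 5.5 * 0.3)
    assert np.isclose(high, 5 + 5.5 * 0.3)
    return g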
class Gaussian2D(Fittable2DModel):
r"""
Two dimensional Gaussian model.
Parameters
----------
amplitude : float or `~astropy.units.Quantity`.
Amplitude (peak value) of the Gaussian.
x_mean : float or `~astropy.units.Quantity`.
Mean of the Gaussian in x.
y_mean : float or `~astropy.units.Quantity`.
Mean of the Gaussian in y.
x_stddev : float or `~astropy.units.Quantity` or None.
Standard deviation of the Gaussian in x before rotating by theta. Must
be None if a covariance matrix (``cov_matrix``) is provided. If no
``cov_matrix`` is given, ``None`` means the default value (1).
y_stddev : float or `~astropy.units.Quantity` or None.
Standard deviation of the Gaussian in y before rotating by theta. Must
be None if a covariance matrix (``cov_matrix``) is provided. If no
``cov_matrix`` is given, ``None`` means the default value (1).
theta : float or `~astropy.units.Quantity`, optional.
Rotation angle (value in radians). The rotation angle increases
counterclockwise. Must be None if a covariance matrix (``cov_matrix``)
is provided. If no ``cov_matrix`` is given, ``None`` means the default
value (0).
cov_matrix : ndarray, optional
A 2x2 covariance matrix. If specified, overrides the ``x_stddev``,
``y_stddev``, and ``theta`` defaults.
Notes
-----
    Input ``x, y``, ``[x,y]_mean`` and ``[x,y]_stddev`` must either all be provided
    with compatible units or all be unitless numbers.
Model formula:
.. math::
f(x, y) = A e^{-a\left(x - x_{0}\right)^{2} -b\left(x - x_{0}\right)
\left(y - y_{0}\right) -c\left(y - y_{0}\right)^{2}}
Using the following definitions:
.. math::
a = \left(\frac{\cos^{2}{\left (\theta \right )}}{2 \sigma_{x}^{2}} +
\frac{\sin^{2}{\left (\theta \right )}}{2 \sigma_{y}^{2}}\right)
b = \left(\frac{\sin{\left (2 \theta \right )}}{2 \sigma_{x}^{2}} -
\frac{\sin{\left (2 \theta \right )}}{2 \sigma_{y}^{2}}\right)
c = \left(\frac{\sin^{2}{\left (\theta \right )}}{2 \sigma_{x}^{2}} +
\frac{\cos^{2}{\left (\theta \right )}}{2 \sigma_{y}^{2}}\right)
If using a ``cov_matrix``, the model is of the form:
.. math::
f(x, y) = A e^{-0.5 \left(\vec{x} - \vec{x}_{0}\right)^{T} \Sigma^{-1} \left(\vec{x} - \vec{x}_{0}\right)}
where :math:`\vec{x} = [x, y]`, :math:`\vec{x}_{0} = [x_{0}, y_{0}]`,
and :math:`\Sigma` is the covariance matrix:
.. math::
\Sigma = \left(\begin{array}{ccc}
\sigma_x^2 & \rho \sigma_x \sigma_y \\
\rho \sigma_x \sigma_y & \sigma_y^2
\end{array}\right)
:math:`\rho` is the correlation between ``x`` and ``y``, which should
be between -1 and +1. Positive correlation corresponds to a
``theta`` in the range 0 to 90 degrees. Negative correlation
corresponds to a ``theta`` in the range of 0 to -90 degrees.
See [1]_ for more details about the 2D Gaussian function.
See Also
--------
Gaussian1D, Box2D, Moffat2D
References
----------
.. [1] https://en.wikipedia.org/wiki/Gaussian_function
"""
amplitude = Parameter(default=1, description="Amplitude of the Gaussian")
x_mean = Parameter(default=0, description="Peak position (along x axis) of Gaussian")
y_mean = Parameter(default=0, description="Peak position (along y axis) of Gaussian")
x_stddev = Parameter(default=1, description="Standard deviation of the Gaussian (along x axis)")
y_stddev = Parameter(default=1, description="Standard deviation of the Gaussian (along y axis)")
theta = Parameter(default=0.0, description="Rotation angle [in radians] (Optional parameter)")
def __init__(self, amplitude=amplitude.default, x_mean=x_mean.default,
y_mean=y_mean.default, x_stddev=None, y_stddev=None,
theta=None, cov_matrix=None, **kwargs):
if cov_matrix is None:
if x_stddev is None:
x_stddev = self.__class__.x_stddev.default
if y_stddev is None:
y_stddev = self.__class__.y_stddev.default
if theta is None:
theta = self.__class__.theta.default
else:
if x_stddev is not None or y_stddev is not None or theta is not None:
raise InputParameterError("Cannot specify both cov_matrix and "
"x/y_stddev/theta")
            # Compute the principal coordinate system transformation
cov_matrix = np.array(cov_matrix)
if cov_matrix.shape != (2, 2):
raise ValueError("Covariance matrix must be 2x2")
eig_vals, eig_vecs = np.linalg.eig(cov_matrix)
x_stddev, y_stddev = np.sqrt(eig_vals)
y_vec = eig_vecs[:, 0]
theta = np.arctan2(y_vec[1], y_vec[0])
# Ensure stddev makes sense if its bounds are not explicitly set.
# stddev must be non-zero and positive.
# TODO: Investigate why setting this in Parameter above causes
# convolution tests to hang.
kwargs.setdefault('bounds', {})
kwargs['bounds'].setdefault('x_stddev', (FLOAT_EPSILON, None))
kwargs['bounds'].setdefault('y_stddev', (FLOAT_EPSILON, None))
super().__init__(
amplitude=amplitude, x_mean=x_mean, y_mean=y_mean,
x_stddev=x_stddev, y_stddev=y_stddev, theta=theta, **kwargs)
@property
def x_fwhm(self):
"""Gaussian full width at half maximum in X."""
return self.x_stddev * GAUSSIAN_SIGMA_TO_FWHM
@property
def y_fwhm(self):
"""Gaussian full width at half maximum in Y."""
return self.y_stddev * GAUSSIAN_SIGMA_TO_FWHM
def bounding_box(self, factor=5.5):
"""
Tuple defining the default ``bounding_box`` limits in each dimension,
``((y_low, y_high), (x_low, x_high))``
The default offset from the mean is 5.5-sigma, corresponding
to a relative error < 1e-7. The limits are adjusted for rotation.
Parameters
----------
factor : float, optional
The multiple of `x_stddev` and `y_stddev` used to define the limits.
The default is 5.5.
Examples
--------
>>> from astropy.modeling.models import Gaussian2D
>>> model = Gaussian2D(x_mean=0, y_mean=0, x_stddev=1, y_stddev=2)
>>> model.bounding_box
((-11.0, 11.0), (-5.5, 5.5))
This range can be set directly (see: `Model.bounding_box
<astropy.modeling.Model.bounding_box>`) or by using a different factor
like:
>>> model.bounding_box = model.bounding_box(factor=2)
>>> model.bounding_box
((-4.0, 4.0), (-2.0, 2.0))
"""
a = factor * self.x_stddev
b = factor * self.y_stddev
theta = self.theta.value
dx, dy = ellipse_extent(a, b, theta)
return ((self.y_mean - dy, self.y_mean + dy),
(self.x_mean - dx, self.x_mean + dx))
@staticmethod
def evaluate(x, y, amplitude, x_mean, y_mean, x_stddev, y_stddev, theta):
"""Two dimensional Gaussian function"""
cost2 = np.cos(theta) ** 2
sint2 = np.sin(theta) ** 2
sin2t = np.sin(2. * theta)
xstd2 = x_stddev ** 2
ystd2 = y_stddev ** 2
xdiff = x - x_mean
ydiff = y - y_mean
a = 0.5 * ((cost2 / xstd2) + (sint2 / ystd2))
b = 0.5 * ((sin2t / xstd2) - (sin2t / ystd2))
c = 0.5 * ((sint2 / xstd2) + (cost2 / ystd2))
return amplitude * np.exp(-((a * xdiff ** 2) + (b * xdiff * ydiff) +
(c * ydiff ** 2)))
@staticmethod
def fit_deriv(x, y, amplitude, x_mean, y_mean, x_stddev, y_stddev, theta):
"""Two dimensional Gaussian function derivative with respect to parameters"""
cost = np.cos(theta)
sint = np.sin(theta)
cost2 = np.cos(theta) ** 2
sint2 = np.sin(theta) ** 2
cos2t = np.cos(2. * theta)
sin2t = np.sin(2. * theta)
xstd2 = x_stddev ** 2
ystd2 = y_stddev ** 2
xstd3 = x_stddev ** 3
ystd3 = y_stddev ** 3
xdiff = x - x_mean
ydiff = y - y_mean
xdiff2 = xdiff ** 2
ydiff2 = ydiff ** 2
a = 0.5 * ((cost2 / xstd2) + (sint2 / ystd2))
b = 0.5 * ((sin2t / xstd2) - (sin2t / ystd2))
c = 0.5 * ((sint2 / xstd2) + (cost2 / ystd2))
g = amplitude * np.exp(-((a * xdiff2) + (b * xdiff * ydiff) +
(c * ydiff2)))
da_dtheta = (sint * cost * ((1. / ystd2) - (1. / xstd2)))
da_dx_stddev = -cost2 / xstd3
da_dy_stddev = -sint2 / ystd3
db_dtheta = (cos2t / xstd2) - (cos2t / ystd2)
db_dx_stddev = -sin2t / xstd3
db_dy_stddev = sin2t / ystd3
dc_dtheta = -da_dtheta
dc_dx_stddev = -sint2 / xstd3
dc_dy_stddev = -cost2 / ystd3
dg_dA = g / amplitude
dg_dx_mean = g * ((2. * a * xdiff) + (b * ydiff))
dg_dy_mean = g * ((b * xdiff) + (2. * c * ydiff))
dg_dx_stddev = g * (-(da_dx_stddev * xdiff2 +
db_dx_stddev * xdiff * ydiff +
dc_dx_stddev * ydiff2))
dg_dy_stddev = g * (-(da_dy_stddev * xdiff2 +
db_dy_stddev * xdiff * ydiff +
dc_dy_stddev * ydiff2))
dg_dtheta = g * (-(da_dtheta * xdiff2 +
db_dtheta * xdiff * ydiff +
dc_dtheta * ydiff2))
return [dg_dA, dg_dx_mean, dg_dy_mean, dg_dx_stddev, dg_dy_stddev,
dg_dtheta]
@property
def input_units(self):
if self.x_mean.unit is None and self.y_mean.unit is None:
return None
return {self.inputs[0]: self.x_mean.unit,
self.inputs[1]: self.y_mean.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit[self.inputs[0]] != inputs_unit[self.inputs[1]]:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return {'x_mean': inputs_unit[self.inputs[0]],
'y_mean': inputs_unit[self.inputs[0]],
'x_stddev': inputs_unit[self.inputs[0]],
'y_stddev': inputs_unit[self.inputs[0]],
'theta': u.rad,
'amplitude': outputs_unit[self.outputs[0]]}
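# Illustrative sketch (editor's addition, not part of astropy): the ``cov_matrix`` branch of
# ``Gaussian2D.__init__`` above derives ``x_stddev``, ``y_stddev`` and ``theta`` from an
# eigendecomposition; for a diagonal covariance matrix this reduces to the per-axis standard
# deviations with ``theta = 0``. The helper name ``_example_gaussian2d_cov`` is hypothetical.
def _example_gaussian2d_cov():
    cov = [[4.0, 0.0], [0.0, 1.0]]
    g_cov = Gaussian2D(amplitude=1, x_mean=0, y_mean=0, cov_matrix=cov)
    g_std = Gaussian2D(amplitude=1, x_mean=0, y_mean=0,
                       x_stddev=2.0, y_stddev=1.0, theta=0.0)
    assert np.isclose(g_cov.x_stddev.value, 2.0)
    assert np.isclose(g_cov.y_stddev.value, 1.0)
    x, y = np.meshgrid(np.linspace(-3.0, 3.0, 7), np.linspace(-3.0, 3.0, 7))
    assert np.allclose(g_cov(x, y), g_std(x, y))  # same profile either way
    return g_cov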
class Shift(Fittable1DModel):
"""
Shift a coordinate.
Parameters
----------
offset : float
Offset to add to a coordinate.
"""
offset = Parameter(default=0, description="Offset to add to a model")
linear = True
_has_inverse_bounding_box = True
@property
def input_units(self):
if self.offset.unit is None:
return None
return {self.inputs[0]: self.offset.unit}
@property
def inverse(self):
"""One dimensional inverse Shift model function"""
inv = self.copy()
inv.offset *= -1
try:
self.bounding_box
except NotImplementedError:
pass
else:
inv.bounding_box = tuple(self.evaluate(x, self.offset) for x in self.bounding_box)
return inv
@staticmethod
def evaluate(x, offset):
"""One dimensional Shift model function"""
return x + offset
@staticmethod
def sum_of_implicit_terms(x):
"""Evaluate the implicit term (x) of one dimensional Shift model"""
return x
@staticmethod
def fit_deriv(x, *params):
"""One dimensional Shift model derivative with respect to parameter"""
d_offset = np.ones_like(x)
return [d_offset]
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {'offset': outputs_unit[self.outputs[0]]}
class Scale(Fittable1DModel):
"""
Multiply a model by a dimensionless factor.
Parameters
----------
factor : float
Factor by which to scale a coordinate.
Notes
-----
If ``factor`` is a `~astropy.units.Quantity` then the units will be
stripped before the scaling operation.
"""
factor = Parameter(default=1, description="Factor by which to scale a model")
linear = True
fittable = True
_input_units_strict = True
_input_units_allow_dimensionless = True
_has_inverse_bounding_box = True
@property
def input_units(self):
if self.factor.unit is None:
return None
return {self.inputs[0]: self.factor.unit}
@property
def inverse(self):
"""One dimensional inverse Scale model function"""
inv = self.copy()
inv.factor = 1 / self.factor
try:
self.bounding_box
except NotImplementedError:
pass
else:
inv.bounding_box = tuple(self.evaluate(x, self.factor) for x in self.bounding_box)
return inv
@staticmethod
def evaluate(x, factor):
"""One dimensional Scale model function"""
if isinstance(factor, u.Quantity):
factor = factor.value
return factor * x
@staticmethod
def fit_deriv(x, *params):
"""One dimensional Scale model derivative with respect to parameter"""
d_factor = x
return [d_factor]
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {'factor': outputs_unit[self.outputs[0]]}
class Multiply(Fittable1DModel):
"""
Multiply a model by a quantity or number.
Parameters
----------
factor : float
Factor by which to multiply a coordinate.
"""
factor = Parameter(default=1, description="Factor by which to multiply a model")
linear = True
fittable = True
_has_inverse_bounding_box = True
@property
def inverse(self):
"""One dimensional inverse multiply model function"""
inv = self.copy()
inv.factor = 1 / self.factor
try:
self.bounding_box
except NotImplementedError:
pass
else:
inv.bounding_box = tuple(self.evaluate(x, self.factor) for x in self.bounding_box)
return inv
@staticmethod
def evaluate(x, factor):
"""One dimensional multiply model function"""
return factor * x
@staticmethod
def fit_deriv(x, *params):
"""One dimensional multiply model derivative with respect to parameter"""
d_factor = x
return [d_factor]
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {'factor': outputs_unit[self.outputs[0]]}
class RedshiftScaleFactor(Fittable1DModel):
"""
One dimensional redshift scale factor model.
Parameters
----------
z : float
Redshift value.
Notes
-----
Model formula:
.. math:: f(x) = x (1 + z)
"""
z = Parameter(description='Redshift', default=0)
_has_inverse_bounding_box = True
@staticmethod
def evaluate(x, z):
"""One dimensional RedshiftScaleFactor model function"""
return (1 + z) * x
@staticmethod
def fit_deriv(x, z):
"""One dimensional RedshiftScaleFactor model derivative"""
d_z = x
return [d_z]
@property
def inverse(self):
"""Inverse RedshiftScaleFactor model"""
inv = self.copy()
inv.z = 1.0 / (1.0 + self.z) - 1.0
try:
self.bounding_box
except NotImplementedError:
pass
else:
inv.bounding_box = tuple(self.evaluate(x, self.z) for x in self.bounding_box)
return inv
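# Illustrative sketch (editor's addition, not part of astropy): the ``inverse`` property above
# maps z -> 1/(1 + z) - 1, so applying the model and then its inverse recovers the original
# coordinate. The helper name ``_example_redshift_roundtrip`` is hypothetical.
def _example_redshift_roundtrip():
    m = RedshiftScaleFactor(z=0.5)
    x = np.array([1.0, 2.0, 4.0])
    assert np.allclose(m(x), 1.5 * x)        # x * (1 + z)
    assert np.allclose(m.inverse(m(x)), x)   # round trip is the identity
    return m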
class Sersic1D(Fittable1DModel):
r"""
One dimensional Sersic surface brightness profile.
Parameters
----------
amplitude : float
Surface brightness at r_eff.
r_eff : float
Effective (half-light) radius
n : float
Sersic Index.
See Also
--------
Gaussian1D, Moffat1D, Lorentz1D
Notes
-----
Model formula:
.. math::
I(r)=I_e\exp\left\{-b_n\left[\left(\frac{r}{r_{e}}\right)^{(1/n)}-1\right]\right\}
The constant :math:`b_n` is defined such that :math:`r_e` contains half the total
luminosity, and can be solved for numerically.
.. math::
\Gamma(2n) = 2\gamma (b_n,2n)
Examples
--------
.. plot::
:include-source:
import numpy as np
from astropy.modeling.models import Sersic1D
import matplotlib.pyplot as plt
plt.figure()
plt.subplot(111, xscale='log', yscale='log')
s1 = Sersic1D(amplitude=1, r_eff=5)
r=np.arange(0, 100, .01)
for n in range(1, 10):
s1.n = n
plt.plot(r, s1(r), color=str(float(n) / 15))
plt.axis([1e-1, 30, 1e-2, 1e3])
plt.xlabel('log Radius')
plt.ylabel('log Surface Brightness')
plt.text(.25, 1.5, 'n=1')
plt.text(.25, 300, 'n=10')
plt.xticks([])
plt.yticks([])
plt.show()
References
----------
.. [1] http://ned.ipac.caltech.edu/level5/March05/Graham/Graham2.html
"""
amplitude = Parameter(default=1, description="Surface brightness at r_eff")
r_eff = Parameter(default=1, description="Effective (half-light) radius")
n = Parameter(default=4, description="Sersic Index")
_gammaincinv = None
@classmethod
def evaluate(cls, r, amplitude, r_eff, n):
"""One dimensional Sersic profile function."""
if cls._gammaincinv is None:
try:
from scipy.special import gammaincinv
cls._gammaincinv = gammaincinv
            except (ValueError, ImportError):
raise ImportError('Sersic1D model requires scipy.')
return (amplitude * np.exp(
-cls._gammaincinv(2 * n, 0.5) * ((r / r_eff) ** (1 / n) - 1)))
@property
def input_units(self):
if self.r_eff.unit is None:
return None
return {self.inputs[0]: self.r_eff.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {'r_eff': inputs_unit[self.inputs[0]],
'amplitude': outputs_unit[self.outputs[0]]}
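# Illustrative sketch (editor's addition, not part of astropy): because the exponent in
# ``Sersic1D.evaluate`` vanishes at r = r_eff, the profile equals ``amplitude`` at the
# half-light radius. Requires scipy (for ``gammaincinv``), hence the guard. The helper name
# ``_example_sersic1d`` is hypothetical.
def _example_sersic1d():
    try:
        import scipy.special  # noqa: F401
    except ImportError:
        return None
    s = Sersic1D(amplitude=2.0, r_eff=5.0, n=4)
    assert np.isclose(s(5.0), 2.0)  # surface brightness at r_eff
    return s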
class Sine1D(Fittable1DModel):
"""
One dimensional Sine model.
Parameters
----------
amplitude : float
Oscillation amplitude
frequency : float
Oscillation frequency
phase : float
Oscillation phase
See Also
--------
Const1D, Linear1D
Notes
-----
Model formula:
.. math:: f(x) = A \\sin(2 \\pi f x + 2 \\pi p)
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Sine1D
plt.figure()
s1 = Sine1D(amplitude=1, frequency=.25)
r=np.arange(0, 10, .01)
for amplitude in range(1,4):
s1.amplitude = amplitude
plt.plot(r, s1(r), color=str(0.25 * amplitude), lw=2)
plt.axis([0, 10, -5, 5])
plt.show()
"""
amplitude = Parameter(default=1, description="Oscillation amplitude")
frequency = Parameter(default=1, description="Oscillation frequency")
phase = Parameter(default=0, description="Oscillation phase")
@staticmethod
def evaluate(x, amplitude, frequency, phase):
"""One dimensional Sine model function"""
# Note: If frequency and x are quantities, they should normally have
# inverse units, so that argument ends up being dimensionless. However,
# np.sin of a dimensionless quantity will crash, so we remove the
# quantity-ness from argument in this case (another option would be to
# multiply by * u.rad but this would be slower overall).
argument = TWOPI * (frequency * x + phase)
if isinstance(argument, Quantity):
argument = argument.value
return amplitude * np.sin(argument)
@staticmethod
def fit_deriv(x, amplitude, frequency, phase):
"""One dimensional Sine model derivative"""
d_amplitude = np.sin(TWOPI * frequency * x + TWOPI * phase)
d_frequency = (TWOPI * x * amplitude *
np.cos(TWOPI * frequency * x + TWOPI * phase))
d_phase = (TWOPI * amplitude *
np.cos(TWOPI * frequency * x + TWOPI * phase))
return [d_amplitude, d_frequency, d_phase]
@property
def input_units(self):
if self.frequency.unit is None:
return None
return {self.inputs[0]: 1. / self.frequency.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {'frequency': inputs_unit[self.inputs[0]] ** -1,
'amplitude': outputs_unit[self.outputs[0]]}
class Linear1D(Fittable1DModel):
"""
One dimensional Line model.
Parameters
----------
slope : float
Slope of the straight line
intercept : float
Intercept of the straight line
See Also
--------
Const1D
Notes
-----
Model formula:
.. math:: f(x) = a x + b
"""
slope = Parameter(default=1, description="Slope of the straight line")
intercept = Parameter(default=0, description="Intercept of the straight line")
linear = True
@staticmethod
def evaluate(x, slope, intercept):
"""One dimensional Line model function"""
return slope * x + intercept
@staticmethod
def fit_deriv(x, *params):
"""One dimensional Line model derivative with respect to parameters"""
d_slope = x
d_intercept = np.ones_like(x)
return [d_slope, d_intercept]
@property
def inverse(self):
new_slope = self.slope ** -1
new_intercept = -self.intercept / self.slope
return self.__class__(slope=new_slope, intercept=new_intercept)
@property
def input_units(self):
if self.intercept.unit is None and self.slope.unit is None:
return None
return {self.inputs[0]: self.intercept.unit / self.slope.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {'intercept': outputs_unit[self.outputs[0]],
'slope': outputs_unit[self.outputs[0]] / inputs_unit[self.inputs[0]]}
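# Illustrative sketch (editor's addition, not part of astropy): the analytic ``inverse`` above
# swaps slope -> 1/slope and intercept -> -intercept/slope, so composing a line with its
# inverse is the identity. The helper name ``_example_linear1d_inverse`` is hypothetical.
def _example_linear1d_inverse():
    line = Linear1D(slope=2.0, intercept=3.0)
    x = np.linspace(-5.0, 5.0, 11)
    assert np.allclose(line.inverse(line(x)), x)
    return line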
class Planar2D(Fittable2DModel):
"""
Two dimensional Plane model.
Parameters
----------
slope_x : float
Slope of the plane in X
slope_y : float
Slope of the plane in Y
intercept : float
Z-intercept of the plane
Notes
-----
Model formula:
.. math:: f(x, y) = a x + b y + c
"""
slope_x = Parameter(default=1, description="Slope of the plane in X")
slope_y = Parameter(default=1, description="Slope of the plane in Y")
intercept = Parameter(default=0, description="Z-intercept of the plane")
linear = True
@staticmethod
def evaluate(x, y, slope_x, slope_y, intercept):
"""Two dimensional Plane model function"""
return slope_x * x + slope_y * y + intercept
@staticmethod
def fit_deriv(x, y, *params):
"""Two dimensional Plane model derivative with respect to parameters"""
d_slope_x = x
d_slope_y = y
d_intercept = np.ones_like(x)
return [d_slope_x, d_slope_y, d_intercept]
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {'intercept': outputs_unit['z'],
'slope_x': outputs_unit['z'] / inputs_unit['x'],
'slope_y': outputs_unit['z'] / inputs_unit['y']}
class Lorentz1D(Fittable1DModel):
"""
One dimensional Lorentzian model.
Parameters
----------
amplitude : float or `~astropy.units.Quantity`.
Peak value - for a normalized profile (integrating to 1),
set amplitude = 2 / (np.pi * fwhm)
x_0 : float or `~astropy.units.Quantity`.
Position of the peak
fwhm : float or `~astropy.units.Quantity`.
Full width at half maximum (FWHM)
See Also
--------
Gaussian1D, Box1D, RickerWavelet1D
Notes
-----
    Input ``x``, position ``x_0`` and ``fwhm`` must either all be provided with
    compatible units or all be unitless numbers.
Model formula:
.. math::
f(x) = \\frac{A \\gamma^{2}}{\\gamma^{2} + \\left(x - x_{0}\\right)^{2}}
where :math:`\\gamma` is half of given FWHM.
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Lorentz1D
plt.figure()
s1 = Lorentz1D()
r = np.arange(-5, 5, .01)
for factor in range(1, 4):
s1.amplitude = factor
plt.plot(r, s1(r), color=str(0.25 * factor), lw=2)
plt.axis([-5, 5, -1, 4])
plt.show()
"""
amplitude = Parameter(default=1, description="Peak value")
x_0 = Parameter(default=0, description="Position of the peak")
fwhm = Parameter(default=1, description="Full width at half maximum")
@staticmethod
def evaluate(x, amplitude, x_0, fwhm):
"""One dimensional Lorentzian model function"""
return (amplitude * ((fwhm / 2.) ** 2) / ((x - x_0) ** 2 +
(fwhm / 2.) ** 2))
@staticmethod
def fit_deriv(x, amplitude, x_0, fwhm):
"""One dimensional Lorentzian model derivative with respect to parameters"""
d_amplitude = fwhm ** 2 / (fwhm ** 2 + (x - x_0) ** 2)
d_x_0 = (amplitude * d_amplitude * (2 * x - 2 * x_0) /
(fwhm ** 2 + (x - x_0) ** 2))
d_fwhm = 2 * amplitude * d_amplitude / fwhm * (1 - d_amplitude)
return [d_amplitude, d_x_0, d_fwhm]
def bounding_box(self, factor=25):
"""Tuple defining the default ``bounding_box`` limits,
``(x_low, x_high)``.
Parameters
----------
factor : float
The multiple of FWHM used to define the limits.
Default is chosen to include most (99%) of the
area under the curve, while still showing the
central feature of interest.
"""
x0 = self.x_0
dx = factor * self.fwhm
return (x0 - dx, x0 + dx)
@property
def input_units(self):
if self.x_0.unit is None:
return None
return {self.inputs[0]: self.x_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {'x_0': inputs_unit[self.inputs[0]],
'fwhm': inputs_unit[self.inputs[0]],
'amplitude': outputs_unit[self.outputs[0]]}
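# Illustrative sketch (editor's addition, not part of astropy): a quick check that the
# Lorentzian formula above peaks at ``x_0`` and falls to half the amplitude at
# ``x_0 +/- fwhm/2``. The helper name ``_example_lorentz1d`` is hypothetical.
def _example_lorentz1d():
    lor = Lorentz1D(amplitude=4.0, x_0=0.0, fwhm=2.0)
    assert np.isclose(lor(0.0), 4.0)                     # peak value at x_0
    assert np.allclose(lor(np.array([-1.0, 1.0])), 2.0)  # half maximum at x_0 +/- fwhm/2
    return lor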
class Voigt1D(Fittable1DModel):
"""
One dimensional model for the Voigt profile.
Parameters
----------
x_0 : float or `~astropy.units.Quantity`
Position of the peak
amplitude_L : float or `~astropy.units.Quantity`.
The Lorentzian amplitude (peak of the associated Lorentz function)
- for a normalized profile (integrating to 1), set
amplitude_L = 2 / (np.pi * fwhm_L)
fwhm_L : float or `~astropy.units.Quantity`
The Lorentzian full width at half maximum
fwhm_G : float or `~astropy.units.Quantity`.
The Gaussian full width at half maximum
method : str, optional
Algorithm for computing the complex error function; one of
        'Humlicek2' (default; fast, with relative accuracy generally better than ``3.e-5``) or
        'Scipy' (alias 'wofz'; requires ``scipy``, almost as fast and serves as the
        reference in accuracy).
See Also
--------
Gaussian1D, Lorentz1D
Notes
-----
    Input ``x``, position ``x_0`` and the ``fwhm_*`` must either all be provided with
    compatible units or all be unitless numbers.
    The Voigt function is calculated as the real part of the complex error function computed from either
Humlicek's rational approximations (JQSRT 21:309, 1979; 27:437, 1982) following
Schreier 2018 (MNRAS 479, 3068; and ``hum2zpf16m`` from his cpfX.py module); or
`~scipy.special.wofz` (implementing 'Faddeeva.cc').
Examples
--------
.. plot::
:include-source:
import numpy as np
from astropy.modeling.models import Voigt1D
import matplotlib.pyplot as plt
plt.figure()
x = np.arange(0, 10, 0.01)
v1 = Voigt1D(x_0=5, amplitude_L=10, fwhm_L=0.5, fwhm_G=0.9)
plt.plot(x, v1(x))
plt.show()
"""
x_0 = Parameter(default=0,
description="Position of the peak")
amplitude_L = Parameter(default=1, # noqa: N815
description="The Lorentzian amplitude")
fwhm_L = Parameter(default=2/np.pi, # noqa: N815
description="The Lorentzian full width at half maximum")
fwhm_G = Parameter(default=np.log(2), # noqa: N815
description="The Gaussian full width at half maximum")
sqrt_pi = np.sqrt(np.pi)
sqrt_ln2 = np.sqrt(np.log(2))
sqrt_ln2pi = np.sqrt(np.log(2) * np.pi)
_last_z = np.zeros(1, dtype=complex)
_last_w = np.zeros(1, dtype=float)
_faddeeva = None
def __init__(self, x_0=x_0.default, amplitude_L=amplitude_L.default, # noqa: N803
fwhm_L=fwhm_L.default, fwhm_G=fwhm_G.default, method='humlicek2', # noqa: N803
**kwargs):
if str(method).lower() in ('wofz', 'scipy'):
try:
from scipy.special import wofz
except (ValueError, ImportError) as err:
raise ImportError(f'Voigt1D method {method} requires scipy: {err}.') from err
self._faddeeva = wofz
elif str(method).lower() == 'humlicek2':
self._faddeeva = self._hum2zpf16c
else:
raise ValueError(f'Not a valid method for Voigt1D Faddeeva function: {method}.')
self.method = self._faddeeva.__name__
super().__init__(x_0=x_0, amplitude_L=amplitude_L, fwhm_L=fwhm_L, fwhm_G=fwhm_G, **kwargs)
def _wrap_wofz(self, z):
"""Call complex error (Faddeeva) function w(z) implemented by algorithm `method`;
cache results for consecutive calls from `evaluate`, `fit_deriv`."""
if (z.shape == self._last_z.shape and
np.allclose(z, self._last_z, rtol=1.e-14, atol=1.e-15)):
return self._last_w
self._last_w = self._faddeeva(z)
self._last_z = z
return self._last_w
def evaluate(self, x, x_0, amplitude_L, fwhm_L, fwhm_G): # noqa: N803
"""One dimensional Voigt function scaled to Lorentz peak amplitude."""
z = np.atleast_1d(2 * (x - x_0) + 1j * fwhm_L) * self.sqrt_ln2 / fwhm_G
# The normalised Voigt profile is w.real * self.sqrt_ln2 / (self.sqrt_pi * fwhm_G) * 2 ;
# for the legacy definition we multiply with np.pi * fwhm_L / 2 * amplitude_L
return self._wrap_wofz(z).real * self.sqrt_ln2pi / fwhm_G * fwhm_L * amplitude_L
def fit_deriv(self, x, x_0, amplitude_L, fwhm_L, fwhm_G): # noqa: N803
"""Derivative of the one dimensional Voigt function with respect to parameters."""
s = self.sqrt_ln2 / fwhm_G
z = np.atleast_1d(2 * (x - x_0) + 1j * fwhm_L) * s
# V * constant from McLean implementation (== their Voigt function)
w = self._wrap_wofz(z) * s * fwhm_L * amplitude_L * self.sqrt_pi
# Schreier (2018) Eq. 6 == (dvdx + 1j * dvdy) / (sqrt(pi) * fwhm_L * amplitude_L)
dwdz = -2 * z * w + 2j * s * fwhm_L * amplitude_L
return [-dwdz.real * 2 * s,
w.real / amplitude_L,
w.real / fwhm_L - dwdz.imag * s,
(-w.real - s * (2 * (x - x_0) * dwdz.real - fwhm_L * dwdz.imag)) / fwhm_G]
@property
def input_units(self):
if self.x_0.unit is None:
return None
return {self.inputs[0]: self.x_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {'x_0': inputs_unit[self.inputs[0]],
'fwhm_L': inputs_unit[self.inputs[0]],
'fwhm_G': inputs_unit[self.inputs[0]],
'amplitude_L': outputs_unit[self.outputs[0]]}
@staticmethod
def _hum2zpf16c(z, s=10.0):
"""Complex error function w(z) for z = x + iy combining Humlicek's rational approximations:
|x| + y > 10: Humlicek (JQSRT, 1982) rational approximation for region II;
else: Humlicek (JQSRT, 1979) rational approximation with n=16 and delta=y0=1.35
Version using a mask and np.place;
single complex argument version of Franz Schreier's cpfX.hum2zpf16m.
Originally licensed under a 3-clause BSD style license - see
https://atmos.eoc.dlr.de/tools/lbl4IR/cpfX.py
"""
# Optimized (single fraction) Humlicek region I rational approximation for n=16, delta=1.35
AA = np.array([+46236.3358828121, -147726.58393079657j, # noqa: N806
-206562.80451354137, 281369.1590631087j,
+183092.74968253175, -184787.96830696272j,
-66155.39578477248, 57778.05827983565j,
+11682.770904216826, -9442.402767960672j,
-1052.8438624933142, 814.0996198624186j,
+45.94499030751872, -34.59751573708725j,
-0.7616559377907136, 0.5641895835476449j]) # 1j/sqrt(pi) to the 12. digit
bb = np.array([+7918.06640624997, 0.0,
-126689.0625, 0.0,
+295607.8125, 0.0,
-236486.25, 0.0,
+84459.375, 0.0,
-15015.0, 0.0,
+1365.0, 0.0,
-60.0, 0.0,
+1.0])
sqrt_piinv = 1.0 / np.sqrt(np.pi)
zz = z * z
w = 1j * (z * (zz * sqrt_piinv - 1.410474)) / (0.75 + zz*(zz - 3.0))
if np.any(z.imag < s):
mask = abs(z.real) + z.imag < s # returns true for interior points
# returns small complex array covering only the interior region
Z = z[np.where(mask)] + 1.35j
ZZ = Z * Z
numer = (((((((((((((((AA[15]*Z + AA[14])*Z + AA[13])*Z + AA[12])*Z + AA[11])*Z +
AA[10])*Z + AA[9])*Z + AA[8])*Z + AA[7])*Z + AA[6])*Z +
AA[5])*Z + AA[4])*Z+AA[3])*Z + AA[2])*Z + AA[1])*Z + AA[0])
denom = (((((((ZZ + bb[14])*ZZ + bb[12])*ZZ + bb[10])*ZZ+bb[8])*ZZ + bb[6])*ZZ +
bb[4])*ZZ + bb[2])*ZZ + bb[0]
np.place(w, mask, numer / denom)
return w
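# Illustrative sketch (editor's addition, not part of astropy): with the default 'humlicek2'
# method no scipy import is needed; passing ``method='wofz'`` would use `scipy.special.wofz`
# instead. The parameter values mirror the docstring example; the helper name
# ``_example_voigt1d`` is hypothetical.
def _example_voigt1d():
    v = Voigt1D(x_0=0.0, amplitude_L=10.0, fwhm_L=0.5, fwhm_G=0.9)
    x = np.linspace(-5.0, 5.0, 101)
    y = v(x)
    assert np.argmax(y) == 50       # profile peaks at x_0
    assert np.allclose(y, y[::-1])  # and is symmetric about x_0
    return v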
class Const1D(Fittable1DModel):
"""
One dimensional Constant model.
Parameters
----------
amplitude : float
Value of the constant function
See Also
--------
Const2D
Notes
-----
Model formula:
.. math:: f(x) = A
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Const1D
plt.figure()
s1 = Const1D()
r = np.arange(-5, 5, .01)
for factor in range(1, 4):
s1.amplitude = factor
plt.plot(r, s1(r), color=str(0.25 * factor), lw=2)
plt.axis([-5, 5, -1, 4])
plt.show()
"""
amplitude = Parameter(default=1, description="Value of the constant function")
linear = True
@staticmethod
def evaluate(x, amplitude):
"""One dimensional Constant model function"""
if amplitude.size == 1:
# This is slightly faster than using ones_like and multiplying
x = np.empty_like(x, subok=False)
x.fill(amplitude.item())
else:
# This case is less likely but could occur if the amplitude
# parameter is given an array-like value
x = amplitude * np.ones_like(x, subok=False)
if isinstance(amplitude, Quantity):
return Quantity(x, unit=amplitude.unit, copy=False)
return x
@staticmethod
def fit_deriv(x, amplitude):
"""One dimensional Constant model derivative with respect to parameters"""
d_amplitude = np.ones_like(x)
return [d_amplitude]
@property
def input_units(self):
return None
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {'amplitude': outputs_unit[self.outputs[0]]}
class Const2D(Fittable2DModel):
"""
Two dimensional Constant model.
Parameters
----------
amplitude : float
Value of the constant function
See Also
--------
Const1D
Notes
-----
Model formula:
.. math:: f(x, y) = A
"""
amplitude = Parameter(default=1, description="Value of the constant function")
linear = True
@staticmethod
def evaluate(x, y, amplitude):
"""Two dimensional Constant model function"""
if amplitude.size == 1:
# This is slightly faster than using ones_like and multiplying
x = np.empty_like(x, subok=False)
x.fill(amplitude.item())
else:
# This case is less likely but could occur if the amplitude
# parameter is given an array-like value
x = amplitude * np.ones_like(x, subok=False)
if isinstance(amplitude, Quantity):
return Quantity(x, unit=amplitude.unit, copy=False)
return x
@property
def input_units(self):
return None
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {'amplitude': outputs_unit[self.outputs[0]]}
class Ellipse2D(Fittable2DModel):
"""
A 2D Ellipse model.
Parameters
----------
amplitude : float
Value of the ellipse.
x_0 : float
x position of the center of the disk.
y_0 : float
y position of the center of the disk.
a : float
The length of the semimajor axis.
b : float
The length of the semiminor axis.
theta : float
The rotation angle in radians of the semimajor axis. The
rotation angle increases counterclockwise from the positive x
axis.
See Also
--------
Disk2D, Box2D
Notes
-----
Model formula:
.. math::
f(x, y) = \\left \\{
\\begin{array}{ll}
\\mathrm{amplitude} & : \\left[\\frac{(x - x_0) \\cos
\\theta + (y - y_0) \\sin \\theta}{a}\\right]^2 +
\\left[\\frac{-(x - x_0) \\sin \\theta + (y - y_0)
\\cos \\theta}{b}\\right]^2 \\leq 1 \\\\
0 & : \\mathrm{otherwise}
\\end{array}
\\right.
Examples
--------
.. plot::
:include-source:
import numpy as np
from astropy.modeling.models import Ellipse2D
from astropy.coordinates import Angle
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
x0, y0 = 25, 25
a, b = 20, 10
theta = Angle(30, 'deg')
e = Ellipse2D(amplitude=100., x_0=x0, y_0=y0, a=a, b=b,
theta=theta.radian)
y, x = np.mgrid[0:50, 0:50]
fig, ax = plt.subplots(1, 1)
ax.imshow(e(x, y), origin='lower', interpolation='none', cmap='Greys_r')
e2 = mpatches.Ellipse((x0, y0), 2*a, 2*b, theta.degree, edgecolor='red',
facecolor='none')
ax.add_patch(e2)
plt.show()
"""
amplitude = Parameter(default=1, description="Value of the ellipse")
x_0 = Parameter(default=0, description="X position of the center of the disk.")
y_0 = Parameter(default=0, description="Y position of the center of the disk.")
a = Parameter(default=1, description="The length of the semimajor axis")
b = Parameter(default=1, description="The length of the semiminor axis")
theta = Parameter(default=0, description="The rotation angle in radians of the semimajor axis (Positive - counterclockwise)")
@staticmethod
def evaluate(x, y, amplitude, x_0, y_0, a, b, theta):
"""Two dimensional Ellipse model function."""
xx = x - x_0
yy = y - y_0
cost = np.cos(theta)
sint = np.sin(theta)
numerator1 = (xx * cost) + (yy * sint)
numerator2 = -(xx * sint) + (yy * cost)
in_ellipse = (((numerator1 / a) ** 2 + (numerator2 / b) ** 2) <= 1.)
result = np.select([in_ellipse], [amplitude])
if isinstance(amplitude, Quantity):
return Quantity(result, unit=amplitude.unit, copy=False)
return result
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box`` limits.
``((y_low, y_high), (x_low, x_high))``
"""
a = self.a
b = self.b
theta = self.theta.value
dx, dy = ellipse_extent(a, b, theta)
return ((self.y_0 - dy, self.y_0 + dy),
(self.x_0 - dx, self.x_0 + dx))
@property
def input_units(self):
if self.x_0.unit is None:
return None
return {self.inputs[0]: self.x_0.unit,
self.inputs[1]: self.y_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit[self.inputs[0]] != inputs_unit[self.inputs[1]]:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return {'x_0': inputs_unit[self.inputs[0]],
'y_0': inputs_unit[self.inputs[0]],
'a': inputs_unit[self.inputs[0]],
'b': inputs_unit[self.inputs[0]],
'theta': u.rad,
'amplitude': outputs_unit[self.outputs[0]]}
class Disk2D(Fittable2DModel):
"""
Two dimensional radial symmetric Disk model.
Parameters
----------
amplitude : float
Value of the disk function
x_0 : float
x position center of the disk
y_0 : float
y position center of the disk
R_0 : float
Radius of the disk
See Also
--------
Box2D, TrapezoidDisk2D
Notes
-----
Model formula:
.. math::
f(r) = \\left \\{
\\begin{array}{ll}
A & : r \\leq R_0 \\\\
0 & : r > R_0
\\end{array}
\\right.
"""
amplitude = Parameter(default=1, description="Value of disk function")
x_0 = Parameter(default=0, description="X position of center of the disk")
y_0 = Parameter(default=0, description="Y position of center of the disk")
R_0 = Parameter(default=1, description="Radius of the disk")
@staticmethod
def evaluate(x, y, amplitude, x_0, y_0, R_0):
"""Two dimensional Disk model function"""
rr = (x - x_0) ** 2 + (y - y_0) ** 2
result = np.select([rr <= R_0 ** 2], [amplitude])
if isinstance(amplitude, Quantity):
return Quantity(result, unit=amplitude.unit, copy=False)
return result
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box`` limits.
``((y_low, y_high), (x_low, x_high))``
"""
return ((self.y_0 - self.R_0, self.y_0 + self.R_0),
(self.x_0 - self.R_0, self.x_0 + self.R_0))
@property
def input_units(self):
if self.x_0.unit is None and self.y_0.unit is None:
return None
return {self.inputs[0]: self.x_0.unit,
self.inputs[1]: self.y_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit[self.inputs[0]] != inputs_unit[self.inputs[1]]:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return {'x_0': inputs_unit[self.inputs[0]],
'y_0': inputs_unit[self.inputs[0]],
'R_0': inputs_unit[self.inputs[0]],
'amplitude': outputs_unit[self.outputs[0]]}
class Ring2D(Fittable2DModel):
"""
Two dimensional radial symmetric Ring model.
Parameters
----------
amplitude : float
Value of the disk function
x_0 : float
x position center of the disk
y_0 : float
y position center of the disk
r_in : float
Inner radius of the ring
width : float
Width of the ring.
r_out : float
Outer Radius of the ring. Can be specified instead of width.
See Also
--------
Disk2D, TrapezoidDisk2D
Notes
-----
Model formula:
.. math::
f(r) = \\left \\{
\\begin{array}{ll}
A & : r_{in} \\leq r \\leq r_{out} \\\\
0 & : \\text{else}
\\end{array}
\\right.
Where :math:`r_{out} = r_{in} + r_{width}`.
"""
amplitude = Parameter(default=1, description="Value of the disk function")
x_0 = Parameter(default=0, description="X position of center of disc")
y_0 = Parameter(default=0, description="Y position of center of disc")
r_in = Parameter(default=1, description="Inner radius of the ring")
width = Parameter(default=1, description="Width of the ring")
def __init__(self, amplitude=amplitude.default, x_0=x_0.default,
y_0=y_0.default, r_in=r_in.default, width=width.default,
r_out=None, **kwargs):
# If outer radius explicitly given, it overrides default width.
if r_out is not None:
if width != self.width.default:
raise InputParameterError(
"Cannot specify both width and outer radius separately.")
width = r_out - r_in
elif width is None:
width = self.width.default
super().__init__(
amplitude=amplitude, x_0=x_0, y_0=y_0, r_in=r_in, width=width,
**kwargs)
@staticmethod
def evaluate(x, y, amplitude, x_0, y_0, r_in, width):
"""Two dimensional Ring model function."""
rr = (x - x_0) ** 2 + (y - y_0) ** 2
r_range = np.logical_and(rr >= r_in ** 2, rr <= (r_in + width) ** 2)
result = np.select([r_range], [amplitude])
if isinstance(amplitude, Quantity):
return Quantity(result, unit=amplitude.unit, copy=False)
return result
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box``.
``((y_low, y_high), (x_low, x_high))``
"""
dr = self.r_in + self.width
return ((self.y_0 - dr, self.y_0 + dr),
(self.x_0 - dr, self.x_0 + dr))
@property
def input_units(self):
if self.x_0.unit is None:
return None
return {self.inputs[0]: self.x_0.unit,
self.inputs[1]: self.y_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit[self.inputs[0]] != inputs_unit[self.inputs[1]]:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return {'x_0': inputs_unit[self.inputs[0]],
'y_0': inputs_unit[self.inputs[0]],
'r_in': inputs_unit[self.inputs[0]],
'width': inputs_unit[self.inputs[0]],
'amplitude': outputs_unit[self.outputs[0]]}
class Box1D(Fittable1DModel):
"""
One dimensional Box model.
Parameters
----------
amplitude : float
Amplitude A
x_0 : float
Position of the center of the box function
width : float
Width of the box
See Also
--------
Box2D, TrapezoidDisk2D
Notes
-----
Model formula:
.. math::
f(x) = \\left \\{
\\begin{array}{ll}
A & : x_0 - w/2 \\leq x \\leq x_0 + w/2 \\\\
0 & : \\text{else}
\\end{array}
\\right.
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Box1D
plt.figure()
s1 = Box1D()
r = np.arange(-5, 5, .01)
for factor in range(1, 4):
s1.amplitude = factor
s1.width = factor
plt.plot(r, s1(r), color=str(0.25 * factor), lw=2)
plt.axis([-5, 5, -1, 4])
plt.show()
"""
amplitude = Parameter(default=1, description="Amplitude A")
x_0 = Parameter(default=0, description="Position of center of box function")
width = Parameter(default=1, description="Width of the box")
@staticmethod
def evaluate(x, amplitude, x_0, width):
"""One dimensional Box model function"""
inside = np.logical_and(x >= x_0 - width / 2., x <= x_0 + width / 2.)
return np.select([inside], [amplitude], 0)
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box`` limits.
        ``(x_low, x_high)``
"""
dx = self.width / 2
return (self.x_0 - dx, self.x_0 + dx)
@property
def input_units(self):
if self.x_0.unit is None:
return None
return {self.inputs[0]: self.x_0.unit}
@property
def return_units(self):
if self.amplitude.unit is None:
return None
return {self.outputs[0]: self.amplitude.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {'x_0': inputs_unit[self.inputs[0]],
'width': inputs_unit[self.inputs[0]],
'amplitude': outputs_unit[self.outputs[0]]}
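# Illustrative sketch (editor's addition, not part of astropy): the box above is ``amplitude``
# on the closed interval [x_0 - width/2, x_0 + width/2] and zero outside, which is exactly the
# interval returned by ``bounding_box``. The helper name ``_example_box1d`` is hypothetical.
def _example_box1d():
    box = Box1D(amplitude=3.0, x_0=0.0, width=2.0)
    x = np.array([-2.0, -1.0, 0.0, 1.0, 2.0])
    assert np.allclose(box(x), [0.0, 3.0, 3.0, 3.0, 0.0])
    assert np.allclose(box.bounding_box, (-1.0, 1.0))
    return box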
class Box2D(Fittable2DModel):
"""
Two dimensional Box model.
Parameters
----------
amplitude : float
Amplitude
x_0 : float
x position of the center of the box function
x_width : float
Width in x direction of the box
y_0 : float
y position of the center of the box function
y_width : float
Width in y direction of the box
See Also
--------
Box1D, Gaussian2D, Moffat2D
Notes
-----
Model formula:
.. math::
f(x, y) = \\left \\{
\\begin{array}{ll}
A : & x_0 - w_x/2 \\leq x \\leq x_0 + w_x/2 \\text{ and} \\\\
& y_0 - w_y/2 \\leq y \\leq y_0 + w_y/2 \\\\
0 : & \\text{else}
\\end{array}
\\right.
"""
amplitude = Parameter(default=1, description="Amplitude")
x_0 = Parameter(default=0, description="X position of the center of the box function")
y_0 = Parameter(default=0, description="Y position of the center of the box function")
x_width = Parameter(default=1, description="Width in x direction of the box")
y_width = Parameter(default=1, description="Width in y direction of the box")
@staticmethod
def evaluate(x, y, amplitude, x_0, y_0, x_width, y_width):
"""Two dimensional Box model function"""
x_range = np.logical_and(x >= x_0 - x_width / 2.,
x <= x_0 + x_width / 2.)
y_range = np.logical_and(y >= y_0 - y_width / 2.,
y <= y_0 + y_width / 2.)
result = np.select([np.logical_and(x_range, y_range)], [amplitude], 0)
if isinstance(amplitude, Quantity):
return Quantity(result, unit=amplitude.unit, copy=False)
return result
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box``.
``((y_low, y_high), (x_low, x_high))``
"""
dx = self.x_width / 2
dy = self.y_width / 2
return ((self.y_0 - dy, self.y_0 + dy),
(self.x_0 - dx, self.x_0 + dx))
@property
def input_units(self):
if self.x_0.unit is None:
return None
return {self.inputs[0]: self.x_0.unit,
self.inputs[1]: self.y_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {'x_0': inputs_unit[self.inputs[0]],
'y_0': inputs_unit[self.inputs[1]],
'x_width': inputs_unit[self.inputs[0]],
'y_width': inputs_unit[self.inputs[1]],
'amplitude': outputs_unit[self.outputs[0]]}
class Trapezoid1D(Fittable1DModel):
"""
One dimensional Trapezoid model.
Parameters
----------
amplitude : float
Amplitude of the trapezoid
x_0 : float
Center position of the trapezoid
width : float
Width of the constant part of the trapezoid.
slope : float
Slope of the tails of the trapezoid
See Also
--------
Box1D, Gaussian1D, Moffat1D
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Trapezoid1D
plt.figure()
s1 = Trapezoid1D()
r = np.arange(-5, 5, .01)
for factor in range(1, 4):
s1.amplitude = factor
s1.width = factor
plt.plot(r, s1(r), color=str(0.25 * factor), lw=2)
plt.axis([-5, 5, -1, 4])
plt.show()
"""
amplitude = Parameter(default=1, description="Amplitude of the trapezoid")
x_0 = Parameter(default=0, description="Center position of the trapezoid")
width = Parameter(default=1, description="Width of constant part of the trapezoid")
slope = Parameter(default=1, description="Slope of the tails of trapezoid")
@staticmethod
def evaluate(x, amplitude, x_0, width, slope):
"""One dimensional Trapezoid model function"""
# Compute the four points where the trapezoid changes slope
# x1 <= x2 <= x3 <= x4
x2 = x_0 - width / 2.
x3 = x_0 + width / 2.
x1 = x2 - amplitude / slope
x4 = x3 + amplitude / slope
# Compute model values in pieces between the change points
range_a = np.logical_and(x >= x1, x < x2)
range_b = np.logical_and(x >= x2, x < x3)
range_c = np.logical_and(x >= x3, x < x4)
val_a = slope * (x - x1)
val_b = amplitude
val_c = slope * (x4 - x)
result = np.select([range_a, range_b, range_c], [val_a, val_b, val_c])
if isinstance(amplitude, Quantity):
return Quantity(result, unit=amplitude.unit, copy=False)
return result
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box`` limits.
        ``(x_low, x_high)``
"""
dx = self.width / 2 + self.amplitude / self.slope
return (self.x_0 - dx, self.x_0 + dx)
@property
def input_units(self):
if self.x_0.unit is None:
return None
return {self.inputs[0]: self.x_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {'x_0': inputs_unit[self.inputs[0]],
'width': inputs_unit[self.inputs[0]],
'slope': outputs_unit[self.outputs[0]] / inputs_unit[self.inputs[0]],
'amplitude': outputs_unit[self.outputs[0]]}
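# Illustrative sketch (editor's addition, not part of astropy): with amplitude=2, width=2 and
# slope=1 the piecewise evaluation above gives a flat top of height 2 on [-1, 1] and linear
# ramps reaching zero at +/-3, which also matches ``bounding_box``. The helper name
# ``_example_trapezoid1d`` is hypothetical.
def _example_trapezoid1d():
    trap = Trapezoid1D(amplitude=2.0, x_0=0.0, width=2.0, slope=1.0)
    x = np.array([-4.0, -2.0, 0.0, 2.0, 4.0])
    assert np.allclose(trap(x), [0.0, 1.0, 2.0, 1.0, 0.0])
    assert np.allclose(trap.bounding_box, (-3.0, 3.0))
    return trap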
class TrapezoidDisk2D(Fittable2DModel):
"""
Two dimensional circular Trapezoid model.
Parameters
----------
amplitude : float
Amplitude of the trapezoid
x_0 : float
x position of the center of the trapezoid
y_0 : float
y position of the center of the trapezoid
R_0 : float
Radius of the constant part of the trapezoid.
slope : float
Slope of the tails of the trapezoid in x direction.
See Also
--------
Disk2D, Box2D
"""
amplitude = Parameter(default=1, description="Amplitude of the trapezoid")
x_0 = Parameter(default=0, description="X position of the center of the trapezoid")
y_0 = Parameter(default=0, description="Y position of the center of the trapezoid")
R_0 = Parameter(default=1, description="Radius of constant part of trapezoid")
slope = Parameter(default=1, description="Slope of tails of trapezoid in x direction")
@staticmethod
def evaluate(x, y, amplitude, x_0, y_0, R_0, slope):
"""Two dimensional Trapezoid Disk model function"""
r = np.sqrt((x - x_0) ** 2 + (y - y_0) ** 2)
range_1 = r <= R_0
range_2 = np.logical_and(r > R_0, r <= R_0 + amplitude / slope)
val_1 = amplitude
val_2 = amplitude + slope * (R_0 - r)
result = np.select([range_1, range_2], [val_1, val_2])
if isinstance(amplitude, Quantity):
return Quantity(result, unit=amplitude.unit, copy=False)
return result
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box``.
``((y_low, y_high), (x_low, x_high))``
"""
dr = self.R_0 + self.amplitude / self.slope
return ((self.y_0 - dr, self.y_0 + dr),
(self.x_0 - dr, self.x_0 + dr))
@property
def input_units(self):
if self.x_0.unit is None and self.y_0.unit is None:
return None
return {self.inputs[0]: self.x_0.unit,
self.inputs[1]: self.y_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit['x'] != inputs_unit['y']:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return {'x_0': inputs_unit[self.inputs[0]],
'y_0': inputs_unit[self.inputs[0]],
'R_0': inputs_unit[self.inputs[0]],
'slope': outputs_unit[self.outputs[0]] / inputs_unit[self.inputs[0]],
'amplitude': outputs_unit[self.outputs[0]]}
class RickerWavelet1D(Fittable1DModel):
"""
One dimensional Ricker Wavelet model (sometimes known as a "Mexican Hat"
model).
.. note::
See https://github.com/astropy/astropy/pull/9445 for discussions
related to renaming of this model.
Parameters
----------
amplitude : float
Amplitude
x_0 : float
Position of the peak
sigma : float
Width of the Ricker wavelet
See Also
--------
RickerWavelet2D, Box1D, Gaussian1D, Trapezoid1D
Notes
-----
Model formula:
.. math::
f(x) = {A \\left(1 - \\frac{\\left(x - x_{0}\\right)^{2}}{\\sigma^{2}}\\right)
e^{- \\frac{\\left(x - x_{0}\\right)^{2}}{2 \\sigma^{2}}}}
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import RickerWavelet1D
plt.figure()
s1 = RickerWavelet1D()
r = np.arange(-5, 5, .01)
for factor in range(1, 4):
s1.amplitude = factor
            s1.sigma = factor
plt.plot(r, s1(r), color=str(0.25 * factor), lw=2)
plt.axis([-5, 5, -2, 4])
plt.show()
"""
amplitude = Parameter(default=1, description="Amplitude (peak) value")
x_0 = Parameter(default=0, description="Position of the peak")
sigma = Parameter(default=1, description="Width of the Ricker wavelet")
@staticmethod
def evaluate(x, amplitude, x_0, sigma):
"""One dimensional Ricker Wavelet model function"""
xx_ww = (x - x_0) ** 2 / (2 * sigma ** 2)
return amplitude * (1 - 2 * xx_ww) * np.exp(-xx_ww)
def bounding_box(self, factor=10.0):
"""Tuple defining the default ``bounding_box`` limits,
``(x_low, x_high)``.
Parameters
----------
factor : float
The multiple of sigma used to define the limits.
"""
x0 = self.x_0
dx = factor * self.sigma
return (x0 - dx, x0 + dx)
@property
def input_units(self):
if self.x_0.unit is None:
return None
return {self.inputs[0]: self.x_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {'x_0': inputs_unit[self.inputs[0]],
'sigma': inputs_unit[self.inputs[0]],
'amplitude': outputs_unit[self.outputs[0]]}
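# Illustrative sketch (editor's addition, not part of astropy): the wavelet above takes its
# peak value ``amplitude`` at ``x_0`` and crosses zero at ``x_0 +/- sigma``, which follows
# directly from the formula in the docstring. The helper name ``_example_rickerwavelet1d``
# is hypothetical.
def _example_rickerwavelet1d():
    rw = RickerWavelet1D(amplitude=2.0, x_0=0.0, sigma=1.0)
    assert np.isclose(rw(0.0), 2.0)                      # peak at x_0
    assert np.allclose(rw(np.array([-1.0, 1.0])), 0.0)   # zero crossings at x_0 +/- sigma
    return rw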
class RickerWavelet2D(Fittable2DModel):
"""
Two dimensional Ricker Wavelet model (sometimes known as a "Mexican Hat"
model).
.. note::
See https://github.com/astropy/astropy/pull/9445 for discussions
related to renaming of this model.
Parameters
----------
amplitude : float
Amplitude
x_0 : float
x position of the peak
y_0 : float
y position of the peak
sigma : float
Width of the Ricker wavelet
See Also
--------
RickerWavelet1D, Gaussian2D
Notes
-----
Model formula:
.. math::
f(x, y) = A \\left(1 - \\frac{\\left(x - x_{0}\\right)^{2}
+ \\left(y - y_{0}\\right)^{2}}{\\sigma^{2}}\\right)
e^{\\frac{- \\left(x - x_{0}\\right)^{2}
- \\left(y - y_{0}\\right)^{2}}{2 \\sigma^{2}}}
"""
amplitude = Parameter(default=1, description="Amplitude (peak) value")
x_0 = Parameter(default=0, description="X position of the peak")
y_0 = Parameter(default=0, description="Y position of the peak")
sigma = Parameter(default=1, description="Width of the Ricker wavelet")
@staticmethod
def evaluate(x, y, amplitude, x_0, y_0, sigma):
"""Two dimensional Ricker Wavelet model function"""
rr_ww = ((x - x_0) ** 2 + (y - y_0) ** 2) / (2 * sigma ** 2)
return amplitude * (1 - rr_ww) * np.exp(- rr_ww)
@property
def input_units(self):
if self.x_0.unit is None:
return None
return {self.inputs[0]: self.x_0.unit,
self.inputs[1]: self.y_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit[self.inputs[0]] != inputs_unit[self.inputs[1]]:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return {'x_0': inputs_unit[self.inputs[0]],
'y_0': inputs_unit[self.inputs[0]],
'sigma': inputs_unit[self.inputs[0]],
'amplitude': outputs_unit[self.outputs[0]]}
class AiryDisk2D(Fittable2DModel):
"""
Two dimensional Airy disk model.
Parameters
----------
amplitude : float
Amplitude of the Airy function.
x_0 : float
x position of the maximum of the Airy function.
y_0 : float
y position of the maximum of the Airy function.
radius : float
The radius of the Airy disk (radius of the first zero).
See Also
--------
Box2D, TrapezoidDisk2D, Gaussian2D
Notes
-----
Model formula:
.. math:: f(r) = A \\left[\\frac{2 J_1(\\frac{\\pi r}{R/R_z})}{\\frac{\\pi r}{R/R_z}}\\right]^2
Where :math:`J_1` is the first order Bessel function of the first
kind, :math:`r` is radial distance from the maximum of the Airy
function (:math:`r = \\sqrt{(x - x_0)^2 + (y - y_0)^2}`), :math:`R`
is the input ``radius`` parameter, and :math:`R_z =
1.2196698912665045`.
For an optical system, the radius of the first zero represents the
limiting angular resolution and is approximately 1.22 * lambda / D,
where lambda is the wavelength of the light and D is the diameter of
the aperture.
See [1]_ for more details about the Airy disk.
References
----------
.. [1] https://en.wikipedia.org/wiki/Airy_disk
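Examples
--------
A minimal evaluation sketch (requires scipy; the grid and parameter
values below are illustrative only)::
import numpy as np
from astropy.modeling.models import AiryDisk2D
model = AiryDisk2D(amplitude=1, x_0=0, y_0=0, radius=2)
y, x = np.mgrid[-5:5:0.05, -5:5:0.05]
z = model(x, y) # ``amplitude`` at the center, first zero at r = radius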
"""
amplitude = Parameter(default=1, description="Amplitude (peak value) of the Airy function")
x_0 = Parameter(default=0, description="X position of the peak")
y_0 = Parameter(default=0, description="Y position of the peak")
radius = Parameter(default=1,
description="The radius of the Airy disk (radius of first zero crossing)")
_rz = None
_j1 = None
@classmethod
def evaluate(cls, x, y, amplitude, x_0, y_0, radius):
"""Two dimensional Airy model function"""
if cls._rz is None:
try:
from scipy.special import j1, jn_zeros
cls._rz = jn_zeros(1, 1)[0] / np.pi
cls._j1 = j1
except ImportError:
raise ImportError('AiryDisk2D model requires scipy.')
r = np.sqrt((x - x_0) ** 2 + (y - y_0) ** 2) / (radius / cls._rz)
if isinstance(r, Quantity):
# scipy function cannot handle Quantity, so turn into array.
r = r.to_value(u.dimensionless_unscaled)
# Since r can be zero, we have to take care to treat that case
# separately so as not to raise a numpy warning
z = np.ones(r.shape)
rt = np.pi * r[r > 0]
z[r > 0] = (2.0 * cls._j1(rt) / rt) ** 2
if isinstance(amplitude, Quantity):
# make z quantity too, otherwise in-place multiplication fails.
z = Quantity(z, u.dimensionless_unscaled, copy=False)
z *= amplitude
return z
@property
def input_units(self):
if self.x_0.unit is None:
return None
return {self.inputs[0]: self.x_0.unit,
self.inputs[1]: self.y_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit[self.inputs[0]] != inputs_unit[self.inputs[1]]:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return {'x_0': inputs_unit[self.inputs[0]],
'y_0': inputs_unit[self.inputs[0]],
'radius': inputs_unit[self.inputs[0]],
'amplitude': outputs_unit[self.outputs[0]]}
class Moffat1D(Fittable1DModel):
"""
One dimensional Moffat model.
Parameters
----------
amplitude : float
Amplitude of the model.
x_0 : float
x position of the maximum of the Moffat model.
gamma : float
Core width of the Moffat model.
alpha : float
Power index of the Moffat model.
See Also
--------
Gaussian1D, Box1D
Notes
-----
Model formula:
.. math::
f(x) = A \\left(1 + \\frac{\\left(x - x_{0}\\right)^{2}}{\\gamma^{2}}\\right)^{- \\alpha}
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Moffat1D
plt.figure()
s1 = Moffat1D()
r = np.arange(-5, 5, .01)
for factor in range(1, 4):
s1.amplitude = factor
s1.gamma = factor
plt.plot(r, s1(r), color=str(0.25 * factor), lw=2)
plt.axis([-5, 5, -1, 4])
plt.show()
"""
amplitude = Parameter(default=1, description="Amplitude of the model")
x_0 = Parameter(default=0, description="X position of maximum of Moffat model")
gamma = Parameter(default=1, description="Core width of Moffat model")
alpha = Parameter(default=1, description="Power index of the Moffat model")
@property
def fwhm(self):
"""
Moffat full width at half maximum.
Derivation of the formula is available in
`this notebook by Yoonsoo Bach <https://nbviewer.jupyter.org/github/ysbach/AO_2017/blob/master/04_Ground_Based_Concept.ipynb#1.2.-Moffat>`_.
"""
return 2.0 * np.abs(self.gamma) * np.sqrt(2.0 ** (1.0 / self.alpha) - 1.0)
@staticmethod
def evaluate(x, amplitude, x_0, gamma, alpha):
"""One dimensional Moffat model function"""
return amplitude * (1 + ((x - x_0) / gamma) ** 2) ** (-alpha)
@staticmethod
def fit_deriv(x, amplitude, x_0, gamma, alpha):
"""One dimensional Moffat model derivative with respect to parameters"""
fac = (1 + (x - x_0) ** 2 / gamma ** 2)
d_A = fac ** (-alpha)
d_x_0 = (2 * amplitude * alpha * (x - x_0) * d_A / (fac * gamma ** 2))
d_gamma = (2 * amplitude * alpha * (x - x_0) ** 2 * d_A /
(fac * gamma ** 3))
d_alpha = -amplitude * d_A * np.log(fac)
return [d_A, d_x_0, d_gamma, d_alpha]
@property
def input_units(self):
if self.x_0.unit is None:
return None
return {self.inputs[0]: self.x_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {'x_0': inputs_unit[self.inputs[0]],
'gamma': inputs_unit[self.inputs[0]],
'amplitude': outputs_unit[self.outputs[0]]}
class Moffat2D(Fittable2DModel):
"""
Two dimensional Moffat model.
Parameters
----------
amplitude : float
Amplitude of the model.
x_0 : float
x position of the maximum of the Moffat model.
y_0 : float
y position of the maximum of the Moffat model.
gamma : float
Core width of the Moffat model.
alpha : float
Power index of the Moffat model.
See Also
--------
Gaussian2D, Box2D
Notes
-----
Model formula:
.. math::
f(x, y) = A \\left(1 + \\frac{\\left(x - x_{0}\\right)^{2} +
\\left(y - y_{0}\\right)^{2}}{\\gamma^{2}}\\right)^{- \\alpha}
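Examples
--------
A minimal evaluation sketch (the grid and parameter values below are
illustrative only)::
import numpy as np
from astropy.modeling.models import Moffat2D
model = Moffat2D(amplitude=1, x_0=0, y_0=0, gamma=2, alpha=3.5)
y, x = np.mgrid[-5:5:0.1, -5:5:0.1]
z = model(x, y)
fwhm = model.fwhm # 2 * |gamma| * sqrt(2**(1/alpha) - 1)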
"""
amplitude = Parameter(default=1, description="Amplitude (peak value) of the model")
x_0 = Parameter(default=0, description="X position of the maximum of the Moffat model")
y_0 = Parameter(default=0, description="Y position of the maximum of the Moffat model")
gamma = Parameter(default=1, description="Core width of the Moffat model")
alpha = Parameter(default=1, description="Power index of the Moffat model")
@property
def fwhm(self):
"""
Moffat full width at half maximum.
Derivation of the formula is available in
`this notebook by Yoonsoo Bach <https://nbviewer.jupyter.org/github/ysbach/AO_2017/blob/master/04_Ground_Based_Concept.ipynb#1.2.-Moffat>`_.
"""
return 2.0 * np.abs(self.gamma) * np.sqrt(2.0 ** (1.0 / self.alpha) - 1.0)
@staticmethod
def evaluate(x, y, amplitude, x_0, y_0, gamma, alpha):
"""Two dimensional Moffat model function"""
rr_gg = ((x - x_0) ** 2 + (y - y_0) ** 2) / gamma ** 2
return amplitude * (1 + rr_gg) ** (-alpha)
@staticmethod
def fit_deriv(x, y, amplitude, x_0, y_0, gamma, alpha):
"""Two dimensional Moffat model derivative with respect to parameters"""
rr_gg = ((x - x_0) ** 2 + (y - y_0) ** 2) / gamma ** 2
d_A = (1 + rr_gg) ** (-alpha)
d_x_0 = (2 * amplitude * alpha * d_A * (x - x_0) /
(gamma ** 2 * (1 + rr_gg)))
d_y_0 = (2 * amplitude * alpha * d_A * (y - y_0) /
(gamma ** 2 * (1 + rr_gg)))
d_alpha = -amplitude * d_A * np.log(1 + rr_gg)
d_gamma = (2 * amplitude * alpha * d_A * rr_gg /
(gamma * (1 + rr_gg)))
return [d_A, d_x_0, d_y_0, d_gamma, d_alpha]
@property
def input_units(self):
if self.x_0.unit is None:
return None
else:
return {self.inputs[0]: self.x_0.unit,
self.inputs[1]: self.y_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit[self.inputs[0]] != inputs_unit[self.inputs[1]]:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return {'x_0': inputs_unit[self.inputs[0]],
'y_0': inputs_unit[self.inputs[0]],
'gamma': inputs_unit[self.inputs[0]],
'amplitude': outputs_unit[self.outputs[0]]}
class Sersic2D(Fittable2DModel):
r"""
Two dimensional Sersic surface brightness profile.
Parameters
----------
amplitude : float
Surface brightness at r_eff.
r_eff : float
Effective (half-light) radius
n : float
Sersic Index.
x_0 : float, optional
x position of the center.
y_0 : float, optional
y position of the center.
ellip : float, optional
Ellipticity.
theta : float, optional
Rotation angle in radians, counterclockwise from
the positive x-axis.
See Also
--------
Gaussian2D, Moffat2D
Notes
-----
Model formula:
.. math::
I(x,y) = I(r) = I_e\exp\left\{-b_n\left[\left(\frac{r}{r_{e}}\right)^{(1/n)}-1\right]\right\}
The constant :math:`b_n` is defined such that :math:`r_e` contains half the total
luminosity, and can be solved for numerically.
.. math::
\Gamma(2n) = 2\gamma (2n,b_n)
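In the implementation, :math:`b_n` is obtained numerically from
``scipy.special.gammaincinv(2 * n, 0.5)``.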
Examples
--------
.. plot::
:include-source:
import numpy as np
from astropy.modeling.models import Sersic2D
import matplotlib.pyplot as plt
x,y = np.meshgrid(np.arange(100), np.arange(100))
mod = Sersic2D(amplitude = 1, r_eff = 25, n=4, x_0=50, y_0=50,
ellip=.5, theta=-1)
img = mod(x, y)
log_img = np.log10(img)
plt.figure()
plt.imshow(log_img, origin='lower', interpolation='nearest',
vmin=-1, vmax=2)
plt.xlabel('x')
plt.ylabel('y')
cbar = plt.colorbar()
cbar.set_label('Log Brightness', rotation=270, labelpad=25)
cbar.set_ticks([-1, 0, 1, 2], update_ticks=True)
plt.show()
References
----------
.. [1] http://ned.ipac.caltech.edu/level5/March05/Graham/Graham2.html
"""
amplitude = Parameter(default=1, description="Surface brightness at r_eff")
r_eff = Parameter(default=1, description="Effective (half-light) radius")
n = Parameter(default=4, description="Sersic Index")
x_0 = Parameter(default=0, description="X position of the center")
y_0 = Parameter(default=0, description="Y position of the center")
ellip = Parameter(default=0, description="Ellipticity")
theta = Parameter(default=0, description="Rotation angle in radians (counterclockwise-positive)")
_gammaincinv = None
@classmethod
def evaluate(cls, x, y, amplitude, r_eff, n, x_0, y_0, ellip, theta):
"""Two dimensional Sersic profile function."""
if cls._gammaincinv is None:
try:
from scipy.special import gammaincinv
cls._gammaincinv = gammaincinv
except ImportError:
raise ImportError('Sersic2D model requires scipy.')
bn = cls._gammaincinv(2. * n, 0.5)
a, b = r_eff, (1 - ellip) * r_eff
cos_theta, sin_theta = np.cos(theta), np.sin(theta)
x_maj = (x - x_0) * cos_theta + (y - y_0) * sin_theta
x_min = -(x - x_0) * sin_theta + (y - y_0) * cos_theta
z = np.sqrt((x_maj / a) ** 2 + (x_min / b) ** 2)
return amplitude * np.exp(-bn * (z ** (1 / n) - 1))
@property
def input_units(self):
if self.x_0.unit is None:
return None
return {self.inputs[0]: self.x_0.unit,
self.inputs[1]: self.y_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit[self.inputs[0]] != inputs_unit[self.inputs[1]]:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return {'x_0': inputs_unit[self.inputs[0]],
'y_0': inputs_unit[self.inputs[0]],
'r_eff': inputs_unit[self.inputs[0]],
'theta': u.rad,
'amplitude': outputs_unit[self.outputs[0]]}
class KingProjectedAnalytic1D(Fittable1DModel):
"""
Projected (surface density) analytic King Model.
Parameters
----------
amplitude : float
Amplitude or scaling factor.
r_core : float
Core radius (f(r_c) ~ 0.5 f_0)
r_tide : float
Tidal radius.
Notes
-----
This model approximates a King model with an analytic function. The derivation of this
equation can be found in King '62 (equation 14). This is just an approximation of the
full model and the parameters derived from this model should be taken with caution.
It usually works for models with a concentration parameter (c = log10(r_t/r_c)) less than 2.
Model formula:
.. math::
f(x) = A r_c^2 \\left(\\frac{1}{\\sqrt{(x^2 + r_c^2)}} -
\\frac{1}{\\sqrt{(r_t^2 + r_c^2)}}\\right)^2
Examples
--------
.. plot::
:include-source:
import numpy as np
from astropy.modeling.models import KingProjectedAnalytic1D
import matplotlib.pyplot as plt
plt.figure()
rt_list = [1, 2, 5, 10, 20]
for rt in rt_list:
r = np.linspace(0.1, rt, 100)
mod = KingProjectedAnalytic1D(amplitude = 1, r_core = 1., r_tide = rt)
sig = mod(r)
plt.loglog(r, sig/sig[0], label='c ~ {:0.2f}'.format(mod.concentration))
plt.xlabel("r")
plt.ylabel(r"$\\sigma/\\sigma_0$")
plt.legend()
plt.show()
References
----------
.. [1] https://ui.adsabs.harvard.edu/abs/1962AJ.....67..471K
"""
amplitude = Parameter(default=1, bounds=(FLOAT_EPSILON, None), description="Amplitude or scaling factor")
r_core = Parameter(default=1, bounds=(FLOAT_EPSILON, None), description="Core Radius")
r_tide = Parameter(default=2, bounds=(FLOAT_EPSILON, None), description="Tidal Radius")
@property
def concentration(self):
"""Concentration parameter of the king model"""
return np.log10(np.abs(self.r_tide/self.r_core))
@staticmethod
def evaluate(x, amplitude, r_core, r_tide):
"""
Analytic King model function.
"""
result = amplitude * r_core ** 2 * (1/np.sqrt(x ** 2 + r_core ** 2) -
1/np.sqrt(r_tide ** 2 + r_core ** 2)) ** 2
# Set invalid r values to 0
bounds = (x >= r_tide) | (x < 0)
result[bounds] = result[bounds] * 0.
return result
@staticmethod
def fit_deriv(x, amplitude, r_core, r_tide):
"""
Analytic King model function derivatives.
"""
d_amplitude = r_core ** 2 * (1/np.sqrt(x ** 2 + r_core ** 2) -
1/np.sqrt(r_tide ** 2 + r_core ** 2)) ** 2
d_r_core = 2 * amplitude * r_core ** 2 * (r_core/(r_core ** 2 + r_tide ** 2) ** (3/2) -
r_core/(r_core ** 2 + x ** 2) ** (3/2)) * \
(1./np.sqrt(r_core ** 2 + x ** 2) - 1./np.sqrt(r_core ** 2 + r_tide ** 2)) + \
2 * amplitude * r_core * (1./np.sqrt(r_core ** 2 + x ** 2) -
1./np.sqrt(r_core ** 2 + r_tide ** 2)) ** 2
d_r_tide = (2 * amplitude * r_core ** 2 * r_tide *
(1./np.sqrt(r_core ** 2 + x ** 2) -
1./np.sqrt(r_core ** 2 + r_tide ** 2)))/(r_core ** 2 + r_tide ** 2) ** (3/2)
# Set invalid r values to 0
bounds = (x >= r_tide) | (x < 0)
d_amplitude[bounds] = d_amplitude[bounds]*0
d_r_core[bounds] = d_r_core[bounds]*0
d_r_tide[bounds] = d_r_tide[bounds]*0
return [d_amplitude, d_r_core, d_r_tide]
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box`` limits.
The model is not defined for r > r_tide.
``(r_low, r_high)``
"""
return (0 * self.r_tide, 1 * self.r_tide)
@property
def input_units(self):
if self.r_core.unit is None:
return None
return {self.inputs[0]: self.r_core.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {'r_core': inputs_unit[self.inputs[0]],
'r_tide': inputs_unit[self.inputs[0]],
'amplitude': outputs_unit[self.outputs[0]]}
class Logarithmic1D(Fittable1DModel):
"""
One dimensional logarithmic model.
Parameters
----------
amplitude : float, optional
tau : float, optional
See Also
--------
Exponential1D, Gaussian1D
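Examples
--------
A minimal sketch (parameter values are illustrative only)::
from astropy.modeling.models import Logarithmic1D
model = Logarithmic1D(amplitude=2, tau=3)
y = model(10) # 2 * log(10 / 3)
inv = model.inverse # Exponential1D(amplitude=3, tau=2)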
"""
amplitude = Parameter(default=1)
tau = Parameter(default=1)
@staticmethod
def evaluate(x, amplitude, tau):
return amplitude * np.log(x / tau)
@staticmethod
def fit_deriv(x, amplitude, tau):
d_amplitude = np.log(x / tau)
d_tau = np.zeros(x.shape) - (amplitude / tau)
return [d_amplitude, d_tau]
@property
def inverse(self):
new_amplitude = self.tau
new_tau = self.amplitude
return Exponential1D(amplitude=new_amplitude, tau=new_tau)
@tau.validator
def tau(self, val):
if val == 0:
raise ValueError("0 is not an allowed value for tau")
@property
def input_units(self):
if self.tau.unit is None:
return None
return {self.inputs[0]: self.tau.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {'tau': inputs_unit[self.inputs[0]],
'amplitude': outputs_unit[self.outputs[0]]}
class Exponential1D(Fittable1DModel):
"""
One dimensional exponential model.
Parameters
----------
amplitude : float, optional
tau : float, optional
See Also
--------
Logarithmic1D, Gaussian1D
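Examples
--------
A minimal sketch (parameter values are illustrative only)::
from astropy.modeling.models import Exponential1D
model = Exponential1D(amplitude=2, tau=3)
y = model(1) # 2 * exp(1 / 3)
inv = model.inverse # Logarithmic1D(amplitude=3, tau=2)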
"""
amplitude = Parameter(default=1)
tau = Parameter(default=1)
@staticmethod
def evaluate(x, amplitude, tau):
return amplitude * np.exp(x / tau)
@staticmethod
def fit_deriv(x, amplitude, tau):
''' Derivative with respect to parameters'''
d_amplitude = np.exp(x / tau)
d_tau = -amplitude * (x / tau**2) * np.exp(x / tau)
return [d_amplitude, d_tau]
@property
def inverse(self):
new_amplitude = self.tau
new_tau = self.amplitude
return Logarithmic1D(amplitude=new_amplitude, tau=new_tau)
@tau.validator
def tau(self, val):
''' tau cannot be 0'''
if val == 0:
raise ValueError("0 is not an allowed value for tau")
@property
def input_units(self):
if self.tau.unit is None:
return None
return {self.inputs[0]: self.tau.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {'tau': inputs_unit[self.inputs[0]],
'amplitude': outputs_unit[self.outputs[0]]}
@deprecated('4.0', alternative='RickerWavelet1D')
class MexicanHat1D(RickerWavelet1D):
""" Deprecated."""
@deprecated('4.0', alternative='RickerWavelet2D')
class MexicanHat2D(RickerWavelet2D):
""" Deprecated."""
| bsd-3-clause |
billy-inn/scikit-learn | examples/covariance/plot_covariance_estimation.py | 250 | 5070 | """
=======================================================================
Shrinkage covariance estimation: LedoitWolf vs OAS and max-likelihood
=======================================================================
When working with covariance estimation, the usual approach is to use
a maximum likelihood estimator, such as the
:class:`sklearn.covariance.EmpiricalCovariance`. It is unbiased, i.e. it
converges to the true (population) covariance when given many
observations. However, it can also be beneficial to regularize it, in
order to reduce its variance; this, in turn, introduces some bias. This
example illustrates the simple regularization used in
:ref:`shrunk_covariance` estimators. In particular, it focuses on how to
set the amount of regularization, i.e. how to choose the bias-variance
trade-off.
Here we compare 3 approaches:
* Setting the parameter by cross-validating the likelihood on three folds
according to a grid of potential shrinkage parameters.
* A closed formula proposed by Ledoit and Wolf to compute
the asymptotically optimal regularization parameter (minimizing a MSE
criterion), yielding the :class:`sklearn.covariance.LedoitWolf`
covariance estimate.
* An improvement of the Ledoit-Wolf shrinkage, the
:class:`sklearn.covariance.OAS`, proposed by Chen et al. Its
convergence is significantly better under the assumption that the data
are Gaussian, in particular for small samples.
To quantify estimation error, we plot the likelihood of unseen data for
different values of the shrinkage parameter. We also show the choices by
cross-validation, or with the LedoitWolf and OAS estimates.
Note that the maximum likelihood estimate corresponds to no shrinkage,
and thus performs poorly. The Ledoit-Wolf estimate performs really well,
as it is close to the optimal and is not computationally costly. In this
example, the OAS estimate is a bit further away. Interestingly, both
approaches outperform cross-validation, which is significantly more
computationally costly.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg
from sklearn.covariance import LedoitWolf, OAS, ShrunkCovariance, \
log_likelihood, empirical_covariance
from sklearn.grid_search import GridSearchCV
###############################################################################
# Generate sample data
n_features, n_samples = 40, 20
np.random.seed(42)
base_X_train = np.random.normal(size=(n_samples, n_features))
base_X_test = np.random.normal(size=(n_samples, n_features))
# Color samples
coloring_matrix = np.random.normal(size=(n_features, n_features))
X_train = np.dot(base_X_train, coloring_matrix)
X_test = np.dot(base_X_test, coloring_matrix)
###############################################################################
# Compute the likelihood on test data
# spanning a range of possible shrinkage coefficient values
shrinkages = np.logspace(-2, 0, 30)
negative_logliks = [-ShrunkCovariance(shrinkage=s).fit(X_train).score(X_test)
for s in shrinkages]
# under the ground-truth model, which we would not have access to in real
# settings
real_cov = np.dot(coloring_matrix.T, coloring_matrix)
emp_cov = empirical_covariance(X_train)
loglik_real = -log_likelihood(emp_cov, linalg.inv(real_cov))
###############################################################################
# Compare different approaches to setting the parameter
# GridSearch for an optimal shrinkage coefficient
tuned_parameters = [{'shrinkage': shrinkages}]
cv = GridSearchCV(ShrunkCovariance(), tuned_parameters)
cv.fit(X_train)
# Ledoit-Wolf optimal shrinkage coefficient estimate
lw = LedoitWolf()
loglik_lw = lw.fit(X_train).score(X_test)
# OAS coefficient estimate
oa = OAS()
loglik_oa = oa.fit(X_train).score(X_test)
###############################################################################
# Plot results
fig = plt.figure()
plt.title("Regularized covariance: likelihood and shrinkage coefficient")
plt.xlabel('Regularization parameter: shrinkage coefficient')
plt.ylabel('Error: negative log-likelihood on test data')
# range shrinkage curve
plt.loglog(shrinkages, negative_logliks, label="Negative log-likelihood")
plt.plot(plt.xlim(), 2 * [loglik_real], '--r',
label="Real covariance likelihood")
# adjust view
lik_max = np.amax(negative_logliks)
lik_min = np.amin(negative_logliks)
ymin = lik_min - 6. * np.log((plt.ylim()[1] - plt.ylim()[0]))
ymax = lik_max + 10. * np.log(lik_max - lik_min)
xmin = shrinkages[0]
xmax = shrinkages[-1]
# LW likelihood
plt.vlines(lw.shrinkage_, ymin, -loglik_lw, color='magenta',
linewidth=3, label='Ledoit-Wolf estimate')
# OAS likelihood
plt.vlines(oa.shrinkage_, ymin, -loglik_oa, color='purple',
linewidth=3, label='OAS estimate')
# best CV estimator likelihood
plt.vlines(cv.best_estimator_.shrinkage, ymin,
-cv.best_estimator_.score(X_test), color='cyan',
linewidth=3, label='Cross-validation best estimate')
plt.ylim(ymin, ymax)
plt.xlim(xmin, xmax)
plt.legend()
plt.show()
| bsd-3-clause |
marcocaccin/scikit-learn | sklearn/utils/tests/test_seq_dataset.py | 93 | 2471 | # Author: Tom Dupre la Tour <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import scipy.sparse as sp
from sklearn.utils.seq_dataset import ArrayDataset, CSRDataset
from sklearn.datasets import load_iris
from numpy.testing import assert_array_equal
from nose.tools import assert_equal
iris = load_iris()
X = iris.data.astype(np.float64)
y = iris.target.astype(np.float64)
X_csr = sp.csr_matrix(X)
sample_weight = np.arange(y.size, dtype=np.float64)
def test_seq_dataset():
dataset1 = ArrayDataset(X, y, sample_weight, seed=42)
dataset2 = CSRDataset(X_csr.data, X_csr.indptr, X_csr.indices,
y, sample_weight, seed=42)
for dataset in (dataset1, dataset2):
for i in range(5):
# next sample
xi_, yi, swi, idx = dataset._next_py()
xi = sp.csr_matrix((xi_), shape=(1, X.shape[1]))
assert_array_equal(xi.data, X_csr[idx].data)
assert_array_equal(xi.indices, X_csr[idx].indices)
assert_array_equal(xi.indptr, X_csr[idx].indptr)
assert_equal(yi, y[idx])
assert_equal(swi, sample_weight[idx])
# random sample
xi_, yi, swi, idx = dataset._random_py()
xi = sp.csr_matrix((xi_), shape=(1, X.shape[1]))
assert_array_equal(xi.data, X_csr[idx].data)
assert_array_equal(xi.indices, X_csr[idx].indices)
assert_array_equal(xi.indptr, X_csr[idx].indptr)
assert_equal(yi, y[idx])
assert_equal(swi, sample_weight[idx])
def test_seq_dataset_shuffle():
dataset1 = ArrayDataset(X, y, sample_weight, seed=42)
dataset2 = CSRDataset(X_csr.data, X_csr.indptr, X_csr.indices,
y, sample_weight, seed=42)
# not shuffled
for i in range(5):
_, _, _, idx1 = dataset1._next_py()
_, _, _, idx2 = dataset2._next_py()
assert_equal(idx1, i)
assert_equal(idx2, i)
for i in range(5):
_, _, _, idx1 = dataset1._random_py()
_, _, _, idx2 = dataset2._random_py()
assert_equal(idx1, idx2)
seed = 77
dataset1._shuffle_py(seed)
dataset2._shuffle_py(seed)
for i in range(5):
_, _, _, idx1 = dataset1._next_py()
_, _, _, idx2 = dataset2._next_py()
assert_equal(idx1, idx2)
_, _, _, idx1 = dataset1._random_py()
_, _, _, idx2 = dataset2._random_py()
assert_equal(idx1, idx2)
| bsd-3-clause |
poryfly/scikit-learn | benchmarks/bench_plot_omp_lars.py | 266 | 4447 | """Benchmarks of orthogonal matching pursuit (:ref:`OMP`) versus least angle
regression (:ref:`least_angle_regression`)
The input data is mostly low rank but has a fat, infinite tail.
"""
from __future__ import print_function
import gc
import sys
from time import time
import numpy as np
from sklearn.linear_model import lars_path, orthogonal_mp
from sklearn.datasets.samples_generator import make_sparse_coded_signal
def compute_bench(samples_range, features_range):
it = 0
results = dict()
lars = np.empty((len(features_range), len(samples_range)))
lars_gram = lars.copy()
omp = lars.copy()
omp_gram = lars.copy()
max_it = len(samples_range) * len(features_range)
for i_s, n_samples in enumerate(samples_range):
for i_f, n_features in enumerate(features_range):
it += 1
n_informative = n_features // 10
print('====================')
print('Iteration %03d of %03d' % (it, max_it))
print('====================')
# dataset_kwargs = {
# 'n_train_samples': n_samples,
# 'n_test_samples': 2,
# 'n_features': n_features,
# 'n_informative': n_informative,
# 'effective_rank': min(n_samples, n_features) / 10,
# #'effective_rank': None,
# 'bias': 0.0,
# }
dataset_kwargs = {
'n_samples': 1,
'n_components': n_features,
'n_features': n_samples,
'n_nonzero_coefs': n_informative,
'random_state': 0
}
print("n_samples: %d" % n_samples)
print("n_features: %d" % n_features)
y, X, _ = make_sparse_coded_signal(**dataset_kwargs)
X = np.asfortranarray(X)
gc.collect()
print("benchmarking lars_path (with Gram):", end='')
sys.stdout.flush()
tstart = time()
G = np.dot(X.T, X) # precomputed Gram matrix
Xy = np.dot(X.T, y)
lars_path(X, y, Xy=Xy, Gram=G, max_iter=n_informative)
delta = time() - tstart
print("%0.3fs" % delta)
lars_gram[i_f, i_s] = delta
gc.collect()
print("benchmarking lars_path (without Gram):", end='')
sys.stdout.flush()
tstart = time()
lars_path(X, y, Gram=None, max_iter=n_informative)
delta = time() - tstart
print("%0.3fs" % delta)
lars[i_f, i_s] = delta
gc.collect()
print("benchmarking orthogonal_mp (with Gram):", end='')
sys.stdout.flush()
tstart = time()
orthogonal_mp(X, y, precompute=True,
n_nonzero_coefs=n_informative)
delta = time() - tstart
print("%0.3fs" % delta)
omp_gram[i_f, i_s] = delta
gc.collect()
print("benchmarking orthogonal_mp (without Gram):", end='')
sys.stdout.flush()
tstart = time()
orthogonal_mp(X, y, precompute=False,
n_nonzero_coefs=n_informative)
delta = time() - tstart
print("%0.3fs" % delta)
omp[i_f, i_s] = delta
results['time(LARS) / time(OMP)\n (w/ Gram)'] = (lars_gram / omp_gram)
results['time(LARS) / time(OMP)\n (w/o Gram)'] = (lars / omp)
return results
if __name__ == '__main__':
samples_range = np.linspace(1000, 5000, 5).astype(np.int)
features_range = np.linspace(1000, 5000, 5).astype(np.int)
results = compute_bench(samples_range, features_range)
max_time = max(np.max(t) for t in results.values())
import pylab as pl
fig = pl.figure('scikit-learn OMP vs. LARS benchmark results')
for i, (label, timings) in enumerate(sorted(results.items())):
ax = fig.add_subplot(1, 2, i + 1)
vmax = max(1 - timings.min(), -1 + timings.max())
pl.matshow(timings, fignum=False, vmin=1 - vmax, vmax=1 + vmax)
ax.set_xticklabels([''] + list(map(str, samples_range)))
ax.set_yticklabels([''] + list(map(str, features_range)))
pl.xlabel('n_samples')
pl.ylabel('n_features')
pl.title(label)
pl.subplots_adjust(0.1, 0.08, 0.96, 0.98, 0.4, 0.63)
ax = pl.axes([0.1, 0.08, 0.8, 0.06])
pl.colorbar(cax=ax, orientation='horizontal')
pl.show()
| bsd-3-clause |
jkarnows/scikit-learn | examples/covariance/plot_mahalanobis_distances.py | 348 | 6232 | r"""
================================================================
Robust covariance estimation and Mahalanobis distances relevance
================================================================
An example to show covariance estimation with the Mahalanobis
distances on Gaussian distributed data.
For Gaussian distributed data, the distance of an observation
:math:`x_i` to the mode of the distribution can be computed using its
Mahalanobis distance: :math:`d_{(\mu,\Sigma)}(x_i)^2 = (x_i -
\mu)'\Sigma^{-1}(x_i - \mu)` where :math:`\mu` and :math:`\Sigma` are
the location and the covariance of the underlying Gaussian
distribution.
In practice, :math:`\mu` and :math:`\Sigma` are replaced by some
estimates. The usual covariance maximum likelihood estimate is very
sensitive to the presence of outliers in the data set, and so are
the corresponding Mahalanobis distances. It is therefore better to
use a robust estimator of covariance to guarantee that the estimation is
resistant to "erroneous" observations in the data set and that the
associated Mahalanobis distances accurately reflect the true
organisation of the observations.
The Minimum Covariance Determinant estimator is a robust,
high-breakdown point (i.e. it can be used to estimate the covariance
matrix of highly contaminated datasets, up to
:math:`\frac{n_\text{samples}-n_\text{features}-1}{2}` outliers)
estimator of covariance. The idea is to find
:math:`\frac{n_\text{samples}+n_\text{features}+1}{2}`
observations whose empirical covariance has the smallest determinant,
yielding a "pure" subset of observations from which to compute
standard estimates of location and covariance.
The Minimum Covariance Determinant estimator (MCD) has been introduced
by P. J. Rousseeuw in [1].
This example illustrates how the Mahalanobis distances are affected by
outlying data: observations drawn from a contaminating distribution
are not distinguishable from the observations coming from the real,
Gaussian distribution that one may want to work with. Using MCD-based
Mahalanobis distances, the two populations become
distinguishable. Associated applications are outliers detection,
observations ranking, clustering, ...
For visualization purposes, the cube root of the Mahalanobis distances
is represented in the boxplot, as Wilson and Hilferty suggest [2].
[1] P. J. Rousseeuw. Least median of squares regression. J. Am
Stat Ass, 79:871, 1984.
[2] Wilson, E. B., & Hilferty, M. M. (1931). The distribution of chi-square.
Proceedings of the National Academy of Sciences of the United States
of America, 17, 684-688.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.covariance import EmpiricalCovariance, MinCovDet
n_samples = 125
n_outliers = 25
n_features = 2
# generate data
gen_cov = np.eye(n_features)
gen_cov[0, 0] = 2.
X = np.dot(np.random.randn(n_samples, n_features), gen_cov)
# add some outliers
outliers_cov = np.eye(n_features)
outliers_cov[np.arange(1, n_features), np.arange(1, n_features)] = 7.
X[-n_outliers:] = np.dot(np.random.randn(n_outliers, n_features), outliers_cov)
# fit a Minimum Covariance Determinant (MCD) robust estimator to data
robust_cov = MinCovDet().fit(X)
# compare estimators learnt from the full data set with true parameters
emp_cov = EmpiricalCovariance().fit(X)
###############################################################################
# Display results
fig = plt.figure()
plt.subplots_adjust(hspace=-.1, wspace=.4, top=.95, bottom=.05)
# Show data set
subfig1 = plt.subplot(3, 1, 1)
inlier_plot = subfig1.scatter(X[:, 0], X[:, 1],
color='black', label='inliers')
outlier_plot = subfig1.scatter(X[:, 0][-n_outliers:], X[:, 1][-n_outliers:],
color='red', label='outliers')
subfig1.set_xlim(subfig1.get_xlim()[0], 11.)
subfig1.set_title("Mahalanobis distances of a contaminated data set:")
# Show contours of the distance functions
xx, yy = np.meshgrid(np.linspace(plt.xlim()[0], plt.xlim()[1], 100),
np.linspace(plt.ylim()[0], plt.ylim()[1], 100))
zz = np.c_[xx.ravel(), yy.ravel()]
mahal_emp_cov = emp_cov.mahalanobis(zz)
mahal_emp_cov = mahal_emp_cov.reshape(xx.shape)
emp_cov_contour = subfig1.contour(xx, yy, np.sqrt(mahal_emp_cov),
cmap=plt.cm.PuBu_r,
linestyles='dashed')
mahal_robust_cov = robust_cov.mahalanobis(zz)
mahal_robust_cov = mahal_robust_cov.reshape(xx.shape)
robust_contour = subfig1.contour(xx, yy, np.sqrt(mahal_robust_cov),
cmap=plt.cm.YlOrBr_r, linestyles='dotted')
subfig1.legend([emp_cov_contour.collections[1], robust_contour.collections[1],
inlier_plot, outlier_plot],
['MLE dist', 'robust dist', 'inliers', 'outliers'],
loc="upper right", borderaxespad=0)
plt.xticks(())
plt.yticks(())
# Plot the scores for each point
emp_mahal = emp_cov.mahalanobis(X - np.mean(X, 0)) ** (0.33)
subfig2 = plt.subplot(2, 2, 3)
subfig2.boxplot([emp_mahal[:-n_outliers], emp_mahal[-n_outliers:]], widths=.25)
subfig2.plot(1.26 * np.ones(n_samples - n_outliers),
emp_mahal[:-n_outliers], '+k', markeredgewidth=1)
subfig2.plot(2.26 * np.ones(n_outliers),
emp_mahal[-n_outliers:], '+k', markeredgewidth=1)
subfig2.axes.set_xticklabels(('inliers', 'outliers'), size=15)
subfig2.set_ylabel(r"$\sqrt[3]{\rm{(Mahal. dist.)}}$", size=16)
subfig2.set_title("1. from non-robust estimates\n(Maximum Likelihood)")
plt.yticks(())
robust_mahal = robust_cov.mahalanobis(X - robust_cov.location_) ** (0.33)
subfig3 = plt.subplot(2, 2, 4)
subfig3.boxplot([robust_mahal[:-n_outliers], robust_mahal[-n_outliers:]],
widths=.25)
subfig3.plot(1.26 * np.ones(n_samples - n_outliers),
robust_mahal[:-n_outliers], '+k', markeredgewidth=1)
subfig3.plot(2.26 * np.ones(n_outliers),
robust_mahal[-n_outliers:], '+k', markeredgewidth=1)
subfig3.axes.set_xticklabels(('inliers', 'outliers'), size=15)
subfig3.set_ylabel(r"$\sqrt[3]{\rm{(Mahal. dist.)}}$", size=16)
subfig3.set_title("2. from robust estimates\n(Minimum Covariance Determinant)")
plt.yticks(())
plt.show()
| bsd-3-clause |
sergeimoiseev/othodi | old/embedding_webagg.py | 3 | 8190 | """
This example demonstrates how to embed matplotlib WebAgg interactive
plotting in your own web application and framework. It is not
necessary to do all this if you merely want to display a plot in a
browser or use matplotlib's built-in Tornado-based server "on the
side".
The framework being used must support web sockets.
"""
import io
try:
import tornado
except ImportError:
raise RuntimeError("This example requires tornado.")
import tornado.web
import tornado.httpserver
import tornado.ioloop
import tornado.websocket
from matplotlib.backends.backend_webagg_core import (
FigureManagerWebAgg, new_figure_manager_given_figure)
from matplotlib.figure import Figure
import numpy as np
import json
def create_figure():
"""
Creates a simple example figure.
"""
fig = Figure()
a = fig.add_subplot(111)
t = np.arange(0.0, 3.0, 0.01)
s = np.sin(2 * np.pi * t)
a.plot(t, s)
return fig
# The following is the content of the web page. You would normally
# generate this using some sort of template facility in your web
# framework, but here we just use Python string formatting.
html_content = """
<html>
<head>
<!-- TODO: There should be a way to include all of the required javascript
and CSS so matplotlib can add to the set in the future if it
needs to. -->
<link rel="stylesheet" href="_static/css/page.css" type="text/css">
<link rel="stylesheet" href="_static/css/boilerplate.css" type="text/css" />
<link rel="stylesheet" href="_static/css/fbm.css" type="text/css" />
<link rel="stylesheet" href="_static/jquery/css/themes/base/jquery-ui.min.css" >
<script src="_static/jquery/js/jquery-1.11.3.min.js"></script>
<script src="_static/jquery/js/jquery-ui.min.js"></script>
<script src="mpl.js"></script>
<script>
/* This is a callback that is called when the user saves
(downloads) a file. Its purpose is really to map from a
figure and file format to a url in the application. */
function ondownload(figure, format) {
window.open('download.' + format, '_blank');
};
$(document).ready(
function() {
/* It is up to the application to provide a websocket that the figure
will use to communicate to the server. This websocket object can
also be a "fake" websocket that underneath multiplexes messages
from multiple figures, if necessary. */
var websocket_type = mpl.get_websocket_type();
var websocket = new websocket_type("%(ws_uri)sws");
// mpl.figure creates a new figure on the webpage.
var fig = new mpl.figure(
// A unique numeric identifier for the figure
%(fig_id)s,
// A websocket object (or something that behaves like one)
websocket,
// A function called when a file type is selected for download
ondownload,
// The HTML element in which to place the figure
$('div#figure'));
}
);
</script>
<title>matplotlib</title>
</head>
<body>
<div id="figure">
</div>
</body>
</html>
"""
class MyApplication(tornado.web.Application):
class MainPage(tornado.web.RequestHandler):
"""
Serves the main HTML page.
"""
def get(self):
manager = self.application.manager
ws_uri = "ws://{req.host}/".format(req=self.request)
content = html_content % {
"ws_uri": ws_uri, "fig_id": manager.num}
self.write(content)
class MplJs(tornado.web.RequestHandler):
"""
Serves the generated matplotlib javascript file. The content
is dynamically generated based on which toolbar functions the
user has defined. Call `FigureManagerWebAgg` to get its
content.
"""
def get(self):
self.set_header('Content-Type', 'application/javascript')
js_content = FigureManagerWebAgg.get_javascript()
self.write(js_content)
class Download(tornado.web.RequestHandler):
"""
Handles downloading of the figure in various file formats.
"""
def get(self, fmt):
manager = self.application.manager
mimetypes = {
'ps': 'application/postscript',
'eps': 'application/postscript',
'pdf': 'application/pdf',
'svg': 'image/svg+xml',
'png': 'image/png',
'jpeg': 'image/jpeg',
'tif': 'image/tiff',
'emf': 'application/emf'
}
self.set_header('Content-Type', mimetypes.get(fmt, 'binary'))
buff = io.BytesIO()
manager.canvas.print_figure(buff, format=fmt)
self.write(buff.getvalue())
class WebSocket(tornado.websocket.WebSocketHandler):
"""
A websocket for interactive communication between the plot in
the browser and the server.
In addition to the methods required by tornado, it is required to
have two callback methods:
- ``send_json(json_content)`` is called by matplotlib when
it needs to send json to the browser. `json_content` is
a JSON tree (Python dictionary), and it is the responsibility
of this implementation to encode it as a string to send over
the socket.
- ``send_binary(blob)`` is called to send binary image data
to the browser.
"""
supports_binary = True
def open(self):
# Register the websocket with the FigureManager.
manager = self.application.manager
manager.add_web_socket(self)
if hasattr(self, 'set_nodelay'):
self.set_nodelay(True)
def on_close(self):
# When the socket is closed, deregister the websocket with
# the FigureManager.
manager = self.application.manager
manager.remove_web_socket(self)
def on_message(self, message):
# The 'supports_binary' message is relevant to the
# websocket itself. The other messages get passed along
# to matplotlib as-is.
# Every message has a "type" and a "figure_id".
message = json.loads(message)
if message['type'] == 'supports_binary':
self.supports_binary = message['value']
else:
manager = self.application.manager
manager.handle_json(message)
def send_json(self, content):
self.write_message(json.dumps(content))
def send_binary(self, blob):
if self.supports_binary:
self.write_message(blob, binary=True)
else:
data_uri = "data:image/png;base64,{0}".format(
blob.encode('base64').replace('\n', ''))
self.write_message(data_uri)
def __init__(self, figure):
self.figure = figure
self.manager = new_figure_manager_given_figure(
id(figure), figure)
super(MyApplication, self).__init__([
# Static files for the CSS and JS
(r'/_static/(.*)',
tornado.web.StaticFileHandler,
{'path': FigureManagerWebAgg.get_static_file_path()}),
# The page that contains all of the pieces
('/', self.MainPage),
('/mpl.js', self.MplJs),
# Sends images and events to the browser, and receives
# events from the browser
('/ws', self.WebSocket),
# Handles the downloading (i.e., saving) of static images
(r'/download.([a-z0-9.]+)', self.Download),
])
if __name__ == "__main__":
figure = create_figure()
application = MyApplication(figure)
http_server = tornado.httpserver.HTTPServer(application)
http_server.listen(8080)
print("http://127.0.0.1:8080/")
print("Press Ctrl+C to quit")
tornado.ioloop.IOLoop.instance().start()
| mit |
liukaijv/XlsxWriter | examples/pandas_chart_line.py | 9 | 1739 | ##############################################################################
#
# An example of converting a Pandas dataframe to an xlsx file with a line
# chart using Pandas and XlsxWriter.
#
# Copyright 2013-2015, John McNamara, [email protected]
#
import pandas as pd
import random
# Create some sample data to plot.
max_row = 21
categories = ['Node 1', 'Node 2', 'Node 3', 'Node 4']
index_1 = range(0, max_row, 1)
multi_iter1 = {'index': index_1}
for category in categories:
multi_iter1[category] = [random.randint(10, 100) for x in index_1]
# Create a Pandas dataframe from the data.
index_2 = multi_iter1.pop('index')
df = pd.DataFrame(multi_iter1, index=index_2)
df = df.reindex(columns=sorted(df.columns))
# Create a Pandas Excel writer using XlsxWriter as the engine.
sheet_name = 'Sheet1'
writer = pd.ExcelWriter('pandas_chart_line.xlsx', engine='xlsxwriter')
df.to_excel(writer, sheet_name=sheet_name)
# Access the XlsxWriter workbook and worksheet objects from the dataframe.
workbook = writer.book
worksheet = writer.sheets[sheet_name]
# Create a chart object.
chart = workbook.add_chart({'type': 'line'})
# Configure the series of the chart from the dataframe data.
for i in range(len(categories)):
col = i + 1
chart.add_series({
'name': ['Sheet1', 0, col],
'categories': ['Sheet1', 1, 0, max_row, 0],
'values': ['Sheet1', 1, col, max_row, col],
})
# Configure the chart axes.
chart.set_x_axis({'name': 'Index'})
chart.set_y_axis({'name': 'Value', 'major_gridlines': {'visible': False}})
# Insert the chart into the worksheet.
worksheet.insert_chart('G2', chart)
# Close the Pandas Excel writer and output the Excel file.
writer.save()
| bsd-2-clause |
wdurhamh/statsmodels | statsmodels/formula/formulatools.py | 32 | 3846 | from statsmodels.compat.python import iterkeys
import statsmodels.tools.data as data_util
from patsy import dmatrices, NAAction
import numpy as np
# if users want to pass in a different formula framework, they can
# add their handler here. how to do it interactively?
# this is a mutable object, so editing it should show up in the below
formula_handler = {}
class NAAction(NAAction):
# monkey-patch so we can handle missing values in 'extra' arrays later
def _handle_NA_drop(self, values, is_NAs, origins):
total_mask = np.zeros(is_NAs[0].shape[0], dtype=bool)
for is_NA in is_NAs:
total_mask |= is_NA
good_mask = ~total_mask
self.missing_mask = total_mask
# "..." to handle 1- versus 2-dim indexing
return [v[good_mask, ...] for v in values]
def handle_formula_data(Y, X, formula, depth=0, missing='drop'):
"""
Returns endog, exog, and the model specification from arrays and formula
Parameters
----------
Y : array-like
Either endog (the LHS) of a model specification or all of the data.
Y must define __getitem__ for now.
X : array-like
Either exog or None. If all the data for the formula is provided in
Y then you must explicitly set X to None.
formula : str or patsy.model_desc
You can pass a handler by import formula_handler and adding a
key-value pair where the key is the formula object class and
the value is a function that returns endog, exog, formula object
Returns
-------
endog : array-like
Should preserve the input type of Y,X
exog : array-like
Should preserve the input type of Y,X. Could be None.
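Examples
--------
A minimal sketch (illustrative data). Note that the full return value is
``(result, missing_mask, design_info)``, where ``result`` holds the
design matrices::
import pandas as pd
data = pd.DataFrame({'y': [1., 2., 3., 4.], 'x': [0., 1., 2., 3.]})
(endog, exog), missing_mask, design_info = handle_formula_data(
data, None, 'y ~ x')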
"""
# half ass attempt to handle other formula objects
if isinstance(formula, tuple(iterkeys(formula_handler))):
return formula_handler[type(formula)]
na_action = NAAction(on_NA=missing)
if X is not None:
if data_util._is_using_pandas(Y, X):
result = dmatrices(formula, (Y, X), depth,
return_type='dataframe', NA_action=na_action)
else:
result = dmatrices(formula, (Y, X), depth,
return_type='dataframe', NA_action=na_action)
else:
if data_util._is_using_pandas(Y, None):
result = dmatrices(formula, Y, depth, return_type='dataframe',
NA_action=na_action)
else:
result = dmatrices(formula, Y, depth, return_type='dataframe',
NA_action=na_action)
# if missing == 'raise' there's no missing_mask
missing_mask = getattr(na_action, 'missing_mask', None)
if not np.any(missing_mask):
missing_mask = None
if len(result) > 1: # have RHS design
design_info = result[1].design_info # detach it from DataFrame
else:
design_info = None
# NOTE: is there ever a case where we'd need LHS design_info?
return result, missing_mask, design_info
def _remove_intercept_patsy(terms):
"""
Remove intercept from Patsy terms.
"""
from patsy.desc import INTERCEPT
if INTERCEPT in terms:
terms.remove(INTERCEPT)
return terms
def _has_intercept(design_info):
from patsy.desc import INTERCEPT
return INTERCEPT in design_info.terms
def _intercept_idx(design_info):
"""
Returns boolean array index indicating which column holds the intercept
"""
from patsy.desc import INTERCEPT
from numpy import array
return array([INTERCEPT == i for i in design_info.terms])
def make_hypotheses_matrices(model_results, test_formula):
"""
"""
from patsy.constraint import linear_constraint
exog_names = model_results.model.exog_names
LC = linear_constraint(test_formula, exog_names)
return LC
| bsd-3-clause |
GiggleLiu/QuRBM | tests/test_vmc.py | 1 | 3338 | from numpy import *
from numpy.testing import dec,assert_,assert_raises,assert_almost_equal,assert_allclose
from scipy.sparse.linalg import LinearOperator
from scipy.linalg import kron,norm
from matplotlib.pyplot import *
import sys,pdb,time
from os import path
sys.path.insert(0,'../')
from tba.hgen import SpinSpaceConfig,sx,sy,sz
from vmc import *
from rbm import *
from toymodel import *
from cgen import *
from linop import *
from group import TIGroup
random.seed(21)
def analyse_sampling(configs,rbm):
scfg=SpinSpaceConfig([rbm.nin,2])
s=zeros(scfg.hndim)
add.at(s,scfg.config2ind((1-asarray(configs))/2),1)
v0=rbm.tovec(scfg)
s2=abs(v0/norm(v0))**2
ion()
plot(s/s.sum())
plot(s2)
pdb.set_trace()
class VMCTest(object):
def __init__(self,model='AFH'):
self.nsite=4
#construct operator H act on config
if model=='AFH':
self.h=HeisenbergH(nsite=self.nsite,J=1.,periodic=True)
elif model=='AFH2D':
N=int(sqrt(self.nsite))
self.h=HeisenbergH2D(N,N,J=-4.,Jz=2.,periodic=True)
#generate a rbm and the corresponding vector v
self.rbm=RBM(a=[0.1,0.2j,0.3,-0.5],b=[-0.1,0.2,0.,-0.5j],W=kron(sx,sx)+kron(sy,sy))
self.rbm_g=RBM(a=[0.1,0.2j,0.3,-0.5],b=[-0.5j],\
W=reshape([0.3,-0.2,0.4j,0.1],[self.nsite,1]),group=TIGroup([self.nsite] if model!='AFH2D' else [N,N]))
#vmc config
cgen=RBMConfigGenerator(nflip=2,initial_config=array([-1,1]*2))
self.vmc=VMC(cgen,nbath=5000*self.nsite,nsample=50000*self.nsite,nmeasure=self.nsite,sampling_method='metropolis')
#fake vmc
self.fv=FakeVMC(self.h)
def test_measureh(self):
print 'VMC measurements on HeisenbergH.'
for rbm in [self.rbm,self.rbm_g]:
#measurements
O_true=self.fv.measure(self.h,rbm)/self.nsite
O_vmc=self.vmc.measure(self.h,rbm)/self.nsite
err=abs(O_vmc-O_true)
print 'E/site = %s (%s), Error/site = %s'%(O_vmc,O_true,err)
#analyse_sampling(self.vmc._config_histo,rbm)
assert_(err<0.1)
def test_measurepw(self):
print 'VMC measurements on PartialW.'
#construct operator pw act on config
pw=PartialW()
for rbm in [self.rbm_g,self.rbm]:
#measurements
O_true=self.fv.measure(pw,rbm)
O_vmc=self.vmc.measure(pw,rbm)
err=abs(O_vmc-O_true).mean()
print 'Error = %.4f%%'%(err*100)
#analyse_sampling(self.vmc._config_histo,rbm)
assert_(err<0.1)
def test_measureq(self):
print 'VMC measurements on OpQueue.'
#construct operator q act on config
pw=PartialW()
q=OpQueue((pw,self.h),(lambda a,b:a.conj()[...,newaxis]*a,lambda a,b:a.conj()*b))
for rbm in [self.rbm_g,self.rbm]:
#measurements
O_trues=self.fv.measure(q,rbm)
O_vmcs=self.vmc.measure(q,rbm)
for O_true,O_vmc in zip(O_trues,O_vmcs):
err=abs(O_vmc-O_true).mean()
print 'Error = %.4f/%s'%(err,abs(O_true).mean())
#assert_(err<0.1)
if __name__=='__main__':
t=VMCTest(model='AFH2D')
t.test_measureq()
pdb.set_trace()
t.test_measureh()
t.test_measurepw()
| mit |
JPFrancoia/scikit-learn | sklearn/model_selection/tests/test_split.py | 7 | 41116 | """Test the split module"""
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import coo_matrix, csc_matrix, csr_matrix
from scipy import stats
from scipy.misc import comb
from itertools import combinations
from sklearn.utils.fixes import combinations_with_replacement
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.validation import _num_samples
from sklearn.utils.mocking import MockDataFrame
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import KFold
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import GroupKFold
from sklearn.model_selection import TimeSeriesSplit
from sklearn.model_selection import LeaveOneOut
from sklearn.model_selection import LeaveOneGroupOut
from sklearn.model_selection import LeavePOut
from sklearn.model_selection import LeavePGroupsOut
from sklearn.model_selection import ShuffleSplit
from sklearn.model_selection import GroupShuffleSplit
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.model_selection import PredefinedSplit
from sklearn.model_selection import check_cv
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.linear_model import Ridge
from sklearn.model_selection._split import _validate_shuffle_split
from sklearn.model_selection._split import _CVIterableWrapper
from sklearn.model_selection._split import _build_repr
from sklearn.datasets import load_digits
from sklearn.datasets import make_classification
from sklearn.externals import six
from sklearn.externals.six.moves import zip
from sklearn.svm import SVC
X = np.ones(10)
y = np.arange(10) // 2
P_sparse = coo_matrix(np.eye(5))
digits = load_digits()
class MockClassifier(object):
"""Dummy classifier to test the cross-validation"""
def __init__(self, a=0, allow_nd=False):
self.a = a
self.allow_nd = allow_nd
def fit(self, X, Y=None, sample_weight=None, class_prior=None,
sparse_sample_weight=None, sparse_param=None, dummy_int=None,
dummy_str=None, dummy_obj=None, callback=None):
"""The dummy arguments are to test that this fit function can
accept non-array arguments through cross-validation, such as:
- int
- str (this is actually array-like)
- object
- function
"""
self.dummy_int = dummy_int
self.dummy_str = dummy_str
self.dummy_obj = dummy_obj
if callback is not None:
callback(self)
if self.allow_nd:
X = X.reshape(len(X), -1)
if X.ndim >= 3 and not self.allow_nd:
raise ValueError('X cannot be d')
if sample_weight is not None:
assert_true(sample_weight.shape[0] == X.shape[0],
'MockClassifier extra fit_param sample_weight.shape[0]'
' is {0}, should be {1}'.format(sample_weight.shape[0],
X.shape[0]))
if class_prior is not None:
assert_true(class_prior.shape[0] == len(np.unique(y)),
'MockClassifier extra fit_param class_prior.shape[0]'
' is {0}, should be {1}'.format(class_prior.shape[0],
len(np.unique(y))))
if sparse_sample_weight is not None:
fmt = ('MockClassifier extra fit_param sparse_sample_weight'
'.shape[0] is {0}, should be {1}')
assert_true(sparse_sample_weight.shape[0] == X.shape[0],
fmt.format(sparse_sample_weight.shape[0], X.shape[0]))
if sparse_param is not None:
fmt = ('MockClassifier extra fit_param sparse_param.shape '
'is ({0}, {1}), should be ({2}, {3})')
assert_true(sparse_param.shape == P_sparse.shape,
fmt.format(sparse_param.shape[0],
sparse_param.shape[1],
P_sparse.shape[0], P_sparse.shape[1]))
return self
def predict(self, T):
if self.allow_nd:
T = T.reshape(len(T), -1)
return T[:, 0]
def score(self, X=None, Y=None):
return 1. / (1 + np.abs(self.a))
def get_params(self, deep=False):
return {'a': self.a, 'allow_nd': self.allow_nd}
@ignore_warnings
def test_cross_validator_with_default_params():
n_samples = 4
n_unique_groups = 4
n_splits = 2
p = 2
n_shuffle_splits = 10 # (the default value)
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
X_1d = np.array([1, 2, 3, 4])
y = np.array([1, 1, 2, 2])
groups = np.array([1, 2, 3, 4])
loo = LeaveOneOut()
lpo = LeavePOut(p)
kf = KFold(n_splits)
skf = StratifiedKFold(n_splits)
lolo = LeaveOneGroupOut()
lopo = LeavePGroupsOut(p)
ss = ShuffleSplit(random_state=0)
ps = PredefinedSplit([1, 1, 2, 2]) # n_splits = number of unique folds = 2
loo_repr = "LeaveOneOut()"
lpo_repr = "LeavePOut(p=2)"
kf_repr = "KFold(n_splits=2, random_state=None, shuffle=False)"
skf_repr = "StratifiedKFold(n_splits=2, random_state=None, shuffle=False)"
lolo_repr = "LeaveOneGroupOut()"
lopo_repr = "LeavePGroupsOut(n_groups=2)"
ss_repr = ("ShuffleSplit(n_splits=10, random_state=0, test_size=0.1, "
"train_size=None)")
ps_repr = "PredefinedSplit(test_fold=array([1, 1, 2, 2]))"
n_splits_expected = [n_samples, comb(n_samples, p), n_splits, n_splits,
n_unique_groups, comb(n_unique_groups, p),
n_shuffle_splits, 2]
for i, (cv, cv_repr) in enumerate(zip(
[loo, lpo, kf, skf, lolo, lopo, ss, ps],
[loo_repr, lpo_repr, kf_repr, skf_repr, lolo_repr, lopo_repr,
ss_repr, ps_repr])):
# Test if get_n_splits works correctly
assert_equal(n_splits_expected[i], cv.get_n_splits(X, y, groups))
# Test if the cross-validator works as expected even if
# the data is 1d
np.testing.assert_equal(list(cv.split(X, y, groups)),
list(cv.split(X_1d, y, groups)))
# Test that train, test indices returned are integers
for train, test in cv.split(X, y, groups):
assert_equal(np.asarray(train).dtype.kind, 'i')
assert_equal(np.asarray(test).dtype.kind, 'i')
# Test if the repr works without any errors
assert_equal(cv_repr, repr(cv))
def check_valid_split(train, test, n_samples=None):
# Use python sets to get more informative assertion failure messages
train, test = set(train), set(test)
# Train and test split should not overlap
assert_equal(train.intersection(test), set())
if n_samples is not None:
# Check that the union of train and test split covers all the indices
assert_equal(train.union(test), set(range(n_samples)))
def check_cv_coverage(cv, X, y, groups, expected_n_splits=None):
n_samples = _num_samples(X)
# Check that all the samples appear at least once in a test fold
if expected_n_splits is not None:
assert_equal(cv.get_n_splits(X, y, groups), expected_n_splits)
else:
expected_n_splits = cv.get_n_splits(X, y, groups)
collected_test_samples = set()
iterations = 0
for train, test in cv.split(X, y, groups):
check_valid_split(train, test, n_samples=n_samples)
iterations += 1
collected_test_samples.update(test)
# Check that the accumulated test samples cover the whole dataset
assert_equal(iterations, expected_n_splits)
if n_samples is not None:
assert_equal(collected_test_samples, set(range(n_samples)))
def test_kfold_valueerrors():
X1 = np.array([[1, 2], [3, 4], [5, 6]])
X2 = np.array([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]])
# Check that errors are raised if there is not enough samples
assert_raises(ValueError, next, KFold(4).split(X1))
# Check that a warning is raised if the least populated class has too few
# members.
y = np.array([3, 3, -1, -1, 3])
skf_3 = StratifiedKFold(3)
assert_warns_message(Warning, "The least populated class",
next, skf_3.split(X2, y))
# Check that despite the warning the folds are still computed even
# though all the classes are not necessarily represented on each
# side of the split at each split
with warnings.catch_warnings():
warnings.simplefilter("ignore")
check_cv_coverage(skf_3, X2, y, groups=None, expected_n_splits=3)
# Check that errors are raised if all n_groups for individual
# classes are less than n_splits.
y = np.array([3, 3, -1, -1, 2])
assert_raises(ValueError, next, skf_3.split(X2, y))
# Error when number of folds is <= 1
assert_raises(ValueError, KFold, 0)
assert_raises(ValueError, KFold, 1)
error_string = ("k-fold cross-validation requires at least one"
" train/test split")
assert_raise_message(ValueError, error_string,
StratifiedKFold, 0)
assert_raise_message(ValueError, error_string,
StratifiedKFold, 1)
# When n_splits is not integer:
assert_raises(ValueError, KFold, 1.5)
assert_raises(ValueError, KFold, 2.0)
assert_raises(ValueError, StratifiedKFold, 1.5)
assert_raises(ValueError, StratifiedKFold, 2.0)
# When shuffle is not a bool:
assert_raises(TypeError, KFold, n_splits=4, shuffle=None)
def test_kfold_indices():
# Check all indices are returned in the test folds
X1 = np.ones(18)
kf = KFold(3)
check_cv_coverage(kf, X1, y=None, groups=None, expected_n_splits=3)
# Check all indices are returned in the test folds even when equal-sized
# folds are not possible
X2 = np.ones(17)
kf = KFold(3)
check_cv_coverage(kf, X2, y=None, groups=None, expected_n_splits=3)
# Check if get_n_splits returns the number of folds
assert_equal(5, KFold(5).get_n_splits(X2))
def test_kfold_no_shuffle():
# Manually check that KFold preserves the data ordering on toy datasets
X2 = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]]
splits = KFold(2).split(X2[:-1])
train, test = next(splits)
assert_array_equal(test, [0, 1])
assert_array_equal(train, [2, 3])
train, test = next(splits)
assert_array_equal(test, [2, 3])
assert_array_equal(train, [0, 1])
splits = KFold(2).split(X2)
train, test = next(splits)
assert_array_equal(test, [0, 1, 2])
assert_array_equal(train, [3, 4])
train, test = next(splits)
assert_array_equal(test, [3, 4])
assert_array_equal(train, [0, 1, 2])
def test_stratified_kfold_no_shuffle():
# Manually check that StratifiedKFold preserves the data ordering as much
# as possible on toy datasets in order to avoid hiding sample dependencies
# when possible
X, y = np.ones(4), [1, 1, 0, 0]
splits = StratifiedKFold(2).split(X, y)
train, test = next(splits)
assert_array_equal(test, [0, 2])
assert_array_equal(train, [1, 3])
train, test = next(splits)
assert_array_equal(test, [1, 3])
assert_array_equal(train, [0, 2])
X, y = np.ones(7), [1, 1, 1, 0, 0, 0, 0]
splits = StratifiedKFold(2).split(X, y)
train, test = next(splits)
assert_array_equal(test, [0, 1, 3, 4])
assert_array_equal(train, [2, 5, 6])
train, test = next(splits)
assert_array_equal(test, [2, 5, 6])
assert_array_equal(train, [0, 1, 3, 4])
# Check if get_n_splits returns the number of folds
assert_equal(5, StratifiedKFold(5).get_n_splits(X, y))
def test_stratified_kfold_ratios():
# Check that stratified kfold preserves class ratios in individual splits
# Repeat with shuffling turned off and on
n_samples = 1000
X = np.ones(n_samples)
y = np.array([4] * int(0.10 * n_samples) +
[0] * int(0.89 * n_samples) +
[1] * int(0.01 * n_samples))
for shuffle in (False, True):
for train, test in StratifiedKFold(5, shuffle=shuffle).split(X, y):
assert_almost_equal(np.sum(y[train] == 4) / len(train), 0.10, 2)
assert_almost_equal(np.sum(y[train] == 0) / len(train), 0.89, 2)
assert_almost_equal(np.sum(y[train] == 1) / len(train), 0.01, 2)
assert_almost_equal(np.sum(y[test] == 4) / len(test), 0.10, 2)
assert_almost_equal(np.sum(y[test] == 0) / len(test), 0.89, 2)
assert_almost_equal(np.sum(y[test] == 1) / len(test), 0.01, 2)
def test_kfold_balance():
# Check that KFold returns folds with balanced sizes
for i in range(11, 17):
kf = KFold(5).split(X=np.ones(i))
sizes = []
for _, test in kf:
sizes.append(len(test))
assert_true((np.max(sizes) - np.min(sizes)) <= 1)
assert_equal(np.sum(sizes), i)
def test_stratifiedkfold_balance():
    # Check that StratifiedKFold returns folds with balanced sizes (only when
    # stratification is possible)
# Repeat with shuffling turned off and on
X = np.ones(17)
y = [0] * 3 + [1] * 14
for shuffle in (True, False):
cv = StratifiedKFold(3, shuffle=shuffle)
for i in range(11, 17):
skf = cv.split(X[:i], y[:i])
sizes = []
for _, test in skf:
sizes.append(len(test))
assert_true((np.max(sizes) - np.min(sizes)) <= 1)
assert_equal(np.sum(sizes), i)
def test_shuffle_kfold():
# Check the indices are shuffled properly
kf = KFold(3)
kf2 = KFold(3, shuffle=True, random_state=0)
kf3 = KFold(3, shuffle=True, random_state=1)
X = np.ones(300)
all_folds = np.zeros(300)
for (tr1, te1), (tr2, te2), (tr3, te3) in zip(
kf.split(X), kf2.split(X), kf3.split(X)):
for tr_a, tr_b in combinations((tr1, tr2, tr3), 2):
# Assert that there is no complete overlap
assert_not_equal(len(np.intersect1d(tr_a, tr_b)), len(tr1))
# Set all test indices in successive iterations of kf2 to 1
all_folds[te2] = 1
# Check that all indices are returned in the different test folds
assert_equal(sum(all_folds), 300)
def test_shuffle_kfold_stratifiedkfold_reproducibility():
    # Check that when shuffle is True, multiple split calls produce the
    # same split when random_state is set
X = np.ones(15) # Divisible by 3
y = [0] * 7 + [1] * 8
X2 = np.ones(16) # Not divisible by 3
y2 = [0] * 8 + [1] * 8
kf = KFold(3, shuffle=True, random_state=0)
skf = StratifiedKFold(3, shuffle=True, random_state=0)
for cv in (kf, skf):
np.testing.assert_equal(list(cv.split(X, y)), list(cv.split(X, y)))
np.testing.assert_equal(list(cv.split(X2, y2)), list(cv.split(X2, y2)))
kf = KFold(3, shuffle=True)
skf = StratifiedKFold(3, shuffle=True)
for cv in (kf, skf):
for data in zip((X, X2), (y, y2)):
try:
np.testing.assert_equal(list(cv.split(*data)),
list(cv.split(*data)))
except AssertionError:
pass
else:
raise AssertionError("The splits for data, %s, are same even "
"when random state is not set" % data)
def test_shuffle_stratifiedkfold():
# Check that shuffling is happening when requested, and for proper
# sample coverage
X_40 = np.ones(40)
y = [0] * 20 + [1] * 20
kf0 = StratifiedKFold(5, shuffle=True, random_state=0)
kf1 = StratifiedKFold(5, shuffle=True, random_state=1)
for (_, test0), (_, test1) in zip(kf0.split(X_40, y),
kf1.split(X_40, y)):
assert_not_equal(set(test0), set(test1))
check_cv_coverage(kf0, X_40, y, groups=None, expected_n_splits=5)
def test_kfold_can_detect_dependent_samples_on_digits(): # see #2372
# The digits samples are dependent: they are apparently grouped by authors
# although we don't have any information on the groups segment locations
# for this data. We can highlight this fact by computing k-fold cross-
# validation with and without shuffling: we observe that the shuffling case
# wrongly makes the IID assumption and is therefore too optimistic: it
    # estimates a much higher accuracy (around 0.93) than the
    # non-shuffling variant (around 0.81).
X, y = digits.data[:600], digits.target[:600]
model = SVC(C=10, gamma=0.005)
n_splits = 3
cv = KFold(n_splits=n_splits, shuffle=False)
mean_score = cross_val_score(model, X, y, cv=cv).mean()
assert_greater(0.92, mean_score)
assert_greater(mean_score, 0.80)
# Shuffling the data artificially breaks the dependency and hides the
# overfitting of the model with regards to the writing style of the authors
# by yielding a seriously overestimated score:
cv = KFold(n_splits, shuffle=True, random_state=0)
mean_score = cross_val_score(model, X, y, cv=cv).mean()
assert_greater(mean_score, 0.92)
cv = KFold(n_splits, shuffle=True, random_state=1)
mean_score = cross_val_score(model, X, y, cv=cv).mean()
assert_greater(mean_score, 0.92)
# Similarly, StratifiedKFold should try to shuffle the data as little
# as possible (while respecting the balanced class constraints)
# and thus be able to detect the dependency by not overestimating
# the CV score either. As the digits dataset is approximately balanced
# the estimated mean score is close to the score measured with
# non-shuffled KFold
cv = StratifiedKFold(n_splits)
mean_score = cross_val_score(model, X, y, cv=cv).mean()
assert_greater(0.93, mean_score)
assert_greater(mean_score, 0.80)
def test_shuffle_split():
ss1 = ShuffleSplit(test_size=0.2, random_state=0).split(X)
ss2 = ShuffleSplit(test_size=2, random_state=0).split(X)
ss3 = ShuffleSplit(test_size=np.int32(2), random_state=0).split(X)
for typ in six.integer_types:
ss4 = ShuffleSplit(test_size=typ(2), random_state=0).split(X)
for t1, t2, t3, t4 in zip(ss1, ss2, ss3, ss4):
assert_array_equal(t1[0], t2[0])
assert_array_equal(t2[0], t3[0])
assert_array_equal(t3[0], t4[0])
assert_array_equal(t1[1], t2[1])
assert_array_equal(t2[1], t3[1])
assert_array_equal(t3[1], t4[1])
def test_stratified_shuffle_split_init():
X = np.arange(7)
y = np.asarray([0, 1, 1, 1, 2, 2, 2])
# Check that error is raised if there is a class with only one sample
assert_raises(ValueError, next,
StratifiedShuffleSplit(3, 0.2).split(X, y))
# Check that error is raised if the test set size is smaller than n_classes
assert_raises(ValueError, next, StratifiedShuffleSplit(3, 2).split(X, y))
# Check that error is raised if the train set size is smaller than
# n_classes
assert_raises(ValueError, next,
StratifiedShuffleSplit(3, 3, 2).split(X, y))
X = np.arange(9)
y = np.asarray([0, 0, 0, 1, 1, 1, 2, 2, 2])
    # Check that errors are raised if there are not enough samples
assert_raises(ValueError, StratifiedShuffleSplit, 3, 0.5, 0.6)
assert_raises(ValueError, next,
StratifiedShuffleSplit(3, 8, 0.6).split(X, y))
assert_raises(ValueError, next,
StratifiedShuffleSplit(3, 0.6, 8).split(X, y))
# Train size or test size too small
assert_raises(ValueError, next,
StratifiedShuffleSplit(train_size=2).split(X, y))
assert_raises(ValueError, next,
StratifiedShuffleSplit(test_size=2).split(X, y))
def test_stratified_shuffle_split_respects_test_size():
y = np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2])
test_size = 5
train_size = 10
sss = StratifiedShuffleSplit(6, test_size=test_size, train_size=train_size,
random_state=0).split(np.ones(len(y)), y)
for train, test in sss:
assert_equal(len(train), train_size)
assert_equal(len(test), test_size)
def test_stratified_shuffle_split_iter():
ys = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2] * 2),
np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
np.array([-1] * 800 + [1] * 50),
np.concatenate([[i] * (100 + i) for i in range(11)])
]
for y in ys:
sss = StratifiedShuffleSplit(6, test_size=0.33,
random_state=0).split(np.ones(len(y)), y)
# this is how test-size is computed internally
# in _validate_shuffle_split
test_size = np.ceil(0.33 * len(y))
train_size = len(y) - test_size
for train, test in sss:
assert_array_equal(np.unique(y[train]), np.unique(y[test]))
            # Check if folds keep class proportions
p_train = (np.bincount(np.unique(y[train],
return_inverse=True)[1]) /
float(len(y[train])))
p_test = (np.bincount(np.unique(y[test],
return_inverse=True)[1]) /
float(len(y[test])))
assert_array_almost_equal(p_train, p_test, 1)
assert_equal(len(train) + len(test), y.size)
assert_equal(len(train), train_size)
assert_equal(len(test), test_size)
assert_array_equal(np.lib.arraysetops.intersect1d(train, test), [])
def test_stratified_shuffle_split_even():
    # Test that StratifiedShuffleSplit draws the indices with an
    # equal chance
n_folds = 5
n_splits = 1000
def assert_counts_are_ok(idx_counts, p):
# Here we test that the distribution of the counts
# per index is close enough to a binomial
threshold = 0.05 / n_splits
bf = stats.binom(n_splits, p)
for count in idx_counts:
prob = bf.pmf(count)
assert_true(prob > threshold,
"An index is not drawn with chance corresponding "
"to even draws")
for n_samples in (6, 22):
groups = np.array((n_samples // 2) * [0, 1])
splits = StratifiedShuffleSplit(n_splits=n_splits,
test_size=1. / n_folds,
random_state=0)
train_counts = [0] * n_samples
test_counts = [0] * n_samples
n_splits_actual = 0
for train, test in splits.split(X=np.ones(n_samples), y=groups):
n_splits_actual += 1
for counter, ids in [(train_counts, train), (test_counts, test)]:
for id in ids:
counter[id] += 1
assert_equal(n_splits_actual, n_splits)
n_train, n_test = _validate_shuffle_split(
n_samples, test_size=1. / n_folds, train_size=1. - (1. / n_folds))
assert_equal(len(train), n_train)
assert_equal(len(test), n_test)
assert_equal(len(set(train).intersection(test)), 0)
group_counts = np.unique(groups)
assert_equal(splits.test_size, 1.0 / n_folds)
assert_equal(n_train + n_test, len(groups))
assert_equal(len(group_counts), 2)
ex_test_p = float(n_test) / n_samples
ex_train_p = float(n_train) / n_samples
assert_counts_are_ok(train_counts, ex_train_p)
assert_counts_are_ok(test_counts, ex_test_p)
def test_stratified_shuffle_split_overlap_train_test_bug():
# See https://github.com/scikit-learn/scikit-learn/issues/6121 for
# the original bug report
y = [0, 1, 2, 3] * 3 + [4, 5] * 5
X = np.ones_like(y)
sss = StratifiedShuffleSplit(n_splits=1,
test_size=0.5, random_state=0)
train, test = next(iter(sss.split(X=X, y=y)))
assert_array_equal(np.intersect1d(train, test), [])
def test_predefinedsplit_with_kfold_split():
    # Check that PredefinedSplit can reproduce a split generated by KFold.
folds = -1 * np.ones(10)
kf_train = []
kf_test = []
for i, (train_ind, test_ind) in enumerate(KFold(5, shuffle=True).split(X)):
kf_train.append(train_ind)
kf_test.append(test_ind)
folds[test_ind] = i
ps_train = []
ps_test = []
ps = PredefinedSplit(folds)
    # n_splits is simply the number of unique folds
assert_equal(len(np.unique(folds)), ps.get_n_splits())
for train_ind, test_ind in ps.split():
ps_train.append(train_ind)
ps_test.append(test_ind)
assert_array_equal(ps_train, kf_train)
assert_array_equal(ps_test, kf_test)
def test_group_shuffle_split():
groups = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]),
np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4])]
for l in groups:
X = y = np.ones(len(l))
n_splits = 6
test_size = 1./3
slo = GroupShuffleSplit(n_splits, test_size=test_size, random_state=0)
# Make sure the repr works
repr(slo)
# Test that the length is correct
assert_equal(slo.get_n_splits(X, y, groups=l), n_splits)
l_unique = np.unique(l)
for train, test in slo.split(X, y, groups=l):
# First test: no train group is in the test set and vice versa
l_train_unique = np.unique(l[train])
l_test_unique = np.unique(l[test])
assert_false(np.any(np.in1d(l[train], l_test_unique)))
assert_false(np.any(np.in1d(l[test], l_train_unique)))
# Second test: train and test add up to all the data
assert_equal(l[train].size + l[test].size, l.size)
# Third test: train and test are disjoint
assert_array_equal(np.intersect1d(train, test), [])
# Fourth test:
# unique train and test groups are correct, +- 1 for rounding error
assert_true(abs(len(l_test_unique) -
round(test_size * len(l_unique))) <= 1)
assert_true(abs(len(l_train_unique) -
round((1.0 - test_size) * len(l_unique))) <= 1)
def test_leave_group_out_changing_groups():
# Check that LeaveOneGroupOut and LeavePGroupsOut work normally if
# the groups variable is changed before calling split
groups = np.array([0, 1, 2, 1, 1, 2, 0, 0])
X = np.ones(len(groups))
groups_changing = np.array(groups, copy=True)
lolo = LeaveOneGroupOut().split(X, groups=groups)
lolo_changing = LeaveOneGroupOut().split(X, groups=groups)
lplo = LeavePGroupsOut(n_groups=2).split(X, groups=groups)
lplo_changing = LeavePGroupsOut(n_groups=2).split(X, groups=groups)
groups_changing[:] = 0
for llo, llo_changing in [(lolo, lolo_changing), (lplo, lplo_changing)]:
for (train, test), (train_chan, test_chan) in zip(llo, llo_changing):
assert_array_equal(train, train_chan)
assert_array_equal(test, test_chan)
    # n_splits = number of combinations of p=2 groups from the unique
    # groups = 3C2 = 3
assert_equal(3, LeavePGroupsOut(n_groups=2).get_n_splits(X, y, groups))
    # n_splits = number of unique groups (C(n_unique_groups, 1))
assert_equal(3, LeaveOneGroupOut().get_n_splits(X, y, groups))
def test_train_test_split_errors():
assert_raises(ValueError, train_test_split)
assert_raises(ValueError, train_test_split, range(3), train_size=1.1)
assert_raises(ValueError, train_test_split, range(3), test_size=0.6,
train_size=0.6)
assert_raises(ValueError, train_test_split, range(3),
test_size=np.float32(0.6), train_size=np.float32(0.6))
assert_raises(ValueError, train_test_split, range(3),
test_size="wrong_type")
assert_raises(ValueError, train_test_split, range(3), test_size=2,
train_size=4)
assert_raises(TypeError, train_test_split, range(3),
some_argument=1.1)
assert_raises(ValueError, train_test_split, range(3), range(42))
def test_train_test_split():
X = np.arange(100).reshape((10, 10))
X_s = coo_matrix(X)
y = np.arange(10)
# simple test
split = train_test_split(X, y, test_size=None, train_size=.5)
X_train, X_test, y_train, y_test = split
assert_equal(len(y_test), len(y_train))
# test correspondence of X and y
assert_array_equal(X_train[:, 0], y_train * 10)
assert_array_equal(X_test[:, 0], y_test * 10)
# don't convert lists to anything else by default
split = train_test_split(X, X_s, y.tolist())
X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
assert_true(isinstance(y_train, list))
assert_true(isinstance(y_test, list))
# allow nd-arrays
X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
split = train_test_split(X_4d, y_3d)
assert_equal(split[0].shape, (7, 5, 3, 2))
assert_equal(split[1].shape, (3, 5, 3, 2))
assert_equal(split[2].shape, (7, 7, 11))
assert_equal(split[3].shape, (3, 7, 11))
# test stratification option
y = np.array([1, 1, 1, 1, 2, 2, 2, 2])
for test_size, exp_test_size in zip([2, 4, 0.25, 0.5, 0.75],
[2, 4, 2, 4, 6]):
train, test = train_test_split(y, test_size=test_size,
stratify=y,
random_state=0)
assert_equal(len(test), exp_test_size)
assert_equal(len(test) + len(train), len(y))
# check the 1:1 ratio of ones and twos in the data is preserved
assert_equal(np.sum(train == 1), np.sum(train == 2))
@ignore_warnings
def train_test_split_pandas():
# check train_test_split doesn't destroy pandas dataframe
types = [MockDataFrame]
try:
from pandas import DataFrame
types.append(DataFrame)
except ImportError:
pass
for InputFeatureType in types:
# X dataframe
X_df = InputFeatureType(X)
X_train, X_test = train_test_split(X_df)
assert_true(isinstance(X_train, InputFeatureType))
assert_true(isinstance(X_test, InputFeatureType))
def train_test_split_sparse():
# check that train_test_split converts scipy sparse matrices
# to csr, as stated in the documentation
X = np.arange(100).reshape((10, 10))
sparse_types = [csr_matrix, csc_matrix, coo_matrix]
for InputFeatureType in sparse_types:
X_s = InputFeatureType(X)
X_train, X_test = train_test_split(X_s)
assert_true(isinstance(X_train, csr_matrix))
assert_true(isinstance(X_test, csr_matrix))
def train_test_split_mock_pandas():
# X mock dataframe
X_df = MockDataFrame(X)
X_train, X_test = train_test_split(X_df)
assert_true(isinstance(X_train, MockDataFrame))
assert_true(isinstance(X_test, MockDataFrame))
X_train_arr, X_test_arr = train_test_split(X_df)
def test_shufflesplit_errors():
# When the {test|train}_size is a float/invalid, error is raised at init
assert_raises(ValueError, ShuffleSplit, test_size=None, train_size=None)
assert_raises(ValueError, ShuffleSplit, test_size=2.0)
assert_raises(ValueError, ShuffleSplit, test_size=1.0)
assert_raises(ValueError, ShuffleSplit, test_size=0.1, train_size=0.95)
assert_raises(ValueError, ShuffleSplit, train_size=1j)
# When the {test|train}_size is an int, validation is based on the input X
# and happens at split(...)
assert_raises(ValueError, next, ShuffleSplit(test_size=11).split(X))
assert_raises(ValueError, next, ShuffleSplit(test_size=10).split(X))
assert_raises(ValueError, next, ShuffleSplit(test_size=8,
train_size=3).split(X))
def test_shufflesplit_reproducible():
# Check that iterating twice on the ShuffleSplit gives the same
# sequence of train-test when the random_state is given
ss = ShuffleSplit(random_state=21)
assert_array_equal(list(a for a, b in ss.split(X)),
list(a for a, b in ss.split(X)))
def test_train_test_split_allow_nans():
# Check that train_test_split allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
train_test_split(X, y, test_size=0.2, random_state=42)
def test_check_cv():
X = np.ones(9)
cv = check_cv(3, classifier=False)
# Use numpy.testing.assert_equal which recursively compares
# lists of lists
np.testing.assert_equal(list(KFold(3).split(X)), list(cv.split(X)))
y_binary = np.array([0, 1, 0, 1, 0, 0, 1, 1, 1])
cv = check_cv(3, y_binary, classifier=True)
np.testing.assert_equal(list(StratifiedKFold(3).split(X, y_binary)),
list(cv.split(X, y_binary)))
y_multiclass = np.array([0, 1, 0, 1, 2, 1, 2, 0, 2])
cv = check_cv(3, y_multiclass, classifier=True)
np.testing.assert_equal(list(StratifiedKFold(3).split(X, y_multiclass)),
list(cv.split(X, y_multiclass)))
X = np.ones(5)
y_multilabel = np.array([[0, 0, 0, 0], [0, 1, 1, 0], [0, 0, 0, 1],
[1, 1, 0, 1], [0, 0, 1, 0]])
cv = check_cv(3, y_multilabel, classifier=True)
np.testing.assert_equal(list(KFold(3).split(X)), list(cv.split(X)))
y_multioutput = np.array([[1, 2], [0, 3], [0, 0], [3, 1], [2, 0]])
cv = check_cv(3, y_multioutput, classifier=True)
np.testing.assert_equal(list(KFold(3).split(X)), list(cv.split(X)))
# Check if the old style classes are wrapped to have a split method
X = np.ones(9)
y_multiclass = np.array([0, 1, 0, 1, 2, 1, 2, 0, 2])
cv1 = check_cv(3, y_multiclass, classifier=True)
with warnings.catch_warnings(record=True):
from sklearn.cross_validation import StratifiedKFold as OldSKF
cv2 = check_cv(OldSKF(y_multiclass, n_folds=3))
np.testing.assert_equal(list(cv1.split(X, y_multiclass)),
list(cv2.split()))
assert_raises(ValueError, check_cv, cv="lolo")
def test_cv_iterable_wrapper():
y_multiclass = np.array([0, 1, 0, 1, 2, 1, 2, 0, 2])
with warnings.catch_warnings(record=True):
from sklearn.cross_validation import StratifiedKFold as OldSKF
cv = OldSKF(y_multiclass, n_folds=3)
wrapped_old_skf = _CVIterableWrapper(cv)
# Check if split works correctly
np.testing.assert_equal(list(cv), list(wrapped_old_skf.split()))
# Check if get_n_splits works correctly
assert_equal(len(cv), wrapped_old_skf.get_n_splits())
def test_group_kfold():
rng = np.random.RandomState(0)
# Parameters of the test
n_groups = 15
n_samples = 1000
n_splits = 5
X = y = np.ones(n_samples)
# Construct the test data
tolerance = 0.05 * n_samples # 5 percent error allowed
groups = rng.randint(0, n_groups, n_samples)
ideal_n_groups_per_fold = n_samples // n_splits
len(np.unique(groups))
# Get the test fold indices from the test set indices of each fold
folds = np.zeros(n_samples)
lkf = GroupKFold(n_splits=n_splits)
for i, (_, test) in enumerate(lkf.split(X, y, groups)):
folds[test] = i
# Check that folds have approximately the same size
assert_equal(len(folds), len(groups))
for i in np.unique(folds):
assert_greater_equal(tolerance,
abs(sum(folds == i) - ideal_n_groups_per_fold))
# Check that each group appears only in 1 fold
for group in np.unique(groups):
assert_equal(len(np.unique(folds[groups == group])), 1)
# Check that no group is on both sides of the split
groups = np.asarray(groups, dtype=object)
for train, test in lkf.split(X, y, groups):
assert_equal(len(np.intersect1d(groups[train], groups[test])), 0)
# Construct the test data
groups = np.array(['Albert', 'Jean', 'Bertrand', 'Michel', 'Jean',
'Francis', 'Robert', 'Michel', 'Rachel', 'Lois',
'Michelle', 'Bernard', 'Marion', 'Laura', 'Jean',
'Rachel', 'Franck', 'John', 'Gael', 'Anna', 'Alix',
'Robert', 'Marion', 'David', 'Tony', 'Abel', 'Becky',
'Madmood', 'Cary', 'Mary', 'Alexandre', 'David',
'Francis', 'Barack', 'Abdoul', 'Rasha', 'Xi', 'Silvia'])
n_groups = len(np.unique(groups))
n_samples = len(groups)
n_splits = 5
tolerance = 0.05 * n_samples # 5 percent error allowed
ideal_n_groups_per_fold = n_samples // n_splits
X = y = np.ones(n_samples)
# Get the test fold indices from the test set indices of each fold
folds = np.zeros(n_samples)
for i, (_, test) in enumerate(lkf.split(X, y, groups)):
folds[test] = i
# Check that folds have approximately the same size
assert_equal(len(folds), len(groups))
for i in np.unique(folds):
assert_greater_equal(tolerance,
abs(sum(folds == i) - ideal_n_groups_per_fold))
# Check that each group appears only in 1 fold
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
for group in np.unique(groups):
assert_equal(len(np.unique(folds[groups == group])), 1)
# Check that no group is on both sides of the split
groups = np.asarray(groups, dtype=object)
for train, test in lkf.split(X, y, groups):
assert_equal(len(np.intersect1d(groups[train], groups[test])), 0)
# Should fail if there are more folds than groups
groups = np.array([1, 1, 1, 2, 2])
X = y = np.ones(len(groups))
assert_raises_regexp(ValueError, "Cannot have number of splits.*greater",
next, GroupKFold(n_splits=3).split(X, y, groups))
def test_time_series_cv():
X = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14]]
# Should fail if there are more folds than samples
assert_raises_regexp(ValueError, "Cannot have number of folds.*greater",
next,
TimeSeriesSplit(n_splits=7).split(X))
tscv = TimeSeriesSplit(2)
# Manually check that Time Series CV preserves the data
# ordering on toy datasets
splits = tscv.split(X[:-1])
train, test = next(splits)
assert_array_equal(train, [0, 1])
assert_array_equal(test, [2, 3])
train, test = next(splits)
assert_array_equal(train, [0, 1, 2, 3])
assert_array_equal(test, [4, 5])
splits = TimeSeriesSplit(2).split(X)
train, test = next(splits)
assert_array_equal(train, [0, 1, 2])
assert_array_equal(test, [3, 4])
train, test = next(splits)
assert_array_equal(train, [0, 1, 2, 3, 4])
assert_array_equal(test, [5, 6])
# Check get_n_splits returns the correct number of splits
splits = TimeSeriesSplit(2).split(X)
n_splits_actual = len(list(splits))
assert_equal(n_splits_actual, tscv.get_n_splits())
assert_equal(n_splits_actual, 2)
def test_nested_cv():
# Test if nested cross validation works with different combinations of cv
rng = np.random.RandomState(0)
X, y = make_classification(n_samples=15, n_classes=2, random_state=0)
groups = rng.randint(0, 5, 15)
cvs = [LeaveOneGroupOut(), LeaveOneOut(), GroupKFold(), StratifiedKFold(),
StratifiedShuffleSplit(n_splits=3, random_state=0)]
for inner_cv, outer_cv in combinations_with_replacement(cvs, 2):
gs = GridSearchCV(Ridge(), param_grid={'alpha': [1, .1]},
cv=inner_cv)
cross_val_score(gs, X=X, y=y, groups=groups, cv=outer_cv,
fit_params={'groups': groups})
def test_build_repr():
class MockSplitter:
def __init__(self, a, b=0, c=None):
self.a = a
self.b = b
self.c = c
def __repr__(self):
return _build_repr(self)
assert_equal(repr(MockSplitter(5, 6)), "MockSplitter(a=5, b=6, c=None)")
| bsd-3-clause |
pieleric/odemis | src/odemis/util/test/peak_test.py | 2 | 5135 | # -*- coding: utf-8 -*-
'''
Created on 21 Oct 2015
@author: Kimon Tsitsikas
Copyright © 2014 Kimon Tsitsikas, Delmic
This file is part of Odemis.
Odemis is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation.
Odemis is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with Odemis. If not, see http://www.gnu.org/licenses/.
'''
from __future__ import division
import logging
import numpy
from odemis.dataio import hdf5
from odemis.util import peak
import os
import unittest
import matplotlib.pyplot as plt
logging.getLogger().setLevel(logging.DEBUG)
PATH = os.path.dirname(__file__)
class TestPeak(unittest.TestCase):
"""
Test peak fitting in both energy and space domain for all the available fitting types
"""
def setUp(self):
data = hdf5.read_data(os.path.join(PATH, "spectrum_fitting.h5"))[1]
data = numpy.squeeze(data)
self.data = data
self.wl_in_meters = numpy.linspace(470e-9, 1030e-9, 167)
max_bw = data.shape[0] // 2
min_bw = (max_bw - data.shape[0]) + 1
self.wl_in_pixels = list(range(min_bw, max_bw + 1))
self._peak_fitter = peak.PeakFitter()
def test_peakfitting_energy(self):
data = self.data
wl = self.wl_in_meters
spec = data[:, 20, 20]
# Try gaussian
f = self._peak_fitter.Fit(spec, wl, type='gaussian_energy')
params, offset, curve_type = f.result()
self.assertTrue(1 <= len(params) < 20)
# Parameters should be positive
for pos, width, amplitude in params:
self.assertGreater(pos, 0)
self.assertGreater(width, 0)
self.assertGreater(amplitude, 0)
# Create curve
curve = peak.Curve(wl, params, offset)
self.assertEqual(len(curve), len(wl))
        # TODO: find peaks on curve, and check that we get about the same peaks
wlhr = numpy.linspace(470e-9, 1030e-9, 512)
curve = peak.Curve(wlhr, params, offset, type='gaussian_energy')
self.assertEqual(len(curve), len(wlhr))
#plt.figure()
#plt.plot(wl, spec, 'r', wl, curve, 'r', linewidth=2)
# Try lorentzian
f = self._peak_fitter.Fit(spec, wl, type='lorentzian_energy')
params, offset, curve_type = f.result()
self.assertTrue(1 <= len(params) < 20)
# Parameters should be positive
for pos, width, amplitude in params:
self.assertGreater(pos, 0)
self.assertGreater(width, 0)
self.assertGreater(amplitude, 0)
curve = peak.Curve(wl, params, offset, type='lorentzian_energy')
self.assertEqual(len(curve), len(wl))
wlhr = numpy.linspace(470e-9, 1030e-9, 512)
        curve = peak.Curve(wlhr, params, offset, type='lorentzian_energy')
self.assertEqual(len(curve), len(wlhr))
#plt.figure()
#plt.plot(wl, spec, 'r', wl, curve, 'r', linewidth=2)
#plt.show(block=False)
# Assert wrong fitting type
self.assertRaises(KeyError, peak.Curve, wl, params, offset, type='wrongType')
def test_peakfitting_space(self):
data = self.data
wl = self.wl_in_pixels
spec = data[:, 20, 20]
# Try gaussian
f = self._peak_fitter.Fit(spec, wl, type='gaussian_space')
params, offset, curve_type = f.result()
self.assertTrue(1 <= len(params) < 20)
# pos parameter can be negative
for pos, width, amplitude in params:
self.assertGreater(pos, -1000)
self.assertGreater(width, 0)
self.assertGreater(amplitude, 0)
# Create curve
curve = peak.Curve(wl, params, offset)
self.assertEqual(len(curve), len(wl))
        # TODO: find peaks on curve, and check that we get about the same peaks
wlhr = numpy.linspace(-125, 125, 180)
curve = peak.Curve(wlhr, params, offset, type='gaussian_space')
self.assertEqual(len(curve), len(wlhr))
# Try lorentzian
f = self._peak_fitter.Fit(spec, wl, type='lorentzian_space')
params, offset, curve_type = f.result()
self.assertTrue(1 <= len(params) < 20)
# pos parameter can be negative
for pos, width, amplitude in params:
self.assertGreater(pos, -1000)
self.assertGreater(width, 0)
self.assertGreater(amplitude, 0)
curve = peak.Curve(wl, params, offset, type='lorentzian_space')
self.assertEqual(len(curve), len(wl))
wlhr = numpy.linspace(-125, 125, 180)
curve = peak.Curve(wlhr, params, offset, type='lorentzian_space')
self.assertEqual(len(curve), len(wlhr))
# Assert wrong fitting type
self.assertRaises(KeyError, peak.Curve, wl, params, offset, type='wrongType')
if __name__ == "__main__":
unittest.main()
| gpl-2.0 |
HaroldMills/Vesper | setup.py | 1 | 4644 | """
Setup.py for Vesper pip package.
All of the commands below should be issued from the directory containing
this file.
To build the Vesper package:
python setup.py sdist bdist_wheel
To upload the Vesper package to the test Python package index:
python -m twine upload --repository-url https://test.pypi.org/legacy/ dist/*
To upload the Vesper package to the real Python package index:
python -m twine upload dist/*
To create a conda environment using a local Vesper package:
conda create -n test python=3.6
conda activate test
pip install dist/vesper-<version>.tar.gz
To create a conda environment using a Vesper package from the test PyPI:
conda create -n test python=3.6
conda activate test
pip install --extra-index-url https://test.pypi.org/simple/ vesper
To create a conda environment using a Vesper package from the real PyPI:
conda create -n test python=3.6
conda activate test
pip install vesper==<version>
To create a conda environment for Vesper development with TensorFlow 1.15.x:
conda create -n vesper-dev-tf1 python=3.6
conda activate vesper-dev-tf1
pip install bokeh django jsonschema matplotlib resampy ruamel_yaml skyfield sphinx sphinx_rtd_theme tensorflow~=1.15.0
To create a conda environment for Vesper development with TensorFlow 2.x:
conda create -n vesper-dev-tf2 python=3.9
conda activate vesper-dev-tf2
pip install bokeh django jsonschema matplotlib resampy ruamel_yaml skyfield sphinx sphinx_rtd_theme tensorflow
Whenever you modify plugin entry points, you must run:
python setup.py develop
for the plugin manager to be able to see the changes. If you don't do this,
you will see ImportError exceptions when the plugin manager tries to load
entry points that no longer exist.
"""
from importlib.machinery import SourceFileLoader
from pathlib import Path
from setuptools import find_packages, setup
def load_version_module(package_name):
module_name = f'{package_name}.version'
file_path = Path(f'{package_name}/version.py')
loader = SourceFileLoader(module_name, str(file_path))
return loader.load_module()
version = load_version_module('vesper')
setup(
name='vesper',
version=version.full_version,
description=(
'Software for acoustical monitoring of nocturnal bird migration.'),
url='https://github.com/HaroldMills/Vesper',
author='Harold Mills',
author_email='[email protected]',
license='MIT',
# TODO: Consider making the `vesper` Python package a native
# namespace package, allowing it to be split across multiple,
# separate distribution packages to allow optional ones (e.g.
# ones containing optional plugins) to be omitted from an
# installation. See
# https://packaging.python.org/guides/packaging-namespace-packages/
# for a discussion of namespace packages.
#
# Two important points from that discussion are that:
#
# 1. Every distribution package that is part of a `vesper`
# namespace package must omit `__init__.py` from its `vesper`
# package directory. Note that this will affect where the
# `__version__` package attribute is defined, pushing it down
# one level of the package hierarchy, into the `__init__.py`
# of each subpackage. See PEP 396 for more about `__version__`
# for namespace packages.
#
# 2. The `setup.py` file of every distribution package must use
# `setuptools.find_namespace_packages` rather than
# `setuptools.find_packages` to find its packages.
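    #
    # A minimal, hypothetical sketch of that variant (not used here), assuming
    # the same unit-test exclusions would be kept:
    #
    #     from setuptools import find_namespace_packages
    #
    #     packages=find_namespace_packages(
    #         include=['vesper', 'vesper.*'],
    #         exclude=['tests', 'tests.*', '*.tests.*', '*.tests']),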
packages=find_packages(
# We exclude the unit test packages since some of them contain a
# lot of data, for example large audio files.
exclude=['tests', 'tests.*', '*.tests.*', '*.tests']
),
classifiers=[
'Programming Language :: Python :: 3',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
],
install_requires=[
'django~=3.2.0',
'jsonschema~=3.2',
'resampy',
'ruamel_yaml',
'skyfield~=1.38',
'tensorflow~=2.5',
],
entry_points={
'console_scripts': [
'vesper_admin=vesper.django.manage:main',
'vesper_recorder=vesper.scripts.vesper_recorder:_main',
'vesper_play_recorder_test_signal=vesper.scripts.play_recorder_test_signal:_main',
'vesper_show_audio_input_devices=vesper.scripts.show_audio_input_devices:_main',
]
},
include_package_data=True,
zip_safe=False
)
| mit |
jpautom/scikit-learn | sklearn/utils/tests/test_extmath.py | 19 | 21979 | # Authors: Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Denis Engemann <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from scipy import linalg
from scipy import stats
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import skip_if_32bit
from sklearn.utils.extmath import density
from sklearn.utils.extmath import logsumexp
from sklearn.utils.extmath import norm, squared_norm
from sklearn.utils.extmath import randomized_svd
from sklearn.utils.extmath import row_norms
from sklearn.utils.extmath import weighted_mode
from sklearn.utils.extmath import cartesian
from sklearn.utils.extmath import log_logistic
from sklearn.utils.extmath import fast_dot, _fast_dot
from sklearn.utils.extmath import svd_flip
from sklearn.utils.extmath import _incremental_mean_and_var
from sklearn.utils.extmath import _deterministic_vector_sign_flip
from sklearn.utils.extmath import softmax
from sklearn.datasets.samples_generator import make_low_rank_matrix
def test_density():
rng = np.random.RandomState(0)
X = rng.randint(10, size=(10, 5))
X[1, 2] = 0
X[5, 3] = 0
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
X_coo = sparse.coo_matrix(X)
X_lil = sparse.lil_matrix(X)
for X_ in (X_csr, X_csc, X_coo, X_lil):
assert_equal(density(X_), density(X))
def test_uniform_weights():
# with uniform weights, results should be identical to stats.mode
rng = np.random.RandomState(0)
x = rng.randint(10, size=(10, 5))
weights = np.ones(x.shape)
for axis in (None, 0, 1):
mode, score = stats.mode(x, axis)
mode2, score2 = weighted_mode(x, weights, axis)
assert_true(np.all(mode == mode2))
assert_true(np.all(score == score2))
def test_random_weights():
# set this up so that each row should have a weighted mode of 6,
# with a score that is easily reproduced
mode_result = 6
rng = np.random.RandomState(0)
x = rng.randint(mode_result, size=(100, 10))
w = rng.random_sample(x.shape)
x[:, :5] = mode_result
w[:, :5] += 1
mode, score = weighted_mode(x, w, axis=1)
assert_array_equal(mode, mode_result)
assert_array_almost_equal(score.ravel(), w[:, :5].sum(1))
def test_logsumexp():
# Try to add some smallish numbers in logspace
x = np.array([1e-40] * 1000000)
logx = np.log(x)
assert_almost_equal(np.exp(logsumexp(logx)), x.sum())
X = np.vstack([x, x])
logX = np.vstack([logx, logx])
assert_array_almost_equal(np.exp(logsumexp(logX, axis=0)), X.sum(axis=0))
assert_array_almost_equal(np.exp(logsumexp(logX, axis=1)), X.sum(axis=1))
def test_randomized_svd_low_rank():
# Check that extmath.randomized_svd is consistent with linalg.svd
n_samples = 100
n_features = 500
rank = 5
k = 10
# generate a matrix X of approximate effective rank `rank` and no noise
# component (very structured signal):
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.0,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
# compute the singular values of X using the slow exact method
U, s, V = linalg.svd(X, full_matrices=False)
for normalizer in ['auto', 'none', 'LU', 'QR']:
# compute the singular values of X using the fast approximate method
Ua, sa, Va = \
randomized_svd(X, k, power_iteration_normalizer=normalizer)
assert_equal(Ua.shape, (n_samples, k))
assert_equal(sa.shape, (k,))
assert_equal(Va.shape, (k, n_features))
# ensure that the singular values of both methods are equal up to the
# real rank of the matrix
assert_almost_equal(s[:k], sa)
# check the singular vectors too (while not checking the sign)
assert_almost_equal(np.dot(U[:, :k], V[:k, :]), np.dot(Ua, Va))
# check the sparse matrix representation
X = sparse.csr_matrix(X)
# compute the singular values of X using the fast approximate method
Ua, sa, Va = \
randomized_svd(X, k, power_iteration_normalizer=normalizer)
assert_almost_equal(s[:rank], sa[:rank])
def test_norm_squared_norm():
X = np.random.RandomState(42).randn(50, 63)
X *= 100 # check stability
X += 200
assert_almost_equal(np.linalg.norm(X.ravel()), norm(X))
assert_almost_equal(norm(X) ** 2, squared_norm(X), decimal=6)
assert_almost_equal(np.linalg.norm(X), np.sqrt(squared_norm(X)), decimal=6)
def test_row_norms():
X = np.random.RandomState(42).randn(100, 100)
sq_norm = (X ** 2).sum(axis=1)
assert_array_almost_equal(sq_norm, row_norms(X, squared=True), 5)
assert_array_almost_equal(np.sqrt(sq_norm), row_norms(X))
Xcsr = sparse.csr_matrix(X, dtype=np.float32)
assert_array_almost_equal(sq_norm, row_norms(Xcsr, squared=True), 5)
assert_array_almost_equal(np.sqrt(sq_norm), row_norms(Xcsr))
def test_randomized_svd_low_rank_with_noise():
# Check that extmath.randomized_svd can handle noisy matrices
n_samples = 100
n_features = 500
rank = 5
k = 10
    # generate a matrix X with approximate effective rank `rank` and an
    # important noisy component
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.1,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
# compute the singular values of X using the slow exact method
_, s, _ = linalg.svd(X, full_matrices=False)
for normalizer in ['auto', 'none', 'LU', 'QR']:
# compute the singular values of X using the fast approximate
# method without the iterated power method
_, sa, _ = randomized_svd(X, k, n_iter=0,
power_iteration_normalizer=normalizer)
# the approximation does not tolerate the noise:
assert_greater(np.abs(s[:k] - sa).max(), 0.01)
# compute the singular values of X using the fast approximate
# method with iterated power method
_, sap, _ = randomized_svd(X, k,
power_iteration_normalizer=normalizer)
        # the iterated power method helps to get rid of the noise:
assert_almost_equal(s[:k], sap, decimal=3)
def test_randomized_svd_infinite_rank():
# Check that extmath.randomized_svd can handle noisy matrices
n_samples = 100
n_features = 500
rank = 5
k = 10
    # let us try again without a low-rank component: just regularly but slowly
    # decreasing singular values, so the rank of the data matrix is infinite
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=1.0,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
# compute the singular values of X using the slow exact method
_, s, _ = linalg.svd(X, full_matrices=False)
for normalizer in ['auto', 'none', 'LU', 'QR']:
# compute the singular values of X using the fast approximate method
# without the iterated power method
_, sa, _ = randomized_svd(X, k, n_iter=0,
power_iteration_normalizer=normalizer)
# the approximation does not tolerate the noise:
assert_greater(np.abs(s[:k] - sa).max(), 0.1)
# compute the singular values of X using the fast approximate method
# with iterated power method
_, sap, _ = randomized_svd(X, k, n_iter=5,
power_iteration_normalizer=normalizer)
# the iterated power method is still managing to get most of the
# structure at the requested rank
assert_almost_equal(s[:k], sap, decimal=3)
def test_randomized_svd_transpose_consistency():
    # Check that transposing the design matrix has limited impact
n_samples = 100
n_features = 500
rank = 4
k = 10
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.5,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
U1, s1, V1 = randomized_svd(X, k, n_iter=3, transpose=False,
random_state=0)
U2, s2, V2 = randomized_svd(X, k, n_iter=3, transpose=True,
random_state=0)
U3, s3, V3 = randomized_svd(X, k, n_iter=3, transpose='auto',
random_state=0)
U4, s4, V4 = linalg.svd(X, full_matrices=False)
assert_almost_equal(s1, s4[:k], decimal=3)
assert_almost_equal(s2, s4[:k], decimal=3)
assert_almost_equal(s3, s4[:k], decimal=3)
assert_almost_equal(np.dot(U1, V1), np.dot(U4[:, :k], V4[:k, :]),
decimal=2)
assert_almost_equal(np.dot(U2, V2), np.dot(U4[:, :k], V4[:k, :]),
decimal=2)
# in this case 'auto' is equivalent to transpose
assert_almost_equal(s2, s3)
def test_randomized_svd_power_iteration_normalizer():
    # randomized_svd with power_iteration_normalizer='none' diverges for
    # a large number of power iterations on this dataset
rng = np.random.RandomState(42)
X = make_low_rank_matrix(300, 1000, effective_rank=50, random_state=rng)
X += 3 * rng.randint(0, 2, size=X.shape)
n_components = 50
# Check that it diverges with many (non-normalized) power iterations
U, s, V = randomized_svd(X, n_components, n_iter=2,
power_iteration_normalizer='none')
A = X - U.dot(np.diag(s).dot(V))
error_2 = linalg.norm(A, ord='fro')
U, s, V = randomized_svd(X, n_components, n_iter=20,
power_iteration_normalizer='none')
A = X - U.dot(np.diag(s).dot(V))
error_20 = linalg.norm(A, ord='fro')
print(error_2 - error_20)
assert_greater(np.abs(error_2 - error_20), 100)
for normalizer in ['LU', 'QR', 'auto']:
U, s, V = randomized_svd(X, n_components, n_iter=2,
power_iteration_normalizer=normalizer)
A = X - U.dot(np.diag(s).dot(V))
error_2 = linalg.norm(A, ord='fro')
for i in [5, 10, 50]:
U, s, V = randomized_svd(X, n_components, n_iter=i,
power_iteration_normalizer=normalizer)
A = X - U.dot(np.diag(s).dot(V))
error = linalg.norm(A, ord='fro')
print(error_2 - error)
assert_greater(15, np.abs(error_2 - error))
def test_svd_flip():
# Check that svd_flip works in both situations, and reconstructs input.
rs = np.random.RandomState(1999)
n_samples = 20
n_features = 10
X = rs.randn(n_samples, n_features)
# Check matrix reconstruction
U, S, V = linalg.svd(X, full_matrices=False)
U1, V1 = svd_flip(U, V, u_based_decision=False)
assert_almost_equal(np.dot(U1 * S, V1), X, decimal=6)
# Check transposed matrix reconstruction
XT = X.T
U, S, V = linalg.svd(XT, full_matrices=False)
U2, V2 = svd_flip(U, V, u_based_decision=True)
assert_almost_equal(np.dot(U2 * S, V2), XT, decimal=6)
# Check that different flip methods are equivalent under reconstruction
U_flip1, V_flip1 = svd_flip(U, V, u_based_decision=True)
assert_almost_equal(np.dot(U_flip1 * S, V_flip1), XT, decimal=6)
U_flip2, V_flip2 = svd_flip(U, V, u_based_decision=False)
assert_almost_equal(np.dot(U_flip2 * S, V_flip2), XT, decimal=6)
def test_randomized_svd_sign_flip():
a = np.array([[2.0, 0.0], [0.0, 1.0]])
u1, s1, v1 = randomized_svd(a, 2, flip_sign=True, random_state=41)
for seed in range(10):
u2, s2, v2 = randomized_svd(a, 2, flip_sign=True, random_state=seed)
assert_almost_equal(u1, u2)
assert_almost_equal(v1, v2)
assert_almost_equal(np.dot(u2 * s2, v2), a)
assert_almost_equal(np.dot(u2.T, u2), np.eye(2))
assert_almost_equal(np.dot(v2.T, v2), np.eye(2))
def test_cartesian():
# Check if cartesian product delivers the right results
axes = (np.array([1, 2, 3]), np.array([4, 5]), np.array([6, 7]))
true_out = np.array([[1, 4, 6],
[1, 4, 7],
[1, 5, 6],
[1, 5, 7],
[2, 4, 6],
[2, 4, 7],
[2, 5, 6],
[2, 5, 7],
[3, 4, 6],
[3, 4, 7],
[3, 5, 6],
[3, 5, 7]])
out = cartesian(axes)
assert_array_equal(true_out, out)
# check single axis
x = np.arange(3)
assert_array_equal(x[:, np.newaxis], cartesian((x,)))
def test_logistic_sigmoid():
# Check correctness and robustness of logistic sigmoid implementation
naive_logistic = lambda x: 1 / (1 + np.exp(-x))
naive_log_logistic = lambda x: np.log(naive_logistic(x))
x = np.linspace(-2, 2, 50)
assert_array_almost_equal(log_logistic(x), naive_log_logistic(x))
extreme_x = np.array([-100., 100.])
assert_array_almost_equal(log_logistic(extreme_x), [-100, 0])
def test_fast_dot():
# Check fast dot blas wrapper function
if fast_dot is np.dot:
return
rng = np.random.RandomState(42)
A = rng.random_sample([2, 10])
B = rng.random_sample([2, 10])
try:
linalg.get_blas_funcs(['gemm'])[0]
has_blas = True
except (AttributeError, ValueError):
has_blas = False
if has_blas:
# Test _fast_dot for invalid input.
# Maltyped data.
for dt1, dt2 in [['f8', 'f4'], ['i4', 'i4']]:
assert_raises(ValueError, _fast_dot, A.astype(dt1),
B.astype(dt2).T)
# Malformed data.
# ndim == 0
E = np.empty(0)
assert_raises(ValueError, _fast_dot, E, E)
# ndim == 1
assert_raises(ValueError, _fast_dot, A, A[0])
# ndim > 2
assert_raises(ValueError, _fast_dot, A.T, np.array([A, A]))
# min(shape) == 1
assert_raises(ValueError, _fast_dot, A, A[0, :][None, :])
# test for matrix mismatch error
assert_raises(ValueError, _fast_dot, A, A)
# Test cov-like use case + dtypes.
for dtype in ['f8', 'f4']:
A = A.astype(dtype)
B = B.astype(dtype)
# col < row
C = np.dot(A.T, A)
C_ = fast_dot(A.T, A)
assert_almost_equal(C, C_, decimal=5)
C = np.dot(A.T, B)
C_ = fast_dot(A.T, B)
assert_almost_equal(C, C_, decimal=5)
C = np.dot(A, B.T)
C_ = fast_dot(A, B.T)
assert_almost_equal(C, C_, decimal=5)
# Test square matrix * rectangular use case.
A = rng.random_sample([2, 2])
for dtype in ['f8', 'f4']:
A = A.astype(dtype)
B = B.astype(dtype)
C = np.dot(A, B)
C_ = fast_dot(A, B)
assert_almost_equal(C, C_, decimal=5)
C = np.dot(A.T, B)
C_ = fast_dot(A.T, B)
assert_almost_equal(C, C_, decimal=5)
if has_blas:
for x in [np.array([[d] * 10] * 2) for d in [np.inf, np.nan]]:
assert_raises(ValueError, _fast_dot, x, x.T)
def test_incremental_variance_update_formulas():
# Test Youngs and Cramer incremental variance formulas.
# Doggie data from http://www.mathsisfun.com/data/standard-deviation.html
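    # For reference, the pairwise update behind these formulas: two batches
    # with means m1, m2, sums of squared deviations S1, S2 and sizes n1, n2
    # combine as S = S1 + S2 + n1 * n2 / (n1 + n2) * (m1 - m2) ** 2.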
A = np.array([[600, 470, 170, 430, 300],
[600, 470, 170, 430, 300],
[600, 470, 170, 430, 300],
[600, 470, 170, 430, 300]]).T
idx = 2
X1 = A[:idx, :]
X2 = A[idx:, :]
old_means = X1.mean(axis=0)
old_variances = X1.var(axis=0)
old_sample_count = X1.shape[0]
final_means, final_variances, final_count = \
_incremental_mean_and_var(X2, old_means, old_variances,
old_sample_count)
assert_almost_equal(final_means, A.mean(axis=0), 6)
assert_almost_equal(final_variances, A.var(axis=0), 6)
assert_almost_equal(final_count, A.shape[0])
@skip_if_32bit
def test_incremental_variance_numerical_stability():
# Test Youngs and Cramer incremental variance formulas.
def np_var(A):
return A.var(axis=0)
# Naive one pass variance computation - not numerically stable
# https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
def one_pass_var(X):
n = X.shape[0]
exp_x2 = (X ** 2).sum(axis=0) / n
expx_2 = (X.sum(axis=0) / n) ** 2
return exp_x2 - expx_2
# Two-pass algorithm, stable.
# We use it as a benchmark. It is not an online algorithm
# https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Two-pass_algorithm
def two_pass_var(X):
mean = X.mean(axis=0)
Y = X.copy()
return np.mean((Y - mean)**2, axis=0)
# Naive online implementation
# https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Online_algorithm
    # This works only for chunks of size 1
def naive_mean_variance_update(x, last_mean, last_variance,
last_sample_count):
updated_sample_count = (last_sample_count + 1)
samples_ratio = last_sample_count / float(updated_sample_count)
updated_mean = x / updated_sample_count + last_mean * samples_ratio
updated_variance = last_variance * samples_ratio + \
(x - last_mean) * (x - updated_mean) / updated_sample_count
return updated_mean, updated_variance, updated_sample_count
    # We want to show a case where one_pass_var has error > 1e-3 while
    # _incremental_mean_and_var has less.
tol = 200
n_features = 2
n_samples = 10000
x1 = np.array(1e8, dtype=np.float64)
x2 = np.log(1e-5, dtype=np.float64)
A0 = x1 * np.ones((n_samples // 2, n_features), dtype=np.float64)
A1 = x2 * np.ones((n_samples // 2, n_features), dtype=np.float64)
A = np.vstack((A0, A1))
# Older versions of numpy have different precision
    # In some old versions, np.var is not stable
if np.abs(np_var(A) - two_pass_var(A)).max() < 1e-6:
stable_var = np_var
else:
stable_var = two_pass_var
# Naive one pass var: >tol (=1063)
assert_greater(np.abs(stable_var(A) - one_pass_var(A)).max(), tol)
# Starting point for online algorithms: after A0
# Naive implementation: >tol (436)
mean, var, n = A0[0, :], np.zeros(n_features), n_samples // 2
for i in range(A1.shape[0]):
mean, var, n = \
naive_mean_variance_update(A1[i, :], mean, var, n)
assert_equal(n, A.shape[0])
# the mean is also slightly unstable
assert_greater(np.abs(A.mean(axis=0) - mean).max(), 1e-6)
assert_greater(np.abs(stable_var(A) - var).max(), tol)
# Robust implementation: <tol (177)
mean, var, n = A0[0, :], np.zeros(n_features), n_samples // 2
for i in range(A1.shape[0]):
mean, var, n = \
_incremental_mean_and_var(A1[i, :].reshape((1, A1.shape[1])),
mean, var, n)
assert_equal(n, A.shape[0])
assert_array_almost_equal(A.mean(axis=0), mean)
assert_greater(tol, np.abs(stable_var(A) - var).max())
def test_incremental_variance_ddof():
    # Test that the degrees of freedom parameter for the calculations is correct.
rng = np.random.RandomState(1999)
X = rng.randn(50, 10)
n_samples, n_features = X.shape
for batch_size in [11, 20, 37]:
steps = np.arange(0, X.shape[0], batch_size)
if steps[-1] != X.shape[0]:
steps = np.hstack([steps, n_samples])
for i, j in zip(steps[:-1], steps[1:]):
batch = X[i:j, :]
if i == 0:
incremental_means = batch.mean(axis=0)
incremental_variances = batch.var(axis=0)
# Assign this twice so that the test logic is consistent
incremental_count = batch.shape[0]
sample_count = batch.shape[0]
else:
result = _incremental_mean_and_var(
batch, incremental_means, incremental_variances,
sample_count)
(incremental_means, incremental_variances,
incremental_count) = result
sample_count += batch.shape[0]
calculated_means = np.mean(X[:j], axis=0)
calculated_variances = np.var(X[:j], axis=0)
assert_almost_equal(incremental_means, calculated_means, 6)
assert_almost_equal(incremental_variances,
calculated_variances, 6)
assert_equal(incremental_count, sample_count)
def test_vector_sign_flip():
# Testing that sign flip is working & largest value has positive sign
data = np.random.RandomState(36).randn(5, 5)
max_abs_rows = np.argmax(np.abs(data), axis=1)
data_flipped = _deterministic_vector_sign_flip(data)
max_rows = np.argmax(data_flipped, axis=1)
assert_array_equal(max_abs_rows, max_rows)
signs = np.sign(data[range(data.shape[0]), max_abs_rows])
assert_array_equal(data, data_flipped * signs[:, np.newaxis])
def test_softmax():
rng = np.random.RandomState(0)
X = rng.randn(3, 5)
exp_X = np.exp(X)
sum_exp_X = np.sum(exp_X, axis=1).reshape((-1, 1))
assert_array_almost_equal(softmax(X), exp_X / sum_exp_X)
| bsd-3-clause |
phoebe-project/phoebe2-docs | 2.3/examples/diher_misaligned.py | 2 | 4148 | #!/usr/bin/env python
# coding: utf-8
# DI Her: Misaligned Binary
# ============================
#
# In this example, we'll reproduce Figure 8 in the misalignment release paper ([Horvat et al. 2018](http://phoebe-project.org/publications/2018Horvat+)).
#
# <img src="horvat+18_fig8.png" alt="Figure 8" width="400px"/>
#
# Setup
# -----------------------------
# Let's first make sure we have the latest version of PHOEBE 2.3 installed (uncomment this line if running in an online notebook session such as colab).
# In[1]:
#!pip install -I "phoebe>=2.3,<2.4"
# As always, let's do imports and initialize a logger and a new bundle.
# In[2]:
import phoebe
from phoebe import u # units
import numpy as np
import matplotlib.pyplot as plt
logger = phoebe.logger('error')
b = phoebe.default_binary()
# System Parameters
# ------------
#
# We'll adopt and set parameters from the following sources:
# * Albrecht et al. (2009), Nature: https://arxiv.org/pdf/0909.2861
# * https://en.wikipedia.org/wiki/DI_Herculis
# * Claret et al (2010) https://arxiv.org/pdf/1002.2949.pdf
# In[3]:
Nt = 2000
b.set_value('t0_supconj@orbit', 2442233.3481)
b.set_value('vgamma@system', 9.1) # [km/s] (Albrecht et al. 2009)
b.set_value('ntriangles@primary', Nt)
b.set_value('ntriangles@secondary', Nt)
mass1 = 5.1 # [M_sun] (Albrecht et al. 2009)
mass2 = 4.4 # [M_sun] (Albrecht et al. 2009)
P = 10.550164 # [d] (Albrecht et al. 2009)
mu_sun = 1.32712440018e20 # = G M_sun [m3 s^-2], Wiki Standard_gravitational_parameter
R_sun = 695700000 # [m] Wiki Sun
sma = (mu_sun*(mass1 + mass2)*(P*86400/(2*np.pi))**2)**(1./3)/R_sun # Kepler equation
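# (Kepler's third law, a**3 = G*(M1 + M2)*(P/(2*pi))**2, evaluated in SI units
#  and converted to solar radii by dividing by R_sun.)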
incl = 89.3 # deg (Albrecht et al. 2009)
vp_sini = 109 # [km/s] (Albrecht et al. 2009)
vs_sini = 117 # [km/s] (Albrecht et al. 2009)
Rp = 2.68 # [R_sun] (Albrecht et al. 2009)
Rs = 2.48 # [R_sun] (Albrecht et al. 2009)
sini = np.sin(np.pi*incl/180)
vp = vp_sini*86400/sini # [km/d]
vs = vs_sini*86400/sini # [km/d]
Pp = 2*np.pi*Rp*R_sun/1000/vp
Ps = 2*np.pi*Rs*R_sun/1000/vs
Fp = P/Pp
Fs = P/Ps
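# The synchronicity parameters Fp, Fs are the ratios of the orbital period to
# each component's rotation period, with the rotation periods derived above
# from the measured v sin(i) and the radii.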
b.set_value('q', mass2/mass1)
b.set_value('incl@binary', incl) # (Albrecht et al. 2009)
b.set_value('sma@binary', sma) # calculated
b.set_value('ecc@binary', 0.489) # (Albrecht et al. 2009)
b.set_value('per0@binary', 330.2) # (Albrecht et al. 2009)
b.set_value('period@binary', P) # calculated
b.set_value('syncpar@primary', Fp) # calculated
b.set_value('syncpar@secondary', Fs) # calculated
b.set_value('requiv@primary', Rp) # !!! requiv (Albrecht et al. 2009)
b.set_value('requiv@secondary', Rs) # !!! requiv (Albrecht et al. 2009)
b.set_value('teff@primary', 17300) # Wiki DI_Herculis
b.set_value('teff@secondary', 15400) # Wiki DI_Herculis
b.set_value('gravb_bol@primary', 1.)
b.set_value('gravb_bol@secondary', 1.)
# beta = 72 deg (Albrecht et al. 2009)
dOmega_p = 72
di_p = 62 - incl
b.set_value('pitch@primary', di_p) # di
b.set_value('yaw@primary', dOmega_p) # dOmega
# beta = - 84 deg (Albrecht et al. 2009)
dOmega_s = -84
di_s = 100 - incl
b.set_value('pitch@secondary', di_s) # di
b.set_value('yaw@secondary', dOmega_s) # dOmega
b.set_value_all('atm','extern_planckint')
b.set_value_all('irrad_method', 'none')
# Datasets
# ---------------
#
# Let's compute an LC and RV dataset sampled at 200 points in phase (with some aliasing).
# In[4]:
n = 200
times = b.to_time(np.linspace(-0.05, 1.05, n))
b.add_dataset('lc', times=times, dataset='lc01', ld_mode='manual', ld_func='logarithmic', ld_coeffs = [0.5,0.5])
b.add_dataset('rv', times=times, dataset='rv01', ld_mode='manual', ld_func='logarithmic', ld_coeffs = [0.5,0.5])
# Compute
# --------------
# In[5]:
b.run_compute(ltte=False)
# Plotting
# -------------
# In[6]:
afig, mplfig = b.plot(kind='lc', show=True)
# In[7]:
afig, mplfig = b.plot(kind='rv', show=True)
# In[ ]:
| gpl-3.0 |
deepesch/scikit-learn | sklearn/decomposition/tests/test_kernel_pca.py | 155 | 8058 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import (assert_array_almost_equal, assert_less,
assert_equal, assert_not_equal,
assert_raises)
from sklearn.decomposition import PCA, KernelPCA
from sklearn.datasets import make_circles
from sklearn.linear_model import Perceptron
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.metrics.pairwise import rbf_kernel
def test_kernel_pca():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))
def histogram(x, y, **kwargs):
# Histogram kernel implemented as a callable.
assert_equal(kwargs, {}) # no kernel_params that we didn't ask for
return np.minimum(x, y).sum()
for eigen_solver in ("auto", "dense", "arpack"):
for kernel in ("linear", "rbf", "poly", histogram):
# histogram kernel produces singular matrix inside linalg.solve
# XXX use a least-squares approximation?
inv = not callable(kernel)
# transform fit data
kpca = KernelPCA(4, kernel=kernel, eigen_solver=eigen_solver,
fit_inverse_transform=inv)
X_fit_transformed = kpca.fit_transform(X_fit)
X_fit_transformed2 = kpca.fit(X_fit).transform(X_fit)
assert_array_almost_equal(np.abs(X_fit_transformed),
np.abs(X_fit_transformed2))
# non-regression test: previously, gamma would be 0 by default,
# forcing all eigenvalues to 0 under the poly kernel
assert_not_equal(X_fit_transformed, [])
# transform new data
X_pred_transformed = kpca.transform(X_pred)
assert_equal(X_pred_transformed.shape[1],
X_fit_transformed.shape[1])
# inverse transform
if inv:
X_pred2 = kpca.inverse_transform(X_pred_transformed)
assert_equal(X_pred2.shape, X_pred.shape)
def test_invalid_parameters():
assert_raises(ValueError, KernelPCA, 10, fit_inverse_transform=True,
kernel='precomputed')
def test_kernel_pca_sparse():
rng = np.random.RandomState(0)
X_fit = sp.csr_matrix(rng.random_sample((5, 4)))
X_pred = sp.csr_matrix(rng.random_sample((2, 4)))
for eigen_solver in ("auto", "arpack"):
for kernel in ("linear", "rbf", "poly"):
# transform fit data
kpca = KernelPCA(4, kernel=kernel, eigen_solver=eigen_solver,
fit_inverse_transform=False)
X_fit_transformed = kpca.fit_transform(X_fit)
X_fit_transformed2 = kpca.fit(X_fit).transform(X_fit)
assert_array_almost_equal(np.abs(X_fit_transformed),
np.abs(X_fit_transformed2))
# transform new data
X_pred_transformed = kpca.transform(X_pred)
assert_equal(X_pred_transformed.shape[1],
X_fit_transformed.shape[1])
# inverse transform
# X_pred2 = kpca.inverse_transform(X_pred_transformed)
# assert_equal(X_pred2.shape, X_pred.shape)
def test_kernel_pca_linear_kernel():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))
# for a linear kernel, kernel PCA should find the same projection as PCA
# modulo the sign (direction)
# fit only the first four components: fifth is near zero eigenvalue, so
# can be trimmed due to roundoff error
assert_array_almost_equal(
np.abs(KernelPCA(4).fit(X_fit).transform(X_pred)),
np.abs(PCA(4).fit(X_fit).transform(X_pred)))
def test_kernel_pca_n_components():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))
for eigen_solver in ("dense", "arpack"):
for c in [1, 2, 4]:
kpca = KernelPCA(n_components=c, eigen_solver=eigen_solver)
shape = kpca.fit(X_fit).transform(X_pred).shape
assert_equal(shape, (2, c))
def test_remove_zero_eig():
X = np.array([[1 - 1e-30, 1], [1, 1], [1, 1 - 1e-20]])
# n_components=None (default) => remove_zero_eig is True
kpca = KernelPCA()
Xt = kpca.fit_transform(X)
assert_equal(Xt.shape, (3, 0))
kpca = KernelPCA(n_components=2)
Xt = kpca.fit_transform(X)
assert_equal(Xt.shape, (3, 2))
kpca = KernelPCA(n_components=2, remove_zero_eig=True)
Xt = kpca.fit_transform(X)
assert_equal(Xt.shape, (3, 0))
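# With three nearly identical samples the centered kernel has only numerically
# zero eigenvalues; remove_zero_eig (implied by the n_components=None default)
# drops them, which is why the transformed shape collapses to (3, 0), while an
# explicit n_components=2 without remove_zero_eig keeps both components.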
def test_kernel_pca_precomputed():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))
for eigen_solver in ("dense", "arpack"):
X_kpca = KernelPCA(4, eigen_solver=eigen_solver).\
fit(X_fit).transform(X_pred)
X_kpca2 = KernelPCA(
4, eigen_solver=eigen_solver, kernel='precomputed').fit(
np.dot(X_fit, X_fit.T)).transform(np.dot(X_pred, X_fit.T))
X_kpca_train = KernelPCA(
4, eigen_solver=eigen_solver,
kernel='precomputed').fit_transform(np.dot(X_fit, X_fit.T))
X_kpca_train2 = KernelPCA(
4, eigen_solver=eigen_solver, kernel='precomputed').fit(
np.dot(X_fit, X_fit.T)).transform(np.dot(X_fit, X_fit.T))
assert_array_almost_equal(np.abs(X_kpca),
np.abs(X_kpca2))
assert_array_almost_equal(np.abs(X_kpca_train),
np.abs(X_kpca_train2))
def test_kernel_pca_invalid_kernel():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((2, 4))
kpca = KernelPCA(kernel="tototiti")
assert_raises(ValueError, kpca.fit, X_fit)
def test_gridsearch_pipeline():
# Test if we can do a grid-search to find parameters to separate
# circles with a perceptron model.
X, y = make_circles(n_samples=400, factor=.3, noise=.05,
random_state=0)
kpca = KernelPCA(kernel="rbf", n_components=2)
pipeline = Pipeline([("kernel_pca", kpca), ("Perceptron", Perceptron())])
param_grid = dict(kernel_pca__gamma=2. ** np.arange(-2, 2))
grid_search = GridSearchCV(pipeline, cv=3, param_grid=param_grid)
grid_search.fit(X, y)
assert_equal(grid_search.best_score_, 1)
def test_gridsearch_pipeline_precomputed():
# Test if we can do a grid-search to find parameters to separate
# circles with a perceptron model using a precomputed kernel.
X, y = make_circles(n_samples=400, factor=.3, noise=.05,
random_state=0)
kpca = KernelPCA(kernel="precomputed", n_components=2)
pipeline = Pipeline([("kernel_pca", kpca), ("Perceptron", Perceptron())])
param_grid = dict(Perceptron__n_iter=np.arange(1, 5))
grid_search = GridSearchCV(pipeline, cv=3, param_grid=param_grid)
X_kernel = rbf_kernel(X, gamma=2.)
grid_search.fit(X_kernel, y)
assert_equal(grid_search.best_score_, 1)
def test_nested_circles():
# Test the linear separability of the first 2D KPCA transform
X, y = make_circles(n_samples=400, factor=.3, noise=.05,
random_state=0)
# 2D nested circles are not linearly separable
train_score = Perceptron().fit(X, y).score(X, y)
assert_less(train_score, 0.8)
# Project the circles data into the first 2 components of a RBF Kernel
# PCA model.
# Note that the gamma value is data dependent. If this test breaks
# and the gamma value has to be updated, the Kernel PCA example will
# have to be updated too.
kpca = KernelPCA(kernel="rbf", n_components=2,
fit_inverse_transform=True, gamma=2.)
X_kpca = kpca.fit_transform(X)
# The data is perfectly linearly separable in that space
train_score = Perceptron().fit(X_kpca, y).score(X_kpca, y)
assert_equal(train_score, 1.0)
| bsd-3-clause |
sumspr/scikit-learn | sklearn/decomposition/tests/test_dict_learning.py | 85 | 8565 | import numpy as np
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import TempMemmap
from sklearn.decomposition import DictionaryLearning
from sklearn.decomposition import MiniBatchDictionaryLearning
from sklearn.decomposition import SparseCoder
from sklearn.decomposition import dict_learning_online
from sklearn.decomposition import sparse_encode
rng_global = np.random.RandomState(0)
n_samples, n_features = 10, 8
X = rng_global.randn(n_samples, n_features)
def test_dict_learning_shapes():
n_components = 5
dico = DictionaryLearning(n_components, random_state=0).fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_overcomplete():
n_components = 12
dico = DictionaryLearning(n_components, random_state=0).fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_reconstruction():
n_components = 12
dico = DictionaryLearning(n_components, transform_algorithm='omp',
transform_alpha=0.001, random_state=0)
code = dico.fit(X).transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X)
dico.set_params(transform_algorithm='lasso_lars')
code = dico.transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2)
# used to test lars here too, but there's no guarantee the number of
# nonzero atoms is right.
def test_dict_learning_reconstruction_parallel():
# regression test that parallel reconstruction works with n_jobs=-1
n_components = 12
dico = DictionaryLearning(n_components, transform_algorithm='omp',
transform_alpha=0.001, random_state=0, n_jobs=-1)
code = dico.fit(X).transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X)
dico.set_params(transform_algorithm='lasso_lars')
code = dico.transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2)
def test_dict_learning_lassocd_readonly_data():
n_components = 12
with TempMemmap(X) as X_read_only:
dico = DictionaryLearning(n_components, transform_algorithm='lasso_cd',
transform_alpha=0.001, random_state=0, n_jobs=-1)
code = dico.fit(X_read_only).transform(X_read_only)
assert_array_almost_equal(np.dot(code, dico.components_), X_read_only, decimal=2)
def test_dict_learning_nonzero_coefs():
n_components = 4
dico = DictionaryLearning(n_components, transform_algorithm='lars',
transform_n_nonzero_coefs=3, random_state=0)
code = dico.fit(X).transform(X[1])
assert_true(len(np.flatnonzero(code)) == 3)
dico.set_params(transform_algorithm='omp')
code = dico.transform(X[1])
assert_equal(len(np.flatnonzero(code)), 3)
def test_dict_learning_unknown_fit_algorithm():
n_components = 5
dico = DictionaryLearning(n_components, fit_algorithm='<unknown>')
assert_raises(ValueError, dico.fit, X)
def test_dict_learning_split():
n_components = 5
dico = DictionaryLearning(n_components, transform_algorithm='threshold',
random_state=0)
code = dico.fit(X).transform(X)
dico.split_sign = True
split_code = dico.transform(X)
assert_array_equal(split_code[:, :n_components] -
split_code[:, n_components:], code)
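# split_sign doubles the feature dimension into positive and negative parts, so
# subtracting the second half from the first recovers the original signed code,
# which is exactly what the assertion above checks.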
def test_dict_learning_online_shapes():
rng = np.random.RandomState(0)
n_components = 8
code, dictionary = dict_learning_online(X, n_components=n_components,
alpha=1, random_state=rng)
assert_equal(code.shape, (n_samples, n_components))
assert_equal(dictionary.shape, (n_components, n_features))
assert_equal(np.dot(code, dictionary).shape, X.shape)
def test_dict_learning_online_verbosity():
n_components = 5
# test verbosity
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
try:
sys.stdout = StringIO()
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=1,
random_state=0)
dico.fit(X)
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=2,
random_state=0)
dico.fit(X)
dict_learning_online(X, n_components=n_components, alpha=1, verbose=1,
random_state=0)
dict_learning_online(X, n_components=n_components, alpha=1, verbose=2,
random_state=0)
finally:
sys.stdout = old_stdout
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_estimator_shapes():
n_components = 5
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, random_state=0)
dico.fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_overcomplete():
n_components = 12
dico = MiniBatchDictionaryLearning(n_components, n_iter=20,
random_state=0).fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_initialization():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features)
dico = MiniBatchDictionaryLearning(n_components, n_iter=0,
dict_init=V, random_state=0).fit(X)
assert_array_equal(dico.components_, V)
def test_dict_learning_online_partial_fit():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
dict1 = MiniBatchDictionaryLearning(n_components, n_iter=10 * len(X),
batch_size=1,
alpha=1, shuffle=False, dict_init=V,
random_state=0).fit(X)
dict2 = MiniBatchDictionaryLearning(n_components, alpha=1,
n_iter=1, dict_init=V,
random_state=0)
for i in range(10):
for sample in X:
dict2.partial_fit(sample)
assert_true(not np.all(sparse_encode(X, dict1.components_, alpha=1) ==
0))
assert_array_almost_equal(dict1.components_, dict2.components_,
decimal=2)
def test_sparse_encode_shapes():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
for algo in ('lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'):
code = sparse_encode(X, V, algorithm=algo)
assert_equal(code.shape, (n_samples, n_components))
def test_sparse_encode_error():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
code = sparse_encode(X, V, alpha=0.001)
assert_true(not np.all(code == 0))
assert_less(np.sqrt(np.sum((np.dot(code, V) - X) ** 2)), 0.1)
def test_sparse_encode_error_default_sparsity():
rng = np.random.RandomState(0)
X = rng.randn(100, 64)
D = rng.randn(2, 64)
code = ignore_warnings(sparse_encode)(X, D, algorithm='omp',
n_nonzero_coefs=None)
assert_equal(code.shape, (100, 2))
def test_unknown_method():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
assert_raises(ValueError, sparse_encode, X, V, algorithm="<unknown>")
def test_sparse_coder_estimator():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
code = SparseCoder(dictionary=V, transform_algorithm='lasso_lars',
transform_alpha=0.001).transform(X)
assert_true(not np.all(code == 0))
assert_less(np.sqrt(np.sum((np.dot(code, V) - X) ** 2)), 0.1) | bsd-3-clause |
AllenDowney/CompStats | hypothesis.py | 75 | 10162 | """This file contains code used in "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2010 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function, division
import nsfg
import nsfg2
import first
import thinkstats2
import thinkplot
import copy
import random
import numpy as np
import matplotlib.pyplot as pyplot
class CoinTest(thinkstats2.HypothesisTest):
"""Tests the hypothesis that a coin is fair."""
def TestStatistic(self, data):
"""Computes the test statistic.
data: data in whatever form is relevant
"""
heads, tails = data
test_stat = abs(heads - tails)
return test_stat
def RunModel(self):
"""Run the model of the null hypothesis.
returns: simulated data
"""
heads, tails = self.data
n = heads + tails
sample = [random.choice('HT') for _ in range(n)]
hist = thinkstats2.Hist(sample)
data = hist['H'], hist['T']
return data
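# A minimal usage sketch (see main() below):
#   ct = CoinTest((140, 110))
#   p_value = ct.PValue()   # fraction of simulated fair-coin runs at least this lopsided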
class DiffMeansPermute(thinkstats2.HypothesisTest):
"""Tests a difference in means by permutation."""
def TestStatistic(self, data):
"""Computes the test statistic.
data: data in whatever form is relevant
"""
group1, group2 = data
test_stat = abs(group1.mean() - group2.mean())
return test_stat
def MakeModel(self):
"""Build a model of the null hypothesis.
"""
group1, group2 = self.data
self.n, self.m = len(group1), len(group2)
self.pool = np.hstack((group1, group2))
def RunModel(self):
"""Run the model of the null hypothesis.
returns: simulated data
"""
np.random.shuffle(self.pool)
data = self.pool[:self.n], self.pool[self.n:]
return data
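# A minimal usage sketch (PValue is inherited from thinkstats2.HypothesisTest,
# as used in RunTests below):
#   ht = DiffMeansPermute((group1, group2))
#   p_value = ht.PValue(iters=1000)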
class DiffMeansOneSided(DiffMeansPermute):
"""Tests a one-sided difference in means by permutation."""
def TestStatistic(self, data):
"""Computes the test statistic.
data: data in whatever form is relevant
"""
group1, group2 = data
test_stat = group1.mean() - group2.mean()
return test_stat
class DiffStdPermute(DiffMeansPermute):
"""Tests a one-sided difference in standard deviation by permutation."""
def TestStatistic(self, data):
"""Computes the test statistic.
data: data in whatever form is relevant
"""
group1, group2 = data
test_stat = group1.std() - group2.std()
return test_stat
class CorrelationPermute(thinkstats2.HypothesisTest):
"""Tests correlations by permutation."""
def TestStatistic(self, data):
"""Computes the test statistic.
data: tuple of xs and ys
"""
xs, ys = data
test_stat = abs(thinkstats2.Corr(xs, ys))
return test_stat
def RunModel(self):
"""Run the model of the null hypothesis.
returns: simulated data
"""
xs, ys = self.data
xs = np.random.permutation(xs)
return xs, ys
class DiceTest(thinkstats2.HypothesisTest):
"""Tests whether a six-sided die is fair."""
def TestStatistic(self, data):
"""Computes the test statistic.
data: list of frequencies
"""
observed = data
n = sum(observed)
expected = np.ones(6) * n / 6
test_stat = sum(abs(observed - expected))
return test_stat
def RunModel(self):
"""Run the model of the null hypothesis.
returns: simulated data
"""
n = sum(self.data)
values = [1,2,3,4,5,6]
rolls = np.random.choice(values, n, replace=True)
hist = thinkstats2.Hist(rolls)
freqs = hist.Freqs(values)
return freqs
class DiceChiTest(DiceTest):
"""Tests a six-sided die using a chi-squared statistic."""
def TestStatistic(self, data):
"""Computes the test statistic.
data: list of frequencies
"""
observed = data
n = sum(observed)
expected = np.ones(6) * n / 6
test_stat = sum((observed - expected)**2 / expected)
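        # This is the standard Pearson chi-squared statistic, sum((O - E)**2 / E),
        # which weights each face's deviation by its expected count.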
return test_stat
class PregLengthTest(thinkstats2.HypothesisTest):
"""Tests difference in pregnancy length using a chi-squared statistic."""
def TestStatistic(self, data):
"""Computes the test statistic.
data: pair of lists of pregnancy lengths
"""
firsts, others = data
stat = self.ChiSquared(firsts) + self.ChiSquared(others)
return stat
def ChiSquared(self, lengths):
"""Computes the chi-squared statistic.
lengths: sequence of lengths
returns: float
"""
hist = thinkstats2.Hist(lengths)
observed = np.array(hist.Freqs(self.values))
expected = self.expected_probs * len(lengths)
stat = sum((observed - expected)**2 / expected)
return stat
def MakeModel(self):
"""Build a model of the null hypothesis.
"""
firsts, others = self.data
self.n = len(firsts)
self.pool = np.hstack((firsts, others))
pmf = thinkstats2.Pmf(self.pool)
self.values = range(35, 44)
self.expected_probs = np.array(pmf.Probs(self.values))
def RunModel(self):
"""Run the model of the null hypothesis.
returns: simulated data
"""
np.random.shuffle(self.pool)
data = self.pool[:self.n], self.pool[self.n:]
return data
def RunDiceTest():
"""Tests whether a die is fair.
"""
data = [8, 9, 19, 5, 8, 11]
dt = DiceTest(data)
print('dice test', dt.PValue(iters=10000))
dt = DiceChiTest(data)
print('dice chi test', dt.PValue(iters=10000))
def FalseNegRate(data, num_runs=1000):
"""Computes the chance of a false negative based on resampling.
data: pair of sequences
num_runs: how many experiments to simulate
returns: float false negative rate
"""
group1, group2 = data
count = 0
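    # Each run resamples both groups with replacement, reruns the permutation
    # test, and counts a false negative whenever p > 0.05, i.e. the real
    # difference goes undetected at the 5% level.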
for i in range(num_runs):
sample1 = thinkstats2.Resample(group1)
sample2 = thinkstats2.Resample(group2)
ht = DiffMeansPermute((sample1, sample2))
p_value = ht.PValue(iters=101)
if p_value > 0.05:
count += 1
return count / num_runs
def PrintTest(p_value, ht):
"""Prints results from a hypothesis test.
p_value: float
ht: HypothesisTest
"""
print('p-value =', p_value)
print('actual =', ht.actual)
print('ts max =', ht.MaxTestStat())
def RunTests(data, iters=1000):
"""Runs several tests on the given data.
data: pair of sequences
iters: number of iterations to run
"""
# test the difference in means
ht = DiffMeansPermute(data)
p_value = ht.PValue(iters=iters)
print('\nmeans permute two-sided')
PrintTest(p_value, ht)
ht.PlotCdf()
thinkplot.Save(root='hypothesis1',
title='Permutation test',
xlabel='difference in means (weeks)',
ylabel='CDF',
legend=False)
# test the difference in means one-sided
ht = DiffMeansOneSided(data)
p_value = ht.PValue(iters=iters)
print('\nmeans permute one-sided')
PrintTest(p_value, ht)
# test the difference in std
ht = DiffStdPermute(data)
p_value = ht.PValue(iters=iters)
print('\nstd permute one-sided')
PrintTest(p_value, ht)
def ReplicateTests():
"""Replicates tests with the new NSFG data."""
live, firsts, others = nsfg2.MakeFrames()
# compare pregnancy lengths
print('\nprglngth2')
data = firsts.prglngth.values, others.prglngth.values
ht = DiffMeansPermute(data)
p_value = ht.PValue(iters=1000)
print('means permute two-sided')
PrintTest(p_value, ht)
print('\nbirth weight 2')
data = (firsts.totalwgt_lb.dropna().values,
others.totalwgt_lb.dropna().values)
ht = DiffMeansPermute(data)
p_value = ht.PValue(iters=1000)
print('means permute two-sided')
PrintTest(p_value, ht)
# test correlation
live2 = live.dropna(subset=['agepreg', 'totalwgt_lb'])
data = live2.agepreg.values, live2.totalwgt_lb.values
ht = CorrelationPermute(data)
p_value = ht.PValue()
print('\nage weight correlation 2')
PrintTest(p_value, ht)
# compare pregnancy lengths (chi-squared)
data = firsts.prglngth.values, others.prglngth.values
ht = PregLengthTest(data)
p_value = ht.PValue()
print('\npregnancy length chi-squared 2')
PrintTest(p_value, ht)
def main():
thinkstats2.RandomSeed(17)
# run the coin test
ct = CoinTest((140, 110))
pvalue = ct.PValue()
print('coin test p-value', pvalue)
# compare pregnancy lengths
print('\nprglngth')
live, firsts, others = first.MakeFrames()
data = firsts.prglngth.values, others.prglngth.values
RunTests(data)
# compare birth weights
print('\nbirth weight')
data = (firsts.totalwgt_lb.dropna().values,
others.totalwgt_lb.dropna().values)
ht = DiffMeansPermute(data)
p_value = ht.PValue(iters=1000)
print('means permute two-sided')
PrintTest(p_value, ht)
# test correlation
live2 = live.dropna(subset=['agepreg', 'totalwgt_lb'])
data = live2.agepreg.values, live2.totalwgt_lb.values
ht = CorrelationPermute(data)
p_value = ht.PValue()
print('\nage weight correlation')
print('n=', len(live2))
PrintTest(p_value, ht)
# run the dice test
RunDiceTest()
# compare pregnancy lengths (chi-squared)
data = firsts.prglngth.values, others.prglngth.values
ht = PregLengthTest(data)
p_value = ht.PValue()
print('\npregnancy length chi-squared')
PrintTest(p_value, ht)
# compute the false negative rate for difference in pregnancy length
data = firsts.prglngth.values, others.prglngth.values
neg_rate = FalseNegRate(data)
print('false neg rate', neg_rate)
# run the tests with new nsfg data
ReplicateTests()
if __name__ == "__main__":
main()
| mit |
hollerith/trading-with-python | cookbook/workingWithDatesAndTime.py | 77 | 1551 | # -*- coding: utf-8 -*-
"""
Created on Sun Oct 16 17:45:02 2011
@author: jev
"""
import time
import datetime as dt
from pandas import *
from pandas.core import datetools
# basic functions
print 'Epoch start: %s' % time.asctime(time.gmtime(0))
print 'Seconds from epoch: %.2f' % time.time()
today = dt.date.today()
print type(today)
print 'Today is %s' % today.strftime('%Y.%m.%d')
# parse datetime
d = dt.datetime.strptime('20120803 21:59:59',"%Y%m%d %H:%M:%S")
# time deltas
someDate = dt.date(2011,8,1)
delta = today - someDate
print 'Delta :', delta
# calculate difference in dates
delta = dt.timedelta(days=20)
print 'Today-delta=', today-delta
t = dt.datetime(*time.strptime('3/30/2004',"%m/%d/%Y")[0:5])
# the '*' operator unpacks the tuple, producing the argument list.
print t
# print the 3rd Friday of each month (pandas Week(weekday=4); 0=Monday, so 4=Friday)
for month in xrange(1,13):
t = dt.date(2013,month,1)+datetools.relativedelta(months=1)
offset = datetools.Week(weekday=4)
if t.weekday()<>4:
t_new = t+3*offset
else:
t_new = t+2*offset
t_new = t_new-datetools.relativedelta(days=30)
print t_new.strftime("%B, %d %Y (%A)")
#rng = DateRange(t, t+datetools.YearEnd())
#print rng
# create a range of times
start = dt.datetime(2012,8,1)+datetools.relativedelta(hours=9,minutes=30)
end = dt.datetime(2012,8,1)+datetools.relativedelta(hours=22)
rng = date_range(start,end,freq='30min')
for r in rng: print r.strftime("%Y%m%d %H:%M:%S") | bsd-3-clause |
111t8e/h2o-2 | py/testdir_single_jvm/test_summary2_unifiles.py | 9 | 10223 | import unittest, time, sys, random, math, getpass
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_cmd, h2o_import as h2i, h2o_util, h2o_browse as h2b, h2o_print as h2p
import h2o_summ
DO_TRY_SCIPY = False
if getpass.getuser()=='kevin' or getpass.getuser()=='jenkins':
DO_TRY_SCIPY = True
DO_MEDIAN = True
# FIX!. we seem to lose accuracy with fewer bins -> more iterations. Maybe we're leaking or ??
# this test failed (if run as user kevin) with 10 bins
MAX_QBINS = 1000 # pass
MAX_QBINS = 1000 # pass
# this one doesn't fail with 10 bins
# this failed. interestingly got same number as 1000 bin summary2 (the 7.433..
# on runifA.csv (2nd col?)
# MAX_QBINS = 20
# Exception: h2o quantile multipass is not approx. same as sort algo. h2o_util.assertApproxEqual failed comparing 7.43337413296 and 8.26268245. {'tol': 2e-07}.
MAX_QBINS = 27
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
global SEED
SEED = h2o.setup_random_seed()
h2o.init()
@classmethod
def tearDownClass(cls):
# h2o.sleep(3600)
h2o.tear_down_cloud()
def test_summary2_unifiles(self):
SYNDATASETS_DIR = h2o.make_syn_dir()
# new with 1000 bins. copy expected from R
tryList = [
('cars.csv', 'c.hex', [
(None, None,None,None,None,None),
('economy (mpg)', None,None,None,None,None),
('cylinders', None,None,None,None,None),
],
),
('runifA.csv', 'A.hex', [
(None, 1.00, 25.00, 50.00, 75.00, 100.0),
('x', -99.9, -44.7, 8.26, 58.00, 91.7),
],
),
# colname, (min, 25th, 50th, 75th, max)
('runif.csv', 'x.hex', [
(None, 1.00, 5000.0, 10000.0, 15000.0, 20000.00),
('D', -5000.00, -3735.0, -2443, -1187.0, 99.8),
('E', -100000.0, -49208.0, 1783.8, 50621.9, 100000.0),
('F', -1.00, -0.4886, 0.00868, 0.5048, 1.00),
],
),
('runifB.csv', 'B.hex', [
(None, 1.00, 2501.00, 5001.00, 7501.00, 10000.00),
            ('x', -100.00, -50.1, 0.974, 51.7, 100.00),
],
),
('runifC.csv', 'C.hex', [
(None, 1.00, 25002.00, 50002.00, 75002.00, 100000.00),
('x', -100.00, -50.45, -1.135, 49.28, 100.00),
],
),
]
timeoutSecs = 15
trial = 1
n = h2o.nodes[0]
lenNodes = len(h2o.nodes)
timeoutSecs = 60
for (csvFilename, hex_key, expectedCols) in tryList:
csvPathname = csvFilename
csvPathnameFull = h2i.find_folder_and_filename('smalldata', csvPathname, returnFullPath=True)
parseResult = h2i.import_parse(bucket='smalldata', path=csvPathname,
schema='put', hex_key=hex_key, timeoutSecs=10, doSummary=False)
print "Parse result['destination_key']:", parseResult['destination_key']
# We should be able to see the parse result?
inspect = h2o_cmd.runInspect(None, parseResult['destination_key'])
print "\n" + csvFilename
numRows = inspect["numRows"]
numCols = inspect["numCols"]
# okay to get more cols than we want
# okay to vary MAX_QBINS because we adjust the expected accuracy
summaryResult = h2o_cmd.runSummary(key=hex_key, max_qbins=MAX_QBINS)
h2o.verboseprint("summaryResult:", h2o.dump_json(summaryResult))
summaries = summaryResult['summaries']
scipyCol = 0
for expected, column in zip(expectedCols, summaries):
colname = column['colname']
if expected[0]:
                    self.assertEqual(colname, expected[0], msg='%s %s' % (colname, expected[0]))
else:
                    # if the colname is None, skip it (so we don't barf on strings on the h2o quantile page)
scipyCol += 1
continue
quantile = 0.5 if DO_MEDIAN else .999
# h2o has problem if a list of columns (or dictionary) is passed to 'column' param
q = h2o.nodes[0].quantiles(source_key=hex_key, column=column['colname'],
quantile=quantile, max_qbins=MAX_QBINS, multiple_pass=2, interpolation_type=7) # for comparing to summary2
qresult = q['result']
qresult_single = q['result_single']
h2p.blue_print("h2o quantiles result:", qresult)
h2p.blue_print("h2o quantiles result_single:", qresult_single)
h2p.blue_print("h2o quantiles iterations:", q['iterations'])
h2p.blue_print("h2o quantiles interpolated:", q['interpolated'])
print h2o.dump_json(q)
# ('', '1.00', '25002.00', '50002.00', '75002.00', '100000.00'),
coltype = column['type']
nacnt = column['nacnt']
stats = column['stats']
            stattype = stats['type']
print stattype
# FIX! we should compare mean and sd to expected?
# enums don't have mean or sd?
if stattype!='Enum':
mean = stats['mean']
sd = stats['sd']
zeros = stats['zeros']
mins = stats['mins']
maxs = stats['maxs']
print "colname:", colname, "mean (2 places):", h2o_util.twoDecimals(mean)
print "colname:", colname, "std dev. (2 places):", h2o_util.twoDecimals(sd)
pct = stats['pct']
print "pct:", pct
print ""
# the thresholds h2o used, should match what we expected
expectedPct= [0.01, 0.05, 0.1, 0.25, 0.33, 0.5, 0.66, 0.75, 0.9, 0.95, 0.99]
pctile = stats['pctile']
# figure out the expected max error
# use this for comparing to sklearn/sort
if expected[1] and expected[5]:
expectedRange = expected[5] - expected[1]
                    # because of floor and ceil effects we can lose up to 2 bins (worst case);
                    # the extra bin for the max value is just an extra bin, so ignore it
expectedBin = expectedRange/(MAX_QBINS-2)
maxErr = 0.5 * expectedBin # should we have some fuzz for fp?
else:
print "Test won't calculate max expected error"
maxErr = 0
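                # A quantile estimated from roughly uniform bins can be off by at
                # most half a bin width, hence the 0.5 * expectedBin bound above.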
# hack..assume just one None is enough to ignore for cars.csv
if expected[1]:
h2o_util.assertApproxEqual(mins[0], expected[1], tol=maxErr, msg='min is not approx. expected')
if expected[2]:
h2o_util.assertApproxEqual(pctile[3], expected[2], tol=maxErr, msg='25th percentile is not approx. expected')
if expected[3]:
h2o_util.assertApproxEqual(pctile[5], expected[3], tol=maxErr, msg='50th percentile (median) is not approx. expected')
if expected[4]:
h2o_util.assertApproxEqual(pctile[7], expected[4], tol=maxErr, msg='75th percentile is not approx. expected')
if expected[5]:
h2o_util.assertApproxEqual(maxs[0], expected[5], tol=maxErr, msg='max is not approx. expected')
hstart = column['hstart']
hstep = column['hstep']
hbrk = column['hbrk']
hcnt = column['hcnt']
for b in hcnt:
# should we be able to check for a uniform distribution in the files?
e = .1 * numRows
# self.assertAlmostEqual(b, .1 * rowCount, delta=.01*rowCount,
# msg="Bins not right. b: %s e: %s" % (b, e))
if stattype!='Enum':
pt = h2o_util.twoDecimals(pctile)
print "colname:", colname, "pctile (2 places):", pt
mx = h2o_util.twoDecimals(maxs)
mn = h2o_util.twoDecimals(mins)
print "colname:", colname, "maxs: (2 places):", mx
print "colname:", colname, "mins: (2 places):", mn
# FIX! we should do an exec and compare using the exec quantile too
actual = mn[0], pt[3], pt[5], pt[7], mx[0]
print "min/25/50/75/max colname:", colname, "(2 places):", actual
print "maxs colname:", colname, "(2 places):", mx
print "mins colname:", colname, "(2 places):", mn
# don't check if colname is empty..means it's a string and scipy doesn't parse right?
# need to ignore the car names
if colname!='' and expected[scipyCol]:
# don't do for enums
                # also get the median with a sort (h2o_summ.percentileOnSortedlist())
h2o_summ.quantile_comparisons(
csvPathnameFull,
skipHeader=True,
col=scipyCol,
datatype='float',
quantile=0.5 if DO_MEDIAN else 0.999,
# FIX! ignore for now
h2oSummary2=pctile[5 if DO_MEDIAN else 10],
h2oQuantilesApprox=qresult_single,
h2oQuantilesExact=qresult,
h2oSummary2MaxErr=maxErr,
)
if False and h2o_util.approxEqual(pctile[5], 0.990238116744, tol=0.002, msg='stop here'):
raise Exception("stopping to look")
scipyCol += 1
trial += 1
if __name__ == '__main__':
h2o.unit_main()
| apache-2.0 |
mikeireland/chronostar | scripts/utils/animate_results_save.py | 1 | 4061 | """
For efficiency, if a video already exists, this script will
skip over it. So if you wish to update a video, delete it
first, and then run this script.
usage:
in the base results directory (i.e. you should be able to see directories 1,
2, ... etc) execute:
python animate_results_save.py path_to_your_data.fits [N_MAX_COMPS] [dim1] [dim2]
The order of the arguments is important. An example would be:
python animate_results_save.py star_data.fits 8 X U
And the script will generate a video for each EM run, (1, 2A, 3A, 3B, 4A, 4B, 4C, ... 8A, 8B ... 8G),
plotted in X and U. It can take quite a while, since so many plots must be generated.
"""
import os
import sys
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from chronostar.component import SphereComponent
from chronostar import tabletool
N_MAX_COMPS=20
N_MAX_ITERS=200
LABELS='XYZUVW'
UNITS=3*['pc'] + 3*['km/s']
dim1 = 0
dim2 = 3
def animate(i):
COLOURS = ['blue', 'red', 'orange', 'purple', 'brown', 'green', 'black']
iterdir = base_dir + 'iter{:02}/'.format(i)
print('[animate]: In ', iterdir)
comps = SphereComponent.load_raw_components(iterdir + 'best_comps.npy')
membs = np.load(iterdir + 'membership.npy')
plt.clf()
plt.title('Iter {}'.format(i))
plt.xlabel('{} [{}]'.format(LABELS[dim1], UNITS[dim1]))
plt.ylabel('{} [{}]'.format(LABELS[dim2], UNITS[dim2]))
comp_assignment = np.argmax(membs, axis=1)
# Plot color coded component members
for comp_ix in range(len(comps)):
memb_mask = np.where(comp_assignment == comp_ix)
plt.plot(data['means'][memb_mask,dim1], data['means'][memb_mask,dim2], '.',
color=COLOURS[comp_ix%len(COLOURS)], alpha=0.6, markersize=10)
# Plot background stars
bg_mask = np.where(comp_assignment == len(comps))
plt.plot(data['means'][bg_mask,dim1], data['means'][bg_mask,dim2], '.',
color='grey', alpha=0.3, markersize=2)
[c.plot(dim1, dim2, color=COLOURS[comp_ix%len(COLOURS)]) for comp_ix, c in enumerate(comps)]
# [c.plot(dim1, dim2) for c in comps]
Writer = animation.writers['ffmpeg']
writer = Writer(fps=20, metadata=dict(artist='Me'), bitrate=1800)
if len(sys.argv) < 2:
    print('USAGE: python animate_results_save.py datafile.fits [max_comps] [dim1] [dim2]')
sys.exit()
datafile = sys.argv[1]
if len(sys.argv) > 2:
N_MAX_COMPS = int(sys.argv[2])
if len(sys.argv) > 4:
dim1 = sys.argv[3]
dim2 = sys.argv[4]
try:
dim1 = int(dim1)
except ValueError:
dim1 = ord(dim1.upper()) - ord('X')
try:
dim2 = int(dim2)
except ValueError:
dim2 = ord(dim2.upper()) - ord('X')
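# Note: 'U', 'V', 'W' map to negative offsets (-3, -2, -1) here; with six
# XYZUVW columns Python's negative indexing still selects the intended
# velocity dimensions (e.g. 'U' addresses column 3), and LABELS/UNITS index
# the same way.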
try:
data = tabletool.build_data_dict_from_table(datafile)
except:
data = tabletool.build_data_dict_from_table(datafile, historical=True)
fig = plt.figure(figsize=(10,6))
ani = matplotlib.animation.FuncAnimation(fig, animate, frames=N_MAX_ITERS, repeat=True)
for i in range(1,N_MAX_COMPS+1):
if i == 1:
base_dir = '1/'
save_filename = '1_{}{}.mp4'.format(LABELS[dim1],LABELS[dim2])
if os.path.isdir(base_dir) and not os.path.isfile(save_filename):
print('Going into ', base_dir)
try:
ani.save(save_filename, writer=writer)
except: # FileNotFoundError: # Python2 consistent
pass
else:
subdirs = [f.name for f in os.scandir(str(i)) if f.is_dir()]
print('Subdirs: ', subdirs)
for subdir in subdirs:
# char = chr(ord('A')+j)
# base_dir = '{}/{}/'.format(i,char)
base_dir = '{}/{}/'.format(i,subdir)
save_filename = '{}_{}_{}{}.mp4'.format(i,subdir,LABELS[dim1],LABELS[dim2])
if os.path.isdir(base_dir) and not os.path.isfile(save_filename):
print('Going into ', base_dir)
try:
ani.save(save_filename, writer=writer)
except: # FileNotFoundError: # Python2 consistent
pass
| mit |
henningjp/CoolProp | wrappers/Python/CoolProp/tests/test_plots.py | 2 | 4596 | import numpy as np
import matplotlib.pyplot as plt
def test_back_compatibility():
fluid_ref = 'R290'
def Ts_plot_tests():
from CoolProp.Plots import Ts
Ts(fluid_ref, show=False)
from matplotlib import pyplot
fig = pyplot.figure(2)
ax = fig.gca()
Ts(fluid_ref, show=False, axis=ax)
plt.close()
Ts(fluid_ref, show=False, Tmin=200, Tmax=300)
plt.close()
def Ph_plot_tests():
from CoolProp.Plots import Ph
Ph(fluid_ref, show=False)
from matplotlib import pyplot
fig = pyplot.figure(2)
ax = fig.gca()
Ph(fluid_ref, show=False, axis=ax)
plt.close()
Ph(fluid_ref, show=False, Tmin=200, Tmax=300)
plt.close()
def PT_plot_tests():
from CoolProp.Plots import PT
PT(fluid_ref, show=False)
from matplotlib import pyplot
fig = pyplot.figure(2)
ax = fig.gca()
PT(fluid_ref, show=False, axis=ax)
plt.close()
PT(fluid_ref, show=False, Tmin=200, Tmax=300)
plt.close()
def Ps_plot_tests():
from CoolProp.Plots import Ps
Ps(fluid_ref, show=False)
from matplotlib import pyplot
fig = pyplot.figure(2)
ax = fig.gca()
Ps(fluid_ref, show=False, axis=ax)
plt.close()
Ps(fluid_ref, show=False, Tmin=200, Tmax=300)
plt.close()
def Prho_plot_tests():
from CoolProp.Plots import Prho
Prho(fluid_ref, show=False)
from matplotlib import pyplot
fig = pyplot.figure(2)
ax = fig.gca()
Prho(fluid_ref, show=False, axis=ax)
plt.close()
Prho(fluid_ref, show=False, Tmin=200, Tmax=300)
plt.close()
def Trho_plot_tests():
from CoolProp.Plots import Trho
Trho(fluid_ref, show=False)
from matplotlib import pyplot
fig = pyplot.figure(2)
ax = fig.gca()
Trho(fluid_ref, show=False, axis=ax)
plt.close()
Trho(fluid_ref, show=False, Tmin=200, Tmax=300)
plt.close()
def hs_plot_tests():
from CoolProp.Plots import hs
hs(fluid_ref, show=False)
from matplotlib import pyplot
fig = pyplot.figure(2)
ax = fig.gca()
hs(fluid_ref, show=False, axis=ax)
plt.close()
hs(fluid_ref, show=False, Tmin=200, Tmax=300)
plt.close()
def Isolines_plot_tests():
from matplotlib import pyplot
from CoolProp.Plots import Ts, drawIsoLines
ax = Ts(fluid_ref)
#ax.set_xlim([-0.5, 1.5])
#ax.set_ylim([300, 530])
quality = drawIsoLines(fluid_ref, 'Ts', 'Q', [0.3, 0.5, 0.7, 0.8], axis=ax)
isobars = drawIsoLines(fluid_ref, 'Ts', 'P', [100, 2000], num=5, axis=ax)
isochores = drawIsoLines(fluid_ref, 'Ts', 'D', [2, 600], num=7, axis=ax)
pyplot.close()
Ts_plot_tests()
Ph_plot_tests()
Ps_plot_tests()
PT_plot_tests()
Prho_plot_tests()
Trho_plot_tests()
hs_plot_tests()
Isolines_plot_tests()
def test_new_code():
fluid_ref = 'Water'
def Ts_plot_tests():
from CoolProp.Plots import PropsPlot
PP = PropsPlot(fluid_ref, 'Ts')
plt.close()
def Ph_plot_tests():
from CoolProp.Plots import PropsPlot
PP = PropsPlot(fluid_ref, 'Ph')
plt.close()
def Isolines_plot_tests():
from CoolProp.Plots import PropsPlot
PP = PropsPlot(fluid_ref, 'Ts')
#plt.set_axis_limits([-0.5, 1.5, 300, 530])
PP.draw_isolines('Q', [0.3, 0.5, 0.7, 0.8])
PP.draw_isolines('P', [100, 2000], num=5)
PP.draw_isolines('D', [2, 600], num=7)
plt.close()
def Graph_annotations():
from CoolProp.Plots import PropsPlot, IsoLines
PP = PropsPlot(fluid_ref, 'Ts')
PP.draw_isolines('Q', [0.3, 0.5, 0.7, 0.8])
PP.draw_isolines('P', [100, 2000], num=5)
PP.draw_isolines('D', [2, 600], num=7)
plt.title('New Title')
PP.xlabel('New x label')
PP.ylabel('New y label')
PP = IsoLines(fluid_ref, 'Ts', 'P')
PP.draw_isolines([100, 2000], num=5)
plt.close()
def Mixture():
from CoolProp.Plots import PropsPlot
PP = PropsPlot('REFPROP-MIX:R32[0.47319469]&R125[0.2051091]&R134a[0.32169621]', 'TD')
PP._plot_default_annotations()
plt.close()
Ts_plot_tests()
Ph_plot_tests()
Isolines_plot_tests()
Graph_annotations()
Mixture()
if __name__ == '__main__':
import nose
nose.runmodule()
| mit |
RPGOne/Skynet | scikit-learn-0.18.1/sklearn/feature_extraction/tests/test_text.py | 39 | 36062 | from __future__ import unicode_literals
import warnings
from sklearn.feature_extraction.text import strip_tags
from sklearn.feature_extraction.text import strip_accents_unicode
from sklearn.feature_extraction.text import strip_accents_ascii
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import ENGLISH_STOP_WORDS
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.base import clone
import numpy as np
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_raises
from sklearn.utils.random import choice
from sklearn.utils.testing import (assert_equal, assert_false, assert_true,
assert_not_equal, assert_almost_equal,
assert_in, assert_less, assert_greater,
assert_warns_message, assert_raise_message,
clean_warning_registry, SkipTest)
from collections import defaultdict, Mapping
from functools import partial
import pickle
from io import StringIO
JUNK_FOOD_DOCS = (
"the pizza pizza beer copyright",
"the pizza burger beer copyright",
"the the pizza beer beer copyright",
"the burger beer beer copyright",
"the coke burger coke copyright",
"the coke burger burger",
)
NOTJUNK_FOOD_DOCS = (
"the salad celeri copyright",
"the salad salad sparkling water copyright",
"the the celeri celeri copyright",
"the tomato tomato salad water",
"the tomato salad water copyright",
)
ALL_FOOD_DOCS = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
def uppercase(s):
return strip_accents_unicode(s).upper()
def strip_eacute(s):
return s.replace('\xe9', 'e')
def split_tokenize(s):
return s.split()
def lazy_analyze(s):
return ['the_ultimate_feature']
def test_strip_accents():
# check some classical latin accentuated symbols
a = '\xe0\xe1\xe2\xe3\xe4\xe5\xe7\xe8\xe9\xea\xeb'
expected = 'aaaaaaceeee'
assert_equal(strip_accents_unicode(a), expected)
a = '\xec\xed\xee\xef\xf1\xf2\xf3\xf4\xf5\xf6\xf9\xfa\xfb\xfc\xfd'
expected = 'iiiinooooouuuuy'
assert_equal(strip_accents_unicode(a), expected)
# check some arabic
a = '\u0625' # halef with a hamza below
expected = '\u0627' # simple halef
assert_equal(strip_accents_unicode(a), expected)
# mix letters accentuated and not
a = "this is \xe0 test"
expected = 'this is a test'
assert_equal(strip_accents_unicode(a), expected)
def test_to_ascii():
# check some classical latin accentuated symbols
a = '\xe0\xe1\xe2\xe3\xe4\xe5\xe7\xe8\xe9\xea\xeb'
expected = 'aaaaaaceeee'
assert_equal(strip_accents_ascii(a), expected)
a = '\xec\xed\xee\xef\xf1\xf2\xf3\xf4\xf5\xf6\xf9\xfa\xfb\xfc\xfd'
expected = 'iiiinooooouuuuy'
assert_equal(strip_accents_ascii(a), expected)
# check some arabic
a = '\u0625' # halef with a hamza below
expected = '' # halef has no direct ascii match
assert_equal(strip_accents_ascii(a), expected)
# mix letters accentuated and not
a = "this is \xe0 test"
expected = 'this is a test'
assert_equal(strip_accents_ascii(a), expected)
def test_word_analyzer_unigrams():
for Vectorizer in (CountVectorizer, HashingVectorizer):
wa = Vectorizer(strip_accents='ascii').build_analyzer()
text = ("J'ai mang\xe9 du kangourou ce midi, "
"c'\xe9tait pas tr\xeas bon.")
expected = ['ai', 'mange', 'du', 'kangourou', 'ce', 'midi',
'etait', 'pas', 'tres', 'bon']
assert_equal(wa(text), expected)
text = "This is a test, really.\n\n I met Harry yesterday."
expected = ['this', 'is', 'test', 'really', 'met', 'harry',
'yesterday']
assert_equal(wa(text), expected)
wa = Vectorizer(input='file').build_analyzer()
text = StringIO("This is a test with a file-like object!")
expected = ['this', 'is', 'test', 'with', 'file', 'like',
'object']
assert_equal(wa(text), expected)
# with custom preprocessor
wa = Vectorizer(preprocessor=uppercase).build_analyzer()
text = ("J'ai mang\xe9 du kangourou ce midi, "
" c'\xe9tait pas tr\xeas bon.")
expected = ['AI', 'MANGE', 'DU', 'KANGOUROU', 'CE', 'MIDI',
'ETAIT', 'PAS', 'TRES', 'BON']
assert_equal(wa(text), expected)
# with custom tokenizer
wa = Vectorizer(tokenizer=split_tokenize,
strip_accents='ascii').build_analyzer()
text = ("J'ai mang\xe9 du kangourou ce midi, "
"c'\xe9tait pas tr\xeas bon.")
expected = ["j'ai", 'mange', 'du', 'kangourou', 'ce', 'midi,',
"c'etait", 'pas', 'tres', 'bon.']
assert_equal(wa(text), expected)
def test_word_analyzer_unigrams_and_bigrams():
wa = CountVectorizer(analyzer="word", strip_accents='unicode',
ngram_range=(1, 2)).build_analyzer()
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon."
expected = ['ai', 'mange', 'du', 'kangourou', 'ce', 'midi',
'etait', 'pas', 'tres', 'bon', 'ai mange', 'mange du',
'du kangourou', 'kangourou ce', 'ce midi', 'midi etait',
'etait pas', 'pas tres', 'tres bon']
assert_equal(wa(text), expected)
def test_unicode_decode_error():
# decode_error default to strict, so this should fail
# First, encode (as bytes) a unicode string.
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon."
text_bytes = text.encode('utf-8')
# Then let the Analyzer try to decode it as ascii. It should fail,
# because we have given it an incorrect encoding.
wa = CountVectorizer(ngram_range=(1, 2), encoding='ascii').build_analyzer()
assert_raises(UnicodeDecodeError, wa, text_bytes)
ca = CountVectorizer(analyzer='char', ngram_range=(3, 6),
encoding='ascii').build_analyzer()
assert_raises(UnicodeDecodeError, ca, text_bytes)
def test_char_ngram_analyzer():
cnga = CountVectorizer(analyzer='char', strip_accents='unicode',
ngram_range=(3, 6)).build_analyzer()
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon"
expected = ["j'a", "'ai", 'ai ', 'i m', ' ma']
assert_equal(cnga(text)[:5], expected)
expected = ['s tres', ' tres ', 'tres b', 'res bo', 'es bon']
assert_equal(cnga(text)[-5:], expected)
text = "This \n\tis a test, really.\n\n I met Harry yesterday"
expected = ['thi', 'his', 'is ', 's i', ' is']
assert_equal(cnga(text)[:5], expected)
expected = [' yeste', 'yester', 'esterd', 'sterda', 'terday']
assert_equal(cnga(text)[-5:], expected)
cnga = CountVectorizer(input='file', analyzer='char',
ngram_range=(3, 6)).build_analyzer()
text = StringIO("This is a test with a file-like object!")
expected = ['thi', 'his', 'is ', 's i', ' is']
assert_equal(cnga(text)[:5], expected)
def test_char_wb_ngram_analyzer():
cnga = CountVectorizer(analyzer='char_wb', strip_accents='unicode',
ngram_range=(3, 6)).build_analyzer()
text = "This \n\tis a test, really.\n\n I met Harry yesterday"
expected = [' th', 'thi', 'his', 'is ', ' thi']
assert_equal(cnga(text)[:5], expected)
expected = ['yester', 'esterd', 'sterda', 'terday', 'erday ']
assert_equal(cnga(text)[-5:], expected)
cnga = CountVectorizer(input='file', analyzer='char_wb',
ngram_range=(3, 6)).build_analyzer()
text = StringIO("A test with a file-like object!")
expected = [' a ', ' te', 'tes', 'est', 'st ', ' tes']
assert_equal(cnga(text)[:6], expected)
def test_countvectorizer_custom_vocabulary():
vocab = {"pizza": 0, "beer": 1}
terms = set(vocab.keys())
# Try a few of the supported types.
for typ in [dict, list, iter, partial(defaultdict, int)]:
v = typ(vocab)
vect = CountVectorizer(vocabulary=v)
vect.fit(JUNK_FOOD_DOCS)
if isinstance(v, Mapping):
assert_equal(vect.vocabulary_, vocab)
else:
assert_equal(set(vect.vocabulary_), terms)
X = vect.transform(JUNK_FOOD_DOCS)
assert_equal(X.shape[1], len(terms))
def test_countvectorizer_custom_vocabulary_pipeline():
what_we_like = ["pizza", "beer"]
pipe = Pipeline([
('count', CountVectorizer(vocabulary=what_we_like)),
('tfidf', TfidfTransformer())])
X = pipe.fit_transform(ALL_FOOD_DOCS)
assert_equal(set(pipe.named_steps['count'].vocabulary_),
set(what_we_like))
assert_equal(X.shape[1], len(what_we_like))
def test_countvectorizer_custom_vocabulary_repeated_indeces():
vocab = {"pizza": 0, "beer": 0}
try:
CountVectorizer(vocabulary=vocab)
except ValueError as e:
assert_in("vocabulary contains repeated indices", str(e).lower())
def test_countvectorizer_custom_vocabulary_gap_index():
vocab = {"pizza": 1, "beer": 2}
try:
CountVectorizer(vocabulary=vocab)
except ValueError as e:
assert_in("doesn't contain index", str(e).lower())
def test_countvectorizer_stop_words():
cv = CountVectorizer()
cv.set_params(stop_words='english')
assert_equal(cv.get_stop_words(), ENGLISH_STOP_WORDS)
cv.set_params(stop_words='_bad_str_stop_')
assert_raises(ValueError, cv.get_stop_words)
cv.set_params(stop_words='_bad_unicode_stop_')
assert_raises(ValueError, cv.get_stop_words)
stoplist = ['some', 'other', 'words']
cv.set_params(stop_words=stoplist)
assert_equal(cv.get_stop_words(), set(stoplist))
def test_countvectorizer_empty_vocabulary():
try:
vect = CountVectorizer(vocabulary=[])
vect.fit(["foo"])
assert False, "we shouldn't get here"
except ValueError as e:
assert_in("empty vocabulary", str(e).lower())
try:
v = CountVectorizer(max_df=1.0, stop_words="english")
# fit on stopwords only
v.fit(["to be or not to be", "and me too", "and so do you"])
assert False, "we shouldn't get here"
except ValueError as e:
assert_in("empty vocabulary", str(e).lower())
def test_fit_countvectorizer_twice():
cv = CountVectorizer()
X1 = cv.fit_transform(ALL_FOOD_DOCS[:5])
X2 = cv.fit_transform(ALL_FOOD_DOCS[5:])
assert_not_equal(X1.shape[1], X2.shape[1])
def test_tf_idf_smoothing():
X = [[1, 1, 1],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=True, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
# check normalization
assert_array_almost_equal((tfidf ** 2).sum(axis=1), [1., 1., 1.])
# this is robust to features with only zeros
X = [[1, 1, 0],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=True, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
def test_tfidf_no_smoothing():
X = [[1, 1, 1],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=False, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
# check normalization
assert_array_almost_equal((tfidf ** 2).sum(axis=1), [1., 1., 1.])
    # the lack of smoothing makes IDF fragile in the presence of features with
    # only zeros
X = [[1, 1, 0],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=False, norm='l2')
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
1. / np.array([0.])
numpy_provides_div0_warning = len(w) == 1
in_warning_message = 'divide by zero'
tfidf = assert_warns_message(RuntimeWarning, in_warning_message,
tr.fit_transform, X).toarray()
if not numpy_provides_div0_warning:
raise SkipTest("Numpy does not provide div 0 warnings.")
def test_sublinear_tf():
X = [[1], [2], [3]]
tr = TfidfTransformer(sublinear_tf=True, use_idf=False, norm=None)
tfidf = tr.fit_transform(X).toarray()
assert_equal(tfidf[0], 1)
assert_greater(tfidf[1], tfidf[0])
assert_greater(tfidf[2], tfidf[1])
assert_less(tfidf[1], 2)
assert_less(tfidf[2], 3)
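    # sublinear_tf replaces tf with 1 + log(tf), so counts 2 and 3 map to about
    # 1.69 and 2.10, which is why the transformed values stay below 2 and 3.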
def test_vectorizer():
# raw documents as an iterator
train_data = iter(ALL_FOOD_DOCS[:-1])
test_data = [ALL_FOOD_DOCS[-1]]
n_train = len(ALL_FOOD_DOCS) - 1
# test without vocabulary
v1 = CountVectorizer(max_df=0.5)
counts_train = v1.fit_transform(train_data)
if hasattr(counts_train, 'tocsr'):
counts_train = counts_train.tocsr()
assert_equal(counts_train[0, v1.vocabulary_["pizza"]], 2)
# build a vectorizer v1 with the same vocabulary as the one fitted by v1
v2 = CountVectorizer(vocabulary=v1.vocabulary_)
# compare that the two vectorizer give the same output on the test sample
for v in (v1, v2):
counts_test = v.transform(test_data)
if hasattr(counts_test, 'tocsr'):
counts_test = counts_test.tocsr()
vocabulary = v.vocabulary_
assert_equal(counts_test[0, vocabulary["salad"]], 1)
assert_equal(counts_test[0, vocabulary["tomato"]], 1)
assert_equal(counts_test[0, vocabulary["water"]], 1)
# stop word from the fixed list
assert_false("the" in vocabulary)
# stop word found automatically by the vectorizer DF thresholding
# words that are high frequent across the complete corpus are likely
# to be not informative (either real stop words of extraction
# artifacts)
assert_false("copyright" in vocabulary)
# not present in the sample
assert_equal(counts_test[0, vocabulary["coke"]], 0)
assert_equal(counts_test[0, vocabulary["burger"]], 0)
assert_equal(counts_test[0, vocabulary["beer"]], 0)
assert_equal(counts_test[0, vocabulary["pizza"]], 0)
# test tf-idf
t1 = TfidfTransformer(norm='l1')
tfidf = t1.fit(counts_train).transform(counts_train).toarray()
assert_equal(len(t1.idf_), len(v1.vocabulary_))
assert_equal(tfidf.shape, (n_train, len(v1.vocabulary_)))
# test tf-idf with new data
tfidf_test = t1.transform(counts_test).toarray()
assert_equal(tfidf_test.shape, (len(test_data), len(v1.vocabulary_)))
# test tf alone
t2 = TfidfTransformer(norm='l1', use_idf=False)
tf = t2.fit(counts_train).transform(counts_train).toarray()
assert_equal(t2.idf_, None)
# test idf transform with unlearned idf vector
t3 = TfidfTransformer(use_idf=True)
assert_raises(ValueError, t3.transform, counts_train)
# test idf transform with incompatible n_features
X = [[1, 1, 5],
[1, 1, 0]]
t3.fit(X)
X_incompt = [[1, 3],
[1, 3]]
assert_raises(ValueError, t3.transform, X_incompt)
# L1-normalized term frequencies sum to one
assert_array_almost_equal(np.sum(tf, axis=1), [1.0] * n_train)
# test the direct tfidf vectorizer
# (equivalent to term count vectorizer + tfidf transformer)
train_data = iter(ALL_FOOD_DOCS[:-1])
tv = TfidfVectorizer(norm='l1')
tv.max_df = v1.max_df
tfidf2 = tv.fit_transform(train_data).toarray()
assert_false(tv.fixed_vocabulary_)
assert_array_almost_equal(tfidf, tfidf2)
# test the direct tfidf vectorizer with new data
tfidf_test2 = tv.transform(test_data).toarray()
assert_array_almost_equal(tfidf_test, tfidf_test2)
# test transform on unfitted vectorizer with empty vocabulary
v3 = CountVectorizer(vocabulary=None)
assert_raises(ValueError, v3.transform, train_data)
# ascii preprocessor?
v3.set_params(strip_accents='ascii', lowercase=False)
assert_equal(v3.build_preprocessor(), strip_accents_ascii)
# error on bad strip_accents param
v3.set_params(strip_accents='_gabbledegook_', preprocessor=None)
assert_raises(ValueError, v3.build_preprocessor)
# error with bad analyzer type
v3.set_params = '_invalid_analyzer_type_'
assert_raises(ValueError, v3.build_analyzer)
def test_tfidf_vectorizer_setters():
tv = TfidfVectorizer(norm='l2', use_idf=False, smooth_idf=False,
sublinear_tf=False)
tv.norm = 'l1'
assert_equal(tv._tfidf.norm, 'l1')
tv.use_idf = True
assert_true(tv._tfidf.use_idf)
tv.smooth_idf = True
assert_true(tv._tfidf.smooth_idf)
tv.sublinear_tf = True
assert_true(tv._tfidf.sublinear_tf)
def test_hashing_vectorizer():
v = HashingVectorizer()
X = v.transform(ALL_FOOD_DOCS)
token_nnz = X.nnz
assert_equal(X.shape, (len(ALL_FOOD_DOCS), v.n_features))
assert_equal(X.dtype, v.dtype)
# By default the hashed values receive a random sign and l2 normalization
# makes the feature values bounded
assert_true(np.min(X.data) > -1)
assert_true(np.min(X.data) < 0)
assert_true(np.max(X.data) > 0)
assert_true(np.max(X.data) < 1)
# Check that the rows are normalized
for i in range(X.shape[0]):
        assert_almost_equal(np.linalg.norm(X[i].data, 2), 1.0)
# Check vectorization with some non-default parameters
v = HashingVectorizer(ngram_range=(1, 2), non_negative=True, norm='l1')
X = v.transform(ALL_FOOD_DOCS)
assert_equal(X.shape, (len(ALL_FOOD_DOCS), v.n_features))
assert_equal(X.dtype, v.dtype)
# ngrams generate more non zeros
ngrams_nnz = X.nnz
assert_true(ngrams_nnz > token_nnz)
assert_true(ngrams_nnz < 2 * token_nnz)
# makes the feature values bounded
assert_true(np.min(X.data) > 0)
assert_true(np.max(X.data) < 1)
# Check that the rows are normalized
for i in range(X.shape[0]):
        assert_almost_equal(np.linalg.norm(X[i].data, 1), 1.0)
def test_feature_names():
cv = CountVectorizer(max_df=0.5)
# test for Value error on unfitted/empty vocabulary
assert_raises(ValueError, cv.get_feature_names)
X = cv.fit_transform(ALL_FOOD_DOCS)
n_samples, n_features = X.shape
assert_equal(len(cv.vocabulary_), n_features)
feature_names = cv.get_feature_names()
assert_equal(len(feature_names), n_features)
assert_array_equal(['beer', 'burger', 'celeri', 'coke', 'pizza',
'salad', 'sparkling', 'tomato', 'water'],
feature_names)
for idx, name in enumerate(feature_names):
assert_equal(idx, cv.vocabulary_.get(name))
def test_vectorizer_max_features():
vec_factories = (
CountVectorizer,
TfidfVectorizer,
)
expected_vocabulary = set(['burger', 'beer', 'salad', 'pizza'])
expected_stop_words = set([u'celeri', u'tomato', u'copyright', u'coke',
u'sparkling', u'water', u'the'])
for vec_factory in vec_factories:
# test bounded number of extracted features
vectorizer = vec_factory(max_df=0.6, max_features=4)
vectorizer.fit(ALL_FOOD_DOCS)
assert_equal(set(vectorizer.vocabulary_), expected_vocabulary)
assert_equal(vectorizer.stop_words_, expected_stop_words)
def test_count_vectorizer_max_features():
# Regression test: max_features didn't work correctly in 0.14.
cv_1 = CountVectorizer(max_features=1)
cv_3 = CountVectorizer(max_features=3)
cv_None = CountVectorizer(max_features=None)
counts_1 = cv_1.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
counts_3 = cv_3.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
counts_None = cv_None.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
features_1 = cv_1.get_feature_names()
features_3 = cv_3.get_feature_names()
features_None = cv_None.get_feature_names()
# The most common feature is "the", with frequency 7.
assert_equal(7, counts_1.max())
assert_equal(7, counts_3.max())
assert_equal(7, counts_None.max())
# The most common feature should be the same
assert_equal("the", features_1[np.argmax(counts_1)])
assert_equal("the", features_3[np.argmax(counts_3)])
assert_equal("the", features_None[np.argmax(counts_None)])
def test_vectorizer_max_df():
test_data = ['abc', 'dea', 'eat']
vect = CountVectorizer(analyzer='char', max_df=1.0)
vect.fit(test_data)
assert_true('a' in vect.vocabulary_.keys())
assert_equal(len(vect.vocabulary_.keys()), 6)
assert_equal(len(vect.stop_words_), 0)
vect.max_df = 0.5 # 0.5 * 3 documents -> max_doc_count == 1.5
vect.fit(test_data)
assert_true('a' not in vect.vocabulary_.keys()) # {ae} ignored
assert_equal(len(vect.vocabulary_.keys()), 4) # {bcdt} remain
assert_true('a' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 2)
vect.max_df = 1
vect.fit(test_data)
assert_true('a' not in vect.vocabulary_.keys()) # {ae} ignored
assert_equal(len(vect.vocabulary_.keys()), 4) # {bcdt} remain
assert_true('a' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 2)
def test_vectorizer_min_df():
test_data = ['abc', 'dea', 'eat']
vect = CountVectorizer(analyzer='char', min_df=1)
vect.fit(test_data)
assert_true('a' in vect.vocabulary_.keys())
assert_equal(len(vect.vocabulary_.keys()), 6)
assert_equal(len(vect.stop_words_), 0)
vect.min_df = 2
vect.fit(test_data)
assert_true('c' not in vect.vocabulary_.keys()) # {bcdt} ignored
assert_equal(len(vect.vocabulary_.keys()), 2) # {ae} remain
assert_true('c' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 4)
vect.min_df = 0.8 # 0.8 * 3 documents -> min_doc_count == 2.4
vect.fit(test_data)
assert_true('c' not in vect.vocabulary_.keys()) # {bcdet} ignored
assert_equal(len(vect.vocabulary_.keys()), 1) # {a} remains
assert_true('c' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 5)
def test_count_binary_occurrences():
# by default multiple occurrences are counted as longs
test_data = ['aaabc', 'abbde']
vect = CountVectorizer(analyzer='char', max_df=1.0)
X = vect.fit_transform(test_data).toarray()
assert_array_equal(['a', 'b', 'c', 'd', 'e'], vect.get_feature_names())
assert_array_equal([[3, 1, 1, 0, 0],
[1, 2, 0, 1, 1]], X)
# using boolean features, we can fetch the binary occurrence info
# instead.
vect = CountVectorizer(analyzer='char', max_df=1.0, binary=True)
X = vect.fit_transform(test_data).toarray()
assert_array_equal([[1, 1, 1, 0, 0],
[1, 1, 0, 1, 1]], X)
# check the ability to change the dtype
vect = CountVectorizer(analyzer='char', max_df=1.0,
binary=True, dtype=np.float32)
X_sparse = vect.fit_transform(test_data)
assert_equal(X_sparse.dtype, np.float32)
def test_hashed_binary_occurrences():
# by default multiple occurrences are counted as longs
test_data = ['aaabc', 'abbde']
vect = HashingVectorizer(analyzer='char', non_negative=True,
norm=None)
X = vect.transform(test_data)
assert_equal(np.max(X[0:1].data), 3)
assert_equal(np.max(X[1:2].data), 2)
assert_equal(X.dtype, np.float64)
# using boolean features, we can fetch the binary occurrence info
# instead.
vect = HashingVectorizer(analyzer='char', non_negative=True, binary=True,
norm=None)
X = vect.transform(test_data)
assert_equal(np.max(X.data), 1)
assert_equal(X.dtype, np.float64)
# check the ability to change the dtype
vect = HashingVectorizer(analyzer='char', non_negative=True, binary=True,
norm=None, dtype=np.float64)
X = vect.transform(test_data)
assert_equal(X.dtype, np.float64)
def test_vectorizer_inverse_transform():
# raw documents
data = ALL_FOOD_DOCS
for vectorizer in (TfidfVectorizer(), CountVectorizer()):
transformed_data = vectorizer.fit_transform(data)
inversed_data = vectorizer.inverse_transform(transformed_data)
analyze = vectorizer.build_analyzer()
for doc, inversed_terms in zip(data, inversed_data):
terms = np.sort(np.unique(analyze(doc)))
inversed_terms = np.sort(np.unique(inversed_terms))
assert_array_equal(terms, inversed_terms)
# Test that inverse_transform also works with numpy arrays
transformed_data = transformed_data.toarray()
inversed_data2 = vectorizer.inverse_transform(transformed_data)
for terms, terms2 in zip(inversed_data, inversed_data2):
assert_array_equal(np.sort(terms), np.sort(terms2))
def test_count_vectorizer_pipeline_grid_selection():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
# split the dataset for model development and final evaluation
train_data, test_data, target_train, target_test = train_test_split(
data, target, test_size=.2, random_state=0)
pipeline = Pipeline([('vect', CountVectorizer()),
('svc', LinearSVC())])
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
'svc__loss': ('hinge', 'squared_hinge')
}
# find the best parameters for both the feature extraction and the
# classifier
grid_search = GridSearchCV(pipeline, parameters, n_jobs=1)
# Check that the best model found by grid search is 100% correct on the
# held out evaluation set.
pred = grid_search.fit(train_data, target_train).predict(test_data)
assert_array_equal(pred, target_test)
    # on this toy dataset every parameter combination converges to a 100%
    # accuracy model, so the tie is broken by the grid search's candidate
    # ordering and the unigram model ends up selected as the best estimator
assert_equal(grid_search.best_score_, 1.0)
best_vectorizer = grid_search.best_estimator_.named_steps['vect']
assert_equal(best_vectorizer.ngram_range, (1, 1))
def test_vectorizer_pipeline_grid_selection():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
# split the dataset for model development and final evaluation
train_data, test_data, target_train, target_test = train_test_split(
data, target, test_size=.1, random_state=0)
pipeline = Pipeline([('vect', TfidfVectorizer()),
('svc', LinearSVC())])
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
'vect__norm': ('l1', 'l2'),
'svc__loss': ('hinge', 'squared_hinge'),
}
# find the best parameters for both the feature extraction and the
# classifier
grid_search = GridSearchCV(pipeline, parameters, n_jobs=1)
# Check that the best model found by grid search is 100% correct on the
# held out evaluation set.
pred = grid_search.fit(train_data, target_train).predict(test_data)
assert_array_equal(pred, target_test)
    # on this toy dataset every parameter combination converges to a 100%
    # accuracy model, so the tie is broken by the grid search's candidate
    # ordering and the unigram, l2-normalized model ends up selected as the
    # best estimator
assert_equal(grid_search.best_score_, 1.0)
best_vectorizer = grid_search.best_estimator_.named_steps['vect']
assert_equal(best_vectorizer.ngram_range, (1, 1))
assert_equal(best_vectorizer.norm, 'l2')
assert_false(best_vectorizer.fixed_vocabulary_)
def test_vectorizer_pipeline_cross_validation():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
pipeline = Pipeline([('vect', TfidfVectorizer()),
('svc', LinearSVC())])
cv_scores = cross_val_score(pipeline, data, target, cv=3)
assert_array_equal(cv_scores, [1., 1., 1.])
def test_vectorizer_unicode():
    # Test that the count vectorizer works with Cyrillic text.
document = (
"\xd0\x9c\xd0\xb0\xd1\x88\xd0\xb8\xd0\xbd\xd0\xbd\xd0\xbe\xd0"
"\xb5 \xd0\xbe\xd0\xb1\xd1\x83\xd1\x87\xd0\xb5\xd0\xbd\xd0\xb8\xd0"
"\xb5 \xe2\x80\x94 \xd0\xbe\xd0\xb1\xd1\x88\xd0\xb8\xd1\x80\xd0\xbd"
"\xd1\x8b\xd0\xb9 \xd0\xbf\xd0\xbe\xd0\xb4\xd1\x80\xd0\xb0\xd0\xb7"
"\xd0\xb4\xd0\xb5\xd0\xbb \xd0\xb8\xd1\x81\xd0\xba\xd1\x83\xd1\x81"
"\xd1\x81\xd1\x82\xd0\xb2\xd0\xb5\xd0\xbd\xd0\xbd\xd0\xbe\xd0\xb3"
"\xd0\xbe \xd0\xb8\xd0\xbd\xd1\x82\xd0\xb5\xd0\xbb\xd0\xbb\xd0"
"\xb5\xd0\xba\xd1\x82\xd0\xb0, \xd0\xb8\xd0\xb7\xd1\x83\xd1\x87"
"\xd0\xb0\xd1\x8e\xd1\x89\xd0\xb8\xd0\xb9 \xd0\xbc\xd0\xb5\xd1\x82"
"\xd0\xbe\xd0\xb4\xd1\x8b \xd0\xbf\xd0\xbe\xd1\x81\xd1\x82\xd1\x80"
"\xd0\xbe\xd0\xb5\xd0\xbd\xd0\xb8\xd1\x8f \xd0\xb0\xd0\xbb\xd0\xb3"
"\xd0\xbe\xd1\x80\xd0\xb8\xd1\x82\xd0\xbc\xd0\xbe\xd0\xb2, \xd1\x81"
"\xd0\xbf\xd0\xbe\xd1\x81\xd0\xbe\xd0\xb1\xd0\xbd\xd1\x8b\xd1\x85 "
"\xd0\xbe\xd0\xb1\xd1\x83\xd1\x87\xd0\xb0\xd1\x82\xd1\x8c\xd1\x81\xd1"
"\x8f.")
vect = CountVectorizer()
X_counted = vect.fit_transform([document])
assert_equal(X_counted.shape, (1, 15))
vect = HashingVectorizer(norm=None, non_negative=True)
X_hashed = vect.transform([document])
assert_equal(X_hashed.shape, (1, 2 ** 20))
# No collisions on such a small dataset
assert_equal(X_counted.nnz, X_hashed.nnz)
# When norm is None and non_negative, the tokens are counted up to
# collisions
assert_array_equal(np.sort(X_counted.data), np.sort(X_hashed.data))
def test_tfidf_vectorizer_with_fixed_vocabulary():
    # Non-regression smoke test for inheritance issues
vocabulary = ['pizza', 'celeri']
vect = TfidfVectorizer(vocabulary=vocabulary)
X_1 = vect.fit_transform(ALL_FOOD_DOCS)
X_2 = vect.transform(ALL_FOOD_DOCS)
assert_array_almost_equal(X_1.toarray(), X_2.toarray())
assert_true(vect.fixed_vocabulary_)
def test_pickling_vectorizer():
instances = [
HashingVectorizer(),
HashingVectorizer(norm='l1'),
HashingVectorizer(binary=True),
HashingVectorizer(ngram_range=(1, 2)),
CountVectorizer(),
CountVectorizer(preprocessor=strip_tags),
CountVectorizer(analyzer=lazy_analyze),
CountVectorizer(preprocessor=strip_tags).fit(JUNK_FOOD_DOCS),
CountVectorizer(strip_accents=strip_eacute).fit(JUNK_FOOD_DOCS),
TfidfVectorizer(),
TfidfVectorizer(analyzer=lazy_analyze),
TfidfVectorizer().fit(JUNK_FOOD_DOCS),
]
for orig in instances:
s = pickle.dumps(orig)
copy = pickle.loads(s)
assert_equal(type(copy), orig.__class__)
assert_equal(copy.get_params(), orig.get_params())
assert_array_equal(
copy.fit_transform(JUNK_FOOD_DOCS).toarray(),
orig.fit_transform(JUNK_FOOD_DOCS).toarray())
def test_countvectorizer_vocab_sets_when_pickling():
# ensure that vocabulary of type set is coerced to a list to
# preserve iteration ordering after deserialization
rng = np.random.RandomState(0)
vocab_words = np.array(['beer', 'burger', 'celeri', 'coke', 'pizza',
'salad', 'sparkling', 'tomato', 'water'])
for x in range(0, 100):
vocab_set = set(choice(vocab_words, size=5, replace=False,
random_state=rng))
cv = CountVectorizer(vocabulary=vocab_set)
unpickled_cv = pickle.loads(pickle.dumps(cv))
cv.fit(ALL_FOOD_DOCS)
unpickled_cv.fit(ALL_FOOD_DOCS)
assert_equal(cv.get_feature_names(), unpickled_cv.get_feature_names())
def test_countvectorizer_vocab_dicts_when_pickling():
rng = np.random.RandomState(0)
vocab_words = np.array(['beer', 'burger', 'celeri', 'coke', 'pizza',
'salad', 'sparkling', 'tomato', 'water'])
for x in range(0, 100):
vocab_dict = dict()
words = choice(vocab_words, size=5, replace=False, random_state=rng)
for y in range(0, 5):
vocab_dict[words[y]] = y
cv = CountVectorizer(vocabulary=vocab_dict)
unpickled_cv = pickle.loads(pickle.dumps(cv))
cv.fit(ALL_FOOD_DOCS)
unpickled_cv.fit(ALL_FOOD_DOCS)
assert_equal(cv.get_feature_names(), unpickled_cv.get_feature_names())
def test_stop_words_removal():
# Ensure that deleting the stop_words_ attribute doesn't affect transform
fitted_vectorizers = (
TfidfVectorizer().fit(JUNK_FOOD_DOCS),
CountVectorizer(preprocessor=strip_tags).fit(JUNK_FOOD_DOCS),
CountVectorizer(strip_accents=strip_eacute).fit(JUNK_FOOD_DOCS)
)
for vect in fitted_vectorizers:
vect_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
vect.stop_words_ = None
stop_None_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
delattr(vect, 'stop_words_')
stop_del_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
assert_array_equal(stop_None_transform, vect_transform)
assert_array_equal(stop_del_transform, vect_transform)
def test_pickling_transformer():
X = CountVectorizer().fit_transform(JUNK_FOOD_DOCS)
orig = TfidfTransformer().fit(X)
s = pickle.dumps(orig)
copy = pickle.loads(s)
assert_equal(type(copy), orig.__class__)
assert_array_equal(
copy.fit_transform(X).toarray(),
orig.fit_transform(X).toarray())
def test_non_unique_vocab():
vocab = ['a', 'b', 'c', 'a', 'a']
vect = CountVectorizer(vocabulary=vocab)
assert_raises(ValueError, vect.fit, [])
def test_hashingvectorizer_nan_in_docs():
# np.nan can appear when using pandas to load text fields from a csv file
# with missing values.
message = "np.nan is an invalid document, expected byte or unicode string."
exception = ValueError
def func():
hv = HashingVectorizer()
hv.fit_transform(['hello world', np.nan, 'hello hello'])
assert_raise_message(exception, message, func)
def test_tfidfvectorizer_binary():
# Non-regression test: TfidfVectorizer used to ignore its "binary" param.
v = TfidfVectorizer(binary=True, use_idf=False, norm=None)
assert_true(v.binary)
X = v.fit_transform(['hello world', 'hello hello']).toarray()
assert_array_equal(X.ravel(), [1, 1, 1, 0])
X2 = v.transform(['hello world', 'hello hello']).toarray()
assert_array_equal(X2.ravel(), [1, 1, 1, 0])
def test_tfidfvectorizer_export_idf():
vect = TfidfVectorizer(use_idf=True)
vect.fit(JUNK_FOOD_DOCS)
assert_array_almost_equal(vect.idf_, vect._tfidf.idf_)
def test_vectorizer_vocab_clone():
vect_vocab = TfidfVectorizer(vocabulary=["the"])
vect_vocab_clone = clone(vect_vocab)
vect_vocab.fit(ALL_FOOD_DOCS)
vect_vocab_clone.fit(ALL_FOOD_DOCS)
assert_equal(vect_vocab_clone.vocabulary_, vect_vocab.vocabulary_)
def test_vectorizer_string_object_as_input():
message = ("Iterable over raw text documents expected, "
"string object received.")
for vec in [CountVectorizer(), TfidfVectorizer(), HashingVectorizer()]:
assert_raise_message(
ValueError, message, vec.fit_transform, "hello world!")
assert_raise_message(
ValueError, message, vec.fit, "hello world!")
assert_raise_message(
ValueError, message, vec.transform, "hello world!")
| bsd-3-clause |
grehujt/SmallPythonProjects | TopicModeling/solution.py | 1 | 2066 |
from gensim import corpora, models, matutils
import matplotlib.pyplot as plt
# https://radimrehurek.com/gensim/corpora/bleicorpus.html
corpus = corpora.BleiCorpus('./data/ap.dat', './data/vocab.txt')
print type(corpus)
print dir(corpus)
# https://radimrehurek.com/gensim/models/ldamodel.html
model = models.ldamodel.LdaModel(corpus, num_topics=100, id2word=corpus.id2word)
# doc = corpus.docbyoffset(0)
# topics = model[doc]
# numTopics = [len(model[d]) for d in corpus]
# hist1= plt.hist(numTopics, alpha=0.5, bins=100, label='alpha=1/topicCount')
# plt.xlabel('num of topics')
# plt.ylabel('num of docs')
# plt.savefig('pics/pic0.png')
# exit(0)
# model = models.ldamodel.LdaModel(corpus, num_topics=100, id2word=corpus.id2word, alpha=1)
# topics = model[doc]
# numTopics = [len(model[d]) for d in corpus]
# hist2 = plt.hist(numTopics, alpha=0.5, bins=100, label='alpha=1')
# plt.legend()
# plt.xlabel('num of topics')
# plt.ylabel('num of docs')
# plt.savefig('pics/pic1.png')
# find most discussed topic
topics = matutils.corpus2dense(model[corpus], num_terms=model.num_topics)
weight = topics.sum(1)
tIdices = weight.argsort()
h = '''| topic id | words |
| :-------------: |:-------------:|
'''
print h
for i in xrange(-1, -11, -1):
words = model.show_topic(tIdices[i])
print '|', tIdices[i], '|', ' '.join(s[0] for s in sorted(words, key=lambda d: d[1], reverse=True)), '|'
from scipy.spatial import distance
# calculate pairwise distance between topic vectors
pdists = distance.squareform(distance.pdist(topics))
# exclude self-distances so the argmin below picks a different document
largest = pdists.max()
for i in xrange(pdists.shape[0]):
pdists[i][i] = largest + 1
import re
rawtext = re.findall(r'(?s)<TEXT>(.*?)</TEXT>', open('./data/ap.txt').read())
testDocIdx = 1
mostSimDocIdx = pdists[testDocIdx].argmin()
print rawtext[testDocIdx]
print
print
print
print rawtext[mostSimDocIdx]
corpora.MmCorpus.serialize('./data/ap.mm', corpus)
mm = corpora.mmcorpus.MmCorpus('./data/ap.mm')
hdp = models.hdpmodel.HdpModel(corpus, corpus.id2word)
print type(hdp)
print dir(hdp)
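# Optionally inspect a few of the topics inferred by the HDP model; the exact
# output format (and keyword arguments) of show_topics varies across gensim
# versions, so this is only an illustrative sketch.
for topic in hdp.show_topics():
    print topic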
| mit |
Titan-C/scikit-learn | sklearn/decomposition/nmf.py | 8 | 45009 | """ Non-negative matrix factorization
"""
# Author: Vlad Niculae
# Lars Buitinck
# Mathieu Blondel <[email protected]>
# Tom Dupre la Tour
# License: BSD 3 clause
from __future__ import division, print_function
from math import sqrt
import warnings
import numbers
import time
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_random_state, check_array
from ..utils.extmath import randomized_svd, safe_sparse_dot, squared_norm
from ..utils.extmath import safe_min
from ..utils.validation import check_is_fitted, check_non_negative
from ..exceptions import ConvergenceWarning
from .cdnmf_fast import _update_cdnmf_fast
EPSILON = np.finfo(np.float32).eps
INTEGER_TYPES = (numbers.Integral, np.integer)
def norm(x):
"""Dot product-based Euclidean norm implementation
See: http://fseoane.net/blog/2011/computing-the-vector-norm/
"""
return sqrt(squared_norm(x))
def trace_dot(X, Y):
"""Trace of np.dot(X, Y.T)."""
return np.dot(X.ravel(), Y.ravel())
def _check_init(A, shape, whom):
A = check_array(A)
if np.shape(A) != shape:
raise ValueError('Array with wrong shape passed to %s. Expected %s, '
'but got %s ' % (whom, shape, np.shape(A)))
check_non_negative(A, whom)
if np.max(A) == 0:
raise ValueError('Array passed to %s is full of zeros.' % whom)
def _beta_divergence(X, W, H, beta, square_root=False):
"""Compute the beta-divergence of X and dot(W, H).
Parameters
----------
X : float or array-like, shape (n_samples, n_features)
W : float or dense array-like, shape (n_samples, n_components)
H : float or dense array-like, shape (n_components, n_features)
beta : float, string in {'frobenius', 'kullback-leibler', 'itakura-saito'}
Parameter of the beta-divergence.
If beta == 2, this is half the Frobenius *squared* norm.
If beta == 1, this is the generalized Kullback-Leibler divergence.
If beta == 0, this is the Itakura-Saito divergence.
Else, this is the general beta-divergence.
square_root : boolean, default False
If True, return np.sqrt(2 * res)
For beta == 2, it corresponds to the Frobenius norm.
Returns
-------
res : float
        Beta divergence of X and np.dot(W, H)
"""
beta = _beta_loss_to_float(beta)
# The method can be called with scalars
if not sp.issparse(X):
X = np.atleast_2d(X)
W = np.atleast_2d(W)
H = np.atleast_2d(H)
# Frobenius norm
if beta == 2:
# Avoid the creation of the dense np.dot(W, H) if X is sparse.
if sp.issparse(X):
norm_X = np.dot(X.data, X.data)
norm_WH = trace_dot(np.dot(np.dot(W.T, W), H), H)
cross_prod = trace_dot((X * H.T), W)
res = (norm_X + norm_WH - 2. * cross_prod) / 2.
else:
res = squared_norm(X - np.dot(W, H)) / 2.
if square_root:
return np.sqrt(res * 2)
else:
return res
if sp.issparse(X):
# compute np.dot(W, H) only where X is nonzero
WH_data = _special_sparse_dot(W, H, X).data
X_data = X.data
else:
WH = np.dot(W, H)
WH_data = WH.ravel()
X_data = X.ravel()
# do not affect the zeros: here 0 ** (-1) = 0 and not infinity
WH_data = WH_data[X_data != 0]
X_data = X_data[X_data != 0]
# used to avoid division by zero
WH_data[WH_data == 0] = EPSILON
# generalized Kullback-Leibler divergence
if beta == 1:
# fast and memory efficient computation of np.sum(np.dot(W, H))
sum_WH = np.dot(np.sum(W, axis=0), np.sum(H, axis=1))
# computes np.sum(X * log(X / WH)) only where X is nonzero
div = X_data / WH_data
res = np.dot(X_data, np.log(div))
# add full np.sum(np.dot(W, H)) - np.sum(X)
res += sum_WH - X_data.sum()
# Itakura-Saito divergence
elif beta == 0:
div = X_data / WH_data
res = np.sum(div) - np.product(X.shape) - np.sum(np.log(div))
# beta-divergence, beta not in (0, 1, 2)
else:
if sp.issparse(X):
# slow loop, but memory efficient computation of :
# np.sum(np.dot(W, H) ** beta)
sum_WH_beta = 0
for i in range(X.shape[1]):
sum_WH_beta += np.sum(np.dot(W, H[:, i]) ** beta)
else:
sum_WH_beta = np.sum(WH ** beta)
sum_X_WH = np.dot(X_data, WH_data ** (beta - 1))
res = (X_data ** beta).sum() - beta * sum_X_WH
res += sum_WH_beta * (beta - 1)
res /= beta * (beta - 1)
if square_root:
return np.sqrt(2 * res)
else:
return res
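# Informal sanity check for the Frobenius case (illustrative only): with
# W = np.ones((2, 1)) and H = np.ones((1, 2)), np.dot(W, H) reproduces
# X = np.ones((2, 2)) exactly, so
# _beta_divergence(np.ones((2, 2)), np.ones((2, 1)), np.ones((1, 2)), 2)
# evaluates to 0.0, i.e. half of a zero squared Frobenius reconstruction error.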
def _special_sparse_dot(W, H, X):
"""Computes np.dot(W, H), only where X is non zero."""
if sp.issparse(X):
ii, jj = X.nonzero()
dot_vals = np.multiply(W[ii, :], H.T[jj, :]).sum(axis=1)
WH = sp.coo_matrix((dot_vals, (ii, jj)), shape=X.shape)
return WH.tocsr()
else:
return np.dot(W, H)
def _compute_regularization(alpha, l1_ratio, regularization):
"""Compute L1 and L2 regularization coefficients for W and H"""
alpha_H = 0.
alpha_W = 0.
if regularization in ('both', 'components'):
alpha_H = float(alpha)
if regularization in ('both', 'transformation'):
alpha_W = float(alpha)
l1_reg_W = alpha_W * l1_ratio
l1_reg_H = alpha_H * l1_ratio
l2_reg_W = alpha_W * (1. - l1_ratio)
l2_reg_H = alpha_H * (1. - l1_ratio)
return l1_reg_W, l1_reg_H, l2_reg_W, l2_reg_H
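# For example (illustrative only): _compute_regularization(1.0, 0.25, 'both')
# returns (0.25, 0.25, 0.75, 0.75), i.e. the L1 terms are alpha * l1_ratio and
# the L2 terms are alpha * (1 - l1_ratio), applied to both W and H.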
def _check_string_param(solver, regularization, beta_loss, init):
allowed_solver = ('cd', 'mu')
if solver not in allowed_solver:
raise ValueError(
'Invalid solver parameter: got %r instead of one of %r' %
(solver, allowed_solver))
allowed_regularization = ('both', 'components', 'transformation', None)
if regularization not in allowed_regularization:
raise ValueError(
'Invalid regularization parameter: got %r instead of one of %r' %
(regularization, allowed_regularization))
# 'mu' is the only solver that handles other beta losses than 'frobenius'
if solver != 'mu' and beta_loss not in (2, 'frobenius'):
raise ValueError(
'Invalid beta_loss parameter: solver %r does not handle beta_loss'
' = %r' % (solver, beta_loss))
if solver == 'mu' and init == 'nndsvd':
warnings.warn("The multiplicative update ('mu') solver cannot update "
"zeros present in the initialization, and so leads to "
"poorer results when used jointly with init='nndsvd'. "
"You may try init='nndsvda' or init='nndsvdar' instead.",
UserWarning)
beta_loss = _beta_loss_to_float(beta_loss)
return beta_loss
def _beta_loss_to_float(beta_loss):
"""Convert string beta_loss to float"""
allowed_beta_loss = {'frobenius': 2,
'kullback-leibler': 1,
'itakura-saito': 0}
if isinstance(beta_loss, str) and beta_loss in allowed_beta_loss:
beta_loss = allowed_beta_loss[beta_loss]
if not isinstance(beta_loss, numbers.Number):
raise ValueError('Invalid beta_loss parameter: got %r instead '
'of one of %r, or a float.' %
(beta_loss, allowed_beta_loss.keys()))
return beta_loss
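# For example (illustrative only): _beta_loss_to_float('kullback-leibler')
# returns 1, a numeric value such as 0.5 is passed through unchanged, and an
# unrecognized string raises ValueError.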
def _initialize_nmf(X, n_components, init=None, eps=1e-6,
random_state=None):
"""Algorithms for NMF initialization.
Computes an initial guess for the non-negative
rank k matrix approximation for X: X = WH
Parameters
----------
X : array-like, shape (n_samples, n_features)
The data matrix to be decomposed.
n_components : integer
The number of components desired in the approximation.
init : None | 'random' | 'nndsvd' | 'nndsvda' | 'nndsvdar'
Method used to initialize the procedure.
Default: 'nndsvd' if n_components < n_features, otherwise 'random'.
Valid options:
- 'random': non-negative random matrices, scaled with:
sqrt(X.mean() / n_components)
- 'nndsvd': Nonnegative Double Singular Value Decomposition (NNDSVD)
initialization (better for sparseness)
- 'nndsvda': NNDSVD with zeros filled with the average of X
(better when sparsity is not desired)
- 'nndsvdar': NNDSVD with zeros filled with small random values
(generally faster, less accurate alternative to NNDSVDa
for when sparsity is not desired)
- 'custom': use custom matrices W and H
eps : float
        Truncate all values less than this in output to zero.
random_state : int, RandomState instance or None, optional, default: None
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
        by `np.random`. Used when ``init`` == 'nndsvdar' or 'random'.
Returns
-------
W : array-like, shape (n_samples, n_components)
Initial guesses for solving X ~= WH
H : array-like, shape (n_components, n_features)
Initial guesses for solving X ~= WH
References
----------
C. Boutsidis, E. Gallopoulos: SVD based initialization: A head start for
nonnegative matrix factorization - Pattern Recognition, 2008
http://tinyurl.com/nndsvd
"""
check_non_negative(X, "NMF initialization")
n_samples, n_features = X.shape
if init is None:
if n_components < n_features:
init = 'nndsvd'
else:
init = 'random'
# Random initialization
if init == 'random':
avg = np.sqrt(X.mean() / n_components)
rng = check_random_state(random_state)
H = avg * rng.randn(n_components, n_features)
W = avg * rng.randn(n_samples, n_components)
# we do not write np.abs(H, out=H) to stay compatible with
# numpy 1.5 and earlier where the 'out' keyword is not
# supported as a kwarg on ufuncs
np.abs(H, H)
np.abs(W, W)
return W, H
# NNDSVD initialization
U, S, V = randomized_svd(X, n_components, random_state=random_state)
W, H = np.zeros(U.shape), np.zeros(V.shape)
# The leading singular triplet is non-negative
# so it can be used as is for initialization.
W[:, 0] = np.sqrt(S[0]) * np.abs(U[:, 0])
H[0, :] = np.sqrt(S[0]) * np.abs(V[0, :])
for j in range(1, n_components):
x, y = U[:, j], V[j, :]
# extract positive and negative parts of column vectors
x_p, y_p = np.maximum(x, 0), np.maximum(y, 0)
x_n, y_n = np.abs(np.minimum(x, 0)), np.abs(np.minimum(y, 0))
# and their norms
x_p_nrm, y_p_nrm = norm(x_p), norm(y_p)
x_n_nrm, y_n_nrm = norm(x_n), norm(y_n)
m_p, m_n = x_p_nrm * y_p_nrm, x_n_nrm * y_n_nrm
# choose update
if m_p > m_n:
u = x_p / x_p_nrm
v = y_p / y_p_nrm
sigma = m_p
else:
u = x_n / x_n_nrm
v = y_n / y_n_nrm
sigma = m_n
lbd = np.sqrt(S[j] * sigma)
W[:, j] = lbd * u
H[j, :] = lbd * v
W[W < eps] = 0
H[H < eps] = 0
if init == "nndsvd":
pass
elif init == "nndsvda":
avg = X.mean()
W[W == 0] = avg
H[H == 0] = avg
elif init == "nndsvdar":
rng = check_random_state(random_state)
avg = X.mean()
W[W == 0] = abs(avg * rng.randn(len(W[W == 0])) / 100)
H[H == 0] = abs(avg * rng.randn(len(H[H == 0])) / 100)
else:
raise ValueError(
'Invalid init parameter: got %r instead of one of %r' %
(init, (None, 'random', 'nndsvd', 'nndsvda', 'nndsvdar')))
return W, H
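# Shape sketch (hypothetical data, illustrative only): for a non-negative X of
# shape (6, 5),
#     W, H = _initialize_nmf(np.abs(np.random.RandomState(0).randn(6, 5)),
#                            n_components=3, init='nndsvda', random_state=0)
# yields W.shape == (6, 3) and H.shape == (3, 5), ready to seed either solver.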
def _update_coordinate_descent(X, W, Ht, l1_reg, l2_reg, shuffle,
random_state):
"""Helper function for _fit_coordinate_descent
Update W to minimize the objective function, iterating once over all
coordinates. By symmetry, to update H, one can call
_update_coordinate_descent(X.T, Ht, W, ...)
"""
n_components = Ht.shape[1]
HHt = np.dot(Ht.T, Ht)
XHt = safe_sparse_dot(X, Ht)
# L2 regularization corresponds to increase of the diagonal of HHt
if l2_reg != 0.:
# adds l2_reg only on the diagonal
HHt.flat[::n_components + 1] += l2_reg
# L1 regularization corresponds to decrease of each element of XHt
if l1_reg != 0.:
XHt -= l1_reg
if shuffle:
permutation = random_state.permutation(n_components)
else:
permutation = np.arange(n_components)
# The following seems to be required on 64-bit Windows w/ Python 3.5.
permutation = np.asarray(permutation, dtype=np.intp)
return _update_cdnmf_fast(W, HHt, XHt, permutation)
def _fit_coordinate_descent(X, W, H, tol=1e-4, max_iter=200, l1_reg_W=0,
l1_reg_H=0, l2_reg_W=0, l2_reg_H=0, update_H=True,
verbose=0, shuffle=False, random_state=None):
"""Compute Non-negative Matrix Factorization (NMF) with Coordinate Descent
The objective function is minimized with an alternating minimization of W
and H. Each minimization is done with a cyclic (up to a permutation of the
features) Coordinate Descent.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Constant matrix.
W : array-like, shape (n_samples, n_components)
Initial guess for the solution.
H : array-like, shape (n_components, n_features)
Initial guess for the solution.
tol : float, default: 1e-4
Tolerance of the stopping condition.
max_iter : integer, default: 200
Maximum number of iterations before timing out.
l1_reg_W : double, default: 0.
L1 regularization parameter for W.
l1_reg_H : double, default: 0.
L1 regularization parameter for H.
l2_reg_W : double, default: 0.
L2 regularization parameter for W.
l2_reg_H : double, default: 0.
L2 regularization parameter for H.
update_H : boolean, default: True
Set to True, both W and H will be estimated from initial guesses.
Set to False, only W will be estimated.
verbose : integer, default: 0
The verbosity level.
shuffle : boolean, default: False
If true, randomize the order of coordinates in the CD solver.
random_state : int, RandomState instance or None, optional, default: None
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
W : array-like, shape (n_samples, n_components)
Solution to the non-negative least squares problem.
H : array-like, shape (n_components, n_features)
Solution to the non-negative least squares problem.
n_iter : int
The number of iterations done by the algorithm.
References
----------
Cichocki, Andrzej, and P. H. A. N. Anh-Huy. "Fast local algorithms for
large scale nonnegative matrix and tensor factorizations."
IEICE transactions on fundamentals of electronics, communications and
computer sciences 92.3: 708-721, 2009.
"""
# so W and Ht are both in C order in memory
Ht = check_array(H.T, order='C')
X = check_array(X, accept_sparse='csr')
rng = check_random_state(random_state)
for n_iter in range(max_iter):
violation = 0.
# Update W
violation += _update_coordinate_descent(X, W, Ht, l1_reg_W,
l2_reg_W, shuffle, rng)
# Update H
if update_H:
violation += _update_coordinate_descent(X.T, Ht, W, l1_reg_H,
l2_reg_H, shuffle, rng)
if n_iter == 0:
violation_init = violation
if violation_init == 0:
break
if verbose:
print("violation:", violation / violation_init)
if violation / violation_init <= tol:
if verbose:
print("Converged at iteration", n_iter + 1)
break
return W, Ht.T, n_iter
def _multiplicative_update_w(X, W, H, beta_loss, l1_reg_W, l2_reg_W, gamma,
H_sum=None, HHt=None, XHt=None, update_H=True):
"""update W in Multiplicative Update NMF"""
if beta_loss == 2:
# Numerator
if XHt is None:
XHt = safe_sparse_dot(X, H.T)
if update_H:
# avoid a copy of XHt, which will be re-computed (update_H=True)
numerator = XHt
else:
# preserve the XHt, which is not re-computed (update_H=False)
numerator = XHt.copy()
# Denominator
if HHt is None:
HHt = np.dot(H, H.T)
denominator = np.dot(W, HHt)
else:
# Numerator
# if X is sparse, compute WH only where X is non zero
WH_safe_X = _special_sparse_dot(W, H, X)
if sp.issparse(X):
WH_safe_X_data = WH_safe_X.data
X_data = X.data
else:
WH_safe_X_data = WH_safe_X
X_data = X
# copy used in the Denominator
WH = WH_safe_X.copy()
if beta_loss - 1. < 0:
WH[WH == 0] = EPSILON
# to avoid taking a negative power of zero
if beta_loss - 2. < 0:
WH_safe_X_data[WH_safe_X_data == 0] = EPSILON
if beta_loss == 1:
np.divide(X_data, WH_safe_X_data, out=WH_safe_X_data)
else:
WH_safe_X_data **= beta_loss - 2
# element-wise multiplication
WH_safe_X_data *= X_data
# here numerator = dot(X * (dot(W, H) ** (beta_loss - 2)), H.T)
numerator = safe_sparse_dot(WH_safe_X, H.T)
# Denominator
if beta_loss == 1:
if H_sum is None:
H_sum = np.sum(H, axis=1) # shape(n_components, )
denominator = H_sum[np.newaxis, :]
else:
            # computation of WHHt = dot(dot(W, H) ** (beta_loss - 1), H.T)
if sp.issparse(X):
# memory efficient computation
# (compute row by row, avoiding the dense matrix WH)
WHHt = np.empty(W.shape)
for i in range(X.shape[0]):
WHi = np.dot(W[i, :], H)
if beta_loss - 1 < 0:
WHi[WHi == 0] = EPSILON
WHi **= beta_loss - 1
WHHt[i, :] = np.dot(WHi, H.T)
else:
WH **= beta_loss - 1
WHHt = np.dot(WH, H.T)
denominator = WHHt
# Add L1 and L2 regularization
if l1_reg_W > 0:
denominator += l1_reg_W
if l2_reg_W > 0:
denominator = denominator + l2_reg_W * W
denominator[denominator == 0] = EPSILON
numerator /= denominator
delta_W = numerator
# gamma is in ]0, 1]
if gamma != 1:
delta_W **= gamma
return delta_W, H_sum, HHt, XHt
def _multiplicative_update_h(X, W, H, beta_loss, l1_reg_H, l2_reg_H, gamma):
"""update H in Multiplicative Update NMF"""
if beta_loss == 2:
numerator = safe_sparse_dot(W.T, X)
denominator = np.dot(np.dot(W.T, W), H)
else:
# Numerator
WH_safe_X = _special_sparse_dot(W, H, X)
if sp.issparse(X):
WH_safe_X_data = WH_safe_X.data
X_data = X.data
else:
WH_safe_X_data = WH_safe_X
X_data = X
# copy used in the Denominator
WH = WH_safe_X.copy()
if beta_loss - 1. < 0:
WH[WH == 0] = EPSILON
# to avoid division by zero
if beta_loss - 2. < 0:
WH_safe_X_data[WH_safe_X_data == 0] = EPSILON
if beta_loss == 1:
np.divide(X_data, WH_safe_X_data, out=WH_safe_X_data)
else:
WH_safe_X_data **= beta_loss - 2
# element-wise multiplication
WH_safe_X_data *= X_data
# here numerator = dot(W.T, (dot(W, H) ** (beta_loss - 2)) * X)
numerator = safe_sparse_dot(W.T, WH_safe_X)
# Denominator
if beta_loss == 1:
W_sum = np.sum(W, axis=0) # shape(n_components, )
W_sum[W_sum == 0] = 1.
denominator = W_sum[:, np.newaxis]
# beta_loss not in (1, 2)
else:
            # computation of WtWH = dot(W.T, dot(W, H) ** (beta_loss - 1))
if sp.issparse(X):
# memory efficient computation
# (compute column by column, avoiding the dense matrix WH)
WtWH = np.empty(H.shape)
for i in range(X.shape[1]):
WHi = np.dot(W, H[:, i])
if beta_loss - 1 < 0:
WHi[WHi == 0] = EPSILON
WHi **= beta_loss - 1
WtWH[:, i] = np.dot(W.T, WHi)
else:
WH **= beta_loss - 1
WtWH = np.dot(W.T, WH)
denominator = WtWH
# Add L1 and L2 regularization
if l1_reg_H > 0:
denominator += l1_reg_H
if l2_reg_H > 0:
denominator = denominator + l2_reg_H * H
denominator[denominator == 0] = EPSILON
numerator /= denominator
delta_H = numerator
# gamma is in ]0, 1]
if gamma != 1:
delta_H **= gamma
return delta_H
def _fit_multiplicative_update(X, W, H, beta_loss='frobenius',
max_iter=200, tol=1e-4,
l1_reg_W=0, l1_reg_H=0, l2_reg_W=0, l2_reg_H=0,
update_H=True, verbose=0):
"""Compute Non-negative Matrix Factorization with Multiplicative Update
The objective function is _beta_divergence(X, WH) and is minimized with an
alternating minimization of W and H. Each minimization is done with a
Multiplicative Update.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Constant input matrix.
W : array-like, shape (n_samples, n_components)
Initial guess for the solution.
H : array-like, shape (n_components, n_features)
Initial guess for the solution.
beta_loss : float or string, default 'frobenius'
String must be in {'frobenius', 'kullback-leibler', 'itakura-saito'}.
Beta divergence to be minimized, measuring the distance between X
and the dot product WH. Note that values different from 'frobenius'
(or 2) and 'kullback-leibler' (or 1) lead to significantly slower
fits. Note that for beta_loss <= 0 (or 'itakura-saito'), the input
matrix X cannot contain zeros.
max_iter : integer, default: 200
Number of iterations.
tol : float, default: 1e-4
Tolerance of the stopping condition.
l1_reg_W : double, default: 0.
L1 regularization parameter for W.
l1_reg_H : double, default: 0.
L1 regularization parameter for H.
l2_reg_W : double, default: 0.
L2 regularization parameter for W.
l2_reg_H : double, default: 0.
L2 regularization parameter for H.
update_H : boolean, default: True
Set to True, both W and H will be estimated from initial guesses.
Set to False, only W will be estimated.
verbose : integer, default: 0
The verbosity level.
Returns
-------
W : array, shape (n_samples, n_components)
Solution to the non-negative least squares problem.
H : array, shape (n_components, n_features)
Solution to the non-negative least squares problem.
n_iter : int
The number of iterations done by the algorithm.
References
----------
Fevotte, C., & Idier, J. (2011). Algorithms for nonnegative matrix
factorization with the beta-divergence. Neural Computation, 23(9).
"""
start_time = time.time()
beta_loss = _beta_loss_to_float(beta_loss)
# gamma for Maximization-Minimization (MM) algorithm [Fevotte 2011]
if beta_loss < 1:
gamma = 1. / (2. - beta_loss)
elif beta_loss > 2:
gamma = 1. / (beta_loss - 1.)
else:
gamma = 1.
# used for the convergence criterion
error_at_init = _beta_divergence(X, W, H, beta_loss, square_root=True)
previous_error = error_at_init
H_sum, HHt, XHt = None, None, None
for n_iter in range(1, max_iter + 1):
# update W
# H_sum, HHt and XHt are saved and reused if not update_H
delta_W, H_sum, HHt, XHt = _multiplicative_update_w(
X, W, H, beta_loss, l1_reg_W, l2_reg_W, gamma,
H_sum, HHt, XHt, update_H)
W *= delta_W
# necessary for stability with beta_loss < 1
if beta_loss < 1:
W[W < np.finfo(np.float64).eps] = 0.
# update H
if update_H:
delta_H = _multiplicative_update_h(X, W, H, beta_loss, l1_reg_H,
l2_reg_H, gamma)
H *= delta_H
# These values will be recomputed since H changed
H_sum, HHt, XHt = None, None, None
# necessary for stability with beta_loss < 1
if beta_loss <= 1:
H[H < np.finfo(np.float64).eps] = 0.
# test convergence criterion every 10 iterations
if tol > 0 and n_iter % 10 == 0:
error = _beta_divergence(X, W, H, beta_loss, square_root=True)
if verbose:
iter_time = time.time()
print("Epoch %02d reached after %.3f seconds, error: %f" %
(n_iter, iter_time - start_time, error))
if (previous_error - error) / error_at_init < tol:
break
previous_error = error
# do not print if we have already printed in the convergence test
if verbose and (tol == 0 or n_iter % 10 != 0):
end_time = time.time()
print("Epoch %02d reached after %.3f seconds." %
(n_iter, end_time - start_time))
return W, H, n_iter
def non_negative_factorization(X, W=None, H=None, n_components=None,
init='random', update_H=True, solver='cd',
beta_loss='frobenius', tol=1e-4,
max_iter=200, alpha=0., l1_ratio=0.,
regularization=None, random_state=None,
verbose=0, shuffle=False):
"""Compute Non-negative Matrix Factorization (NMF)
Find two non-negative matrices (W, H) whose product approximates the non-
negative matrix X. This factorization can be used for example for
dimensionality reduction, source separation or topic extraction.
The objective function is::
0.5 * ||X - WH||_Fro^2
+ alpha * l1_ratio * ||vec(W)||_1
+ alpha * l1_ratio * ||vec(H)||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
+ 0.5 * alpha * (1 - l1_ratio) * ||H||_Fro^2
Where::
||A||_Fro^2 = \sum_{i,j} A_{ij}^2 (Frobenius norm)
||vec(A)||_1 = \sum_{i,j} abs(A_{ij}) (Elementwise L1 norm)
For multiplicative-update ('mu') solver, the Frobenius norm
(0.5 * ||X - WH||_Fro^2) can be changed into another beta-divergence loss,
by changing the beta_loss parameter.
The objective function is minimized with an alternating minimization of W
and H. If H is given and update_H=False, it solves for W only.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Constant matrix.
W : array-like, shape (n_samples, n_components)
If init='custom', it is used as initial guess for the solution.
H : array-like, shape (n_components, n_features)
If init='custom', it is used as initial guess for the solution.
If update_H=False, it is used as a constant, to solve for W only.
n_components : integer
Number of components, if n_components is not set all features
are kept.
init : None | 'random' | 'nndsvd' | 'nndsvda' | 'nndsvdar' | 'custom'
Method used to initialize the procedure.
Default: 'nndsvd' if n_components < n_features, otherwise random.
Valid options:
- 'random': non-negative random matrices, scaled with:
sqrt(X.mean() / n_components)
- 'nndsvd': Nonnegative Double Singular Value Decomposition (NNDSVD)
initialization (better for sparseness)
- 'nndsvda': NNDSVD with zeros filled with the average of X
(better when sparsity is not desired)
- 'nndsvdar': NNDSVD with zeros filled with small random values
(generally faster, less accurate alternative to NNDSVDa
for when sparsity is not desired)
- 'custom': use custom matrices W and H
update_H : boolean, default: True
Set to True, both W and H will be estimated from initial guesses.
Set to False, only W will be estimated.
solver : 'cd' | 'mu'
Numerical solver to use:
'cd' is a Coordinate Descent solver.
'mu' is a Multiplicative Update solver.
.. versionadded:: 0.17
Coordinate Descent solver.
.. versionadded:: 0.19
Multiplicative Update solver.
beta_loss : float or string, default 'frobenius'
String must be in {'frobenius', 'kullback-leibler', 'itakura-saito'}.
Beta divergence to be minimized, measuring the distance between X
and the dot product WH. Note that values different from 'frobenius'
(or 2) and 'kullback-leibler' (or 1) lead to significantly slower
fits. Note that for beta_loss <= 0 (or 'itakura-saito'), the input
matrix X cannot contain zeros. Used only in 'mu' solver.
.. versionadded:: 0.19
tol : float, default: 1e-4
Tolerance of the stopping condition.
max_iter : integer, default: 200
Maximum number of iterations before timing out.
alpha : double, default: 0.
Constant that multiplies the regularization terms.
l1_ratio : double, default: 0.
The regularization mixing parameter, with 0 <= l1_ratio <= 1.
For l1_ratio = 0 the penalty is an elementwise L2 penalty
(aka Frobenius Norm).
For l1_ratio = 1 it is an elementwise L1 penalty.
For 0 < l1_ratio < 1, the penalty is a combination of L1 and L2.
regularization : 'both' | 'components' | 'transformation' | None
Select whether the regularization affects the components (H), the
transformation (W), both or none of them.
random_state : int, RandomState instance or None, optional, default: None
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : integer, default: 0
The verbosity level.
shuffle : boolean, default: False
If true, randomize the order of coordinates in the CD solver.
Returns
-------
W : array-like, shape (n_samples, n_components)
Solution to the non-negative least squares problem.
H : array-like, shape (n_components, n_features)
Solution to the non-negative least squares problem.
n_iter : int
Actual number of iterations.
Examples
--------
>>> import numpy as np
>>> X = np.array([[1,1], [2, 1], [3, 1.2], [4, 1], [5, 0.8], [6, 1]])
>>> from sklearn.decomposition import non_negative_factorization
>>> W, H, n_iter = non_negative_factorization(X, n_components=2, \
init='random', random_state=0)
References
----------
Cichocki, Andrzej, and P. H. A. N. Anh-Huy. "Fast local algorithms for
large scale nonnegative matrix and tensor factorizations."
IEICE transactions on fundamentals of electronics, communications and
computer sciences 92.3: 708-721, 2009.
Fevotte, C., & Idier, J. (2011). Algorithms for nonnegative matrix
factorization with the beta-divergence. Neural Computation, 23(9).
"""
X = check_array(X, accept_sparse=('csr', 'csc'), dtype=float)
check_non_negative(X, "NMF (input X)")
beta_loss = _check_string_param(solver, regularization, beta_loss, init)
if safe_min(X) == 0 and beta_loss <= 0:
raise ValueError("When beta_loss <= 0 and X contains zeros, "
"the solver may diverge. Please add small values to "
"X, or use a positive beta_loss.")
n_samples, n_features = X.shape
if n_components is None:
n_components = n_features
if not isinstance(n_components, INTEGER_TYPES) or n_components <= 0:
raise ValueError("Number of components must be a positive integer;"
" got (n_components=%r)" % n_components)
if not isinstance(max_iter, INTEGER_TYPES) or max_iter < 0:
raise ValueError("Maximum number of iterations must be a positive "
"integer; got (max_iter=%r)" % max_iter)
if not isinstance(tol, numbers.Number) or tol < 0:
raise ValueError("Tolerance for stopping criteria must be "
"positive; got (tol=%r)" % tol)
# check W and H, or initialize them
if init == 'custom' and update_H:
_check_init(H, (n_components, n_features), "NMF (input H)")
_check_init(W, (n_samples, n_components), "NMF (input W)")
elif not update_H:
_check_init(H, (n_components, n_features), "NMF (input H)")
# 'mu' solver should not be initialized by zeros
if solver == 'mu':
avg = np.sqrt(X.mean() / n_components)
W = avg * np.ones((n_samples, n_components))
else:
W = np.zeros((n_samples, n_components))
else:
W, H = _initialize_nmf(X, n_components, init=init,
random_state=random_state)
l1_reg_W, l1_reg_H, l2_reg_W, l2_reg_H = _compute_regularization(
alpha, l1_ratio, regularization)
if solver == 'cd':
W, H, n_iter = _fit_coordinate_descent(X, W, H, tol, max_iter,
l1_reg_W, l1_reg_H,
l2_reg_W, l2_reg_H,
update_H=update_H,
verbose=verbose,
shuffle=shuffle,
random_state=random_state)
elif solver == 'mu':
W, H, n_iter = _fit_multiplicative_update(X, W, H, beta_loss, max_iter,
tol, l1_reg_W, l1_reg_H,
l2_reg_W, l2_reg_H, update_H,
verbose)
else:
raise ValueError("Invalid solver parameter '%s'." % solver)
if n_iter == max_iter and tol > 0:
warnings.warn("Maximum number of iteration %d reached. Increase it to"
" improve convergence." % max_iter, ConvergenceWarning)
return W, H, n_iter
class NMF(BaseEstimator, TransformerMixin):
"""Non-Negative Matrix Factorization (NMF)
Find two non-negative matrices (W, H) whose product approximates the non-
negative matrix X. This factorization can be used for example for
dimensionality reduction, source separation or topic extraction.
The objective function is::
0.5 * ||X - WH||_Fro^2
+ alpha * l1_ratio * ||vec(W)||_1
+ alpha * l1_ratio * ||vec(H)||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
+ 0.5 * alpha * (1 - l1_ratio) * ||H||_Fro^2
Where::
||A||_Fro^2 = \sum_{i,j} A_{ij}^2 (Frobenius norm)
||vec(A)||_1 = \sum_{i,j} abs(A_{ij}) (Elementwise L1 norm)
For multiplicative-update ('mu') solver, the Frobenius norm
(0.5 * ||X - WH||_Fro^2) can be changed into another beta-divergence loss,
by changing the beta_loss parameter.
The objective function is minimized with an alternating minimization of W
and H.
Read more in the :ref:`User Guide <NMF>`.
Parameters
----------
n_components : int or None
Number of components, if n_components is not set all features
are kept.
init : 'random' | 'nndsvd' | 'nndsvda' | 'nndsvdar' | 'custom'
Method used to initialize the procedure.
Default: 'nndsvd' if n_components < n_features, otherwise random.
Valid options:
- 'random': non-negative random matrices, scaled with:
sqrt(X.mean() / n_components)
- 'nndsvd': Nonnegative Double Singular Value Decomposition (NNDSVD)
initialization (better for sparseness)
- 'nndsvda': NNDSVD with zeros filled with the average of X
(better when sparsity is not desired)
- 'nndsvdar': NNDSVD with zeros filled with small random values
(generally faster, less accurate alternative to NNDSVDa
for when sparsity is not desired)
- 'custom': use custom matrices W and H
solver : 'cd' | 'mu'
Numerical solver to use:
'cd' is a Coordinate Descent solver.
'mu' is a Multiplicative Update solver.
.. versionadded:: 0.17
Coordinate Descent solver.
.. versionadded:: 0.19
Multiplicative Update solver.
beta_loss : float or string, default 'frobenius'
String must be in {'frobenius', 'kullback-leibler', 'itakura-saito'}.
Beta divergence to be minimized, measuring the distance between X
and the dot product WH. Note that values different from 'frobenius'
(or 2) and 'kullback-leibler' (or 1) lead to significantly slower
fits. Note that for beta_loss <= 0 (or 'itakura-saito'), the input
matrix X cannot contain zeros. Used only in 'mu' solver.
.. versionadded:: 0.19
tol : float, default: 1e-4
Tolerance of the stopping condition.
max_iter : integer, default: 200
Maximum number of iterations before timing out.
random_state : int, RandomState instance or None, optional, default: None
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
alpha : double, default: 0.
Constant that multiplies the regularization terms. Set it to zero to
have no regularization.
.. versionadded:: 0.17
*alpha* used in the Coordinate Descent solver.
l1_ratio : double, default: 0.
The regularization mixing parameter, with 0 <= l1_ratio <= 1.
For l1_ratio = 0 the penalty is an elementwise L2 penalty
(aka Frobenius Norm).
For l1_ratio = 1 it is an elementwise L1 penalty.
For 0 < l1_ratio < 1, the penalty is a combination of L1 and L2.
.. versionadded:: 0.17
Regularization parameter *l1_ratio* used in the Coordinate Descent
solver.
shuffle : boolean, default: False
If true, randomize the order of coordinates in the CD solver.
.. versionadded:: 0.17
*shuffle* parameter used in the Coordinate Descent solver.
Attributes
----------
components_ : array, [n_components, n_features]
Factorization matrix, sometimes called 'dictionary'.
reconstruction_err_ : number
Frobenius norm of the matrix difference, or beta-divergence, between
the training data ``X`` and the reconstructed data ``WH`` from
the fitted model.
n_iter_ : int
Actual number of iterations.
Examples
--------
>>> import numpy as np
>>> X = np.array([[1, 1], [2, 1], [3, 1.2], [4, 1], [5, 0.8], [6, 1]])
>>> from sklearn.decomposition import NMF
>>> model = NMF(n_components=2, init='random', random_state=0)
>>> W = model.fit_transform(X)
>>> H = model.components_
References
----------
Cichocki, Andrzej, and P. H. A. N. Anh-Huy. "Fast local algorithms for
large scale nonnegative matrix and tensor factorizations."
IEICE transactions on fundamentals of electronics, communications and
computer sciences 92.3: 708-721, 2009.
Fevotte, C., & Idier, J. (2011). Algorithms for nonnegative matrix
factorization with the beta-divergence. Neural Computation, 23(9).
"""
def __init__(self, n_components=None, init=None, solver='cd',
beta_loss='frobenius', tol=1e-4, max_iter=200,
random_state=None, alpha=0., l1_ratio=0., verbose=0,
shuffle=False):
self.n_components = n_components
self.init = init
self.solver = solver
self.beta_loss = beta_loss
self.tol = tol
self.max_iter = max_iter
self.random_state = random_state
self.alpha = alpha
self.l1_ratio = l1_ratio
self.verbose = verbose
self.shuffle = shuffle
def fit_transform(self, X, y=None, W=None, H=None):
"""Learn a NMF model for the data X and returns the transformed data.
This is more efficient than calling fit followed by transform.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Data matrix to be decomposed
W : array-like, shape (n_samples, n_components)
If init='custom', it is used as initial guess for the solution.
H : array-like, shape (n_components, n_features)
If init='custom', it is used as initial guess for the solution.
Returns
-------
W : array, shape (n_samples, n_components)
Transformed data.
"""
X = check_array(X, accept_sparse=('csr', 'csc'), dtype=float)
W, H, n_iter_ = non_negative_factorization(
X=X, W=W, H=H, n_components=self.n_components, init=self.init,
update_H=True, solver=self.solver, beta_loss=self.beta_loss,
tol=self.tol, max_iter=self.max_iter, alpha=self.alpha,
l1_ratio=self.l1_ratio, regularization='both',
random_state=self.random_state, verbose=self.verbose,
shuffle=self.shuffle)
self.reconstruction_err_ = _beta_divergence(X, W, H, self.beta_loss,
square_root=True)
self.n_components_ = H.shape[0]
self.components_ = H
self.n_iter_ = n_iter_
return W
def fit(self, X, y=None, **params):
"""Learn a NMF model for the data X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Data matrix to be decomposed
Returns
-------
self
"""
self.fit_transform(X, **params)
return self
def transform(self, X):
"""Transform the data X according to the fitted NMF model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Data matrix to be transformed by the model
Returns
-------
W : array, shape (n_samples, n_components)
Transformed data
"""
check_is_fitted(self, 'n_components_')
W, _, n_iter_ = non_negative_factorization(
X=X, W=None, H=self.components_, n_components=self.n_components_,
init=self.init, update_H=False, solver=self.solver,
beta_loss=self.beta_loss, tol=self.tol, max_iter=self.max_iter,
alpha=self.alpha, l1_ratio=self.l1_ratio, regularization='both',
random_state=self.random_state, verbose=self.verbose,
shuffle=self.shuffle)
return W
def inverse_transform(self, W):
"""Transform data back to its original space.
Parameters
----------
W : {array-like, sparse matrix}, shape (n_samples, n_components)
Transformed data matrix
Returns
-------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Data matrix of original shape
.. versionadded:: 0.18
"""
check_is_fitted(self, 'n_components_')
return np.dot(W, self.components_)
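# Informal usage sketch (values illustrative; see the class docstring for a
# doctest-style example):
#     model = NMF(n_components=2, init='random', random_state=0)
#     W = model.fit_transform(X)             # X: any non-negative matrix
#     X_approx = model.inverse_transform(W)  # equals np.dot(W, model.components_)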
| bsd-3-clause |
Fireblend/scikit-learn | sklearn/feature_selection/tests/test_chi2.py | 221 | 2398 | """
Tests for chi2, currently the only feature selection function designed
specifically to work with sparse matrices.
"""
import numpy as np
from scipy.sparse import coo_matrix, csr_matrix
import scipy.stats
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.feature_selection.univariate_selection import _chisquare
from nose.tools import assert_raises
from numpy.testing import assert_equal, assert_array_almost_equal
# Feature 0 is highly informative for class 1;
# feature 1 is the same everywhere;
# feature 2 is a bit informative for class 2.
X = [[2, 1, 2],
[9, 1, 1],
[6, 1, 2],
[0, 1, 2]]
y = [0, 1, 2, 2]
def mkchi2(k):
"""Make k-best chi2 selector"""
return SelectKBest(chi2, k=k)
def test_chi2():
# Test Chi2 feature extraction
chi2 = mkchi2(k=1).fit(X, y)
chi2 = mkchi2(k=1).fit(X, y)
assert_equal(chi2.get_support(indices=True), [0])
assert_equal(chi2.transform(X), np.array(X)[:, [0]])
chi2 = mkchi2(k=2).fit(X, y)
assert_equal(sorted(chi2.get_support(indices=True)), [0, 2])
Xsp = csr_matrix(X, dtype=np.float)
chi2 = mkchi2(k=2).fit(Xsp, y)
assert_equal(sorted(chi2.get_support(indices=True)), [0, 2])
Xtrans = chi2.transform(Xsp)
assert_equal(Xtrans.shape, [Xsp.shape[0], 2])
# == doesn't work on scipy.sparse matrices
Xtrans = Xtrans.toarray()
Xtrans2 = mkchi2(k=2).fit_transform(Xsp, y).toarray()
assert_equal(Xtrans, Xtrans2)
def test_chi2_coo():
# Check that chi2 works with a COO matrix
# (as returned by CountVectorizer, DictVectorizer)
Xcoo = coo_matrix(X)
mkchi2(k=2).fit_transform(Xcoo, y)
# if we got here without an exception, we're safe
def test_chi2_negative():
# Check for proper error on negative numbers in the input X.
X, y = [[0, 1], [-1e-20, 1]], [0, 1]
for X in (X, np.array(X), csr_matrix(X)):
assert_raises(ValueError, chi2, X, y)
def test_chisquare():
# Test replacement for scipy.stats.chisquare against the original.
obs = np.array([[2., 2.],
[1., 1.]])
exp = np.array([[1.5, 1.5],
[1.5, 1.5]])
# call SciPy first because our version overwrites obs
chi_scp, p_scp = scipy.stats.chisquare(obs, exp)
chi_our, p_our = _chisquare(obs, exp)
assert_array_almost_equal(chi_scp, chi_our)
assert_array_almost_equal(p_scp, p_our)
| bsd-3-clause |
marcocaccin/scikit-learn | sklearn/datasets/tests/test_base.py | 33 | 6143 | import os
import shutil
import tempfile
import warnings
import nose
import numpy
from pickle import loads
from pickle import dumps
from sklearn.datasets import get_data_home
from sklearn.datasets import clear_data_home
from sklearn.datasets import load_files
from sklearn.datasets import load_sample_images
from sklearn.datasets import load_sample_image
from sklearn.datasets import load_digits
from sklearn.datasets import load_diabetes
from sklearn.datasets import load_linnerud
from sklearn.datasets import load_iris
from sklearn.datasets import load_breast_cancer
from sklearn.datasets import load_boston
from sklearn.datasets.base import Bunch
from sklearn.externals.six import b, u
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
DATA_HOME = tempfile.mkdtemp(prefix="scikit_learn_data_home_test_")
LOAD_FILES_ROOT = tempfile.mkdtemp(prefix="scikit_learn_load_files_test_")
TEST_CATEGORY_DIR1 = ""
TEST_CATEGORY_DIR2 = ""
def _remove_dir(path):
if os.path.isdir(path):
shutil.rmtree(path)
def teardown_module():
"""Test fixture (clean up) run once after all tests of this module"""
for path in [DATA_HOME, LOAD_FILES_ROOT]:
_remove_dir(path)
def setup_load_files():
global TEST_CATEGORY_DIR1
global TEST_CATEGORY_DIR2
TEST_CATEGORY_DIR1 = tempfile.mkdtemp(dir=LOAD_FILES_ROOT)
TEST_CATEGORY_DIR2 = tempfile.mkdtemp(dir=LOAD_FILES_ROOT)
sample_file = tempfile.NamedTemporaryFile(dir=TEST_CATEGORY_DIR1,
delete=False)
sample_file.write(b("Hello World!\n"))
sample_file.close()
def teardown_load_files():
_remove_dir(TEST_CATEGORY_DIR1)
_remove_dir(TEST_CATEGORY_DIR2)
def test_data_home():
# get_data_home will point to a pre-existing folder
data_home = get_data_home(data_home=DATA_HOME)
assert_equal(data_home, DATA_HOME)
assert_true(os.path.exists(data_home))
    # clear_data_home will delete both the content and the folder itself
clear_data_home(data_home=data_home)
assert_false(os.path.exists(data_home))
# if the folder is missing it will be created again
data_home = get_data_home(data_home=DATA_HOME)
assert_true(os.path.exists(data_home))
def test_default_empty_load_files():
res = load_files(LOAD_FILES_ROOT)
assert_equal(len(res.filenames), 0)
assert_equal(len(res.target_names), 0)
assert_equal(res.DESCR, None)
@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_default_load_files():
res = load_files(LOAD_FILES_ROOT)
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 2)
assert_equal(res.DESCR, None)
assert_equal(res.data, [b("Hello World!\n")])
@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_load_files_w_categories_desc_and_encoding():
category = os.path.abspath(TEST_CATEGORY_DIR1).split('/').pop()
res = load_files(LOAD_FILES_ROOT, description="test",
categories=category, encoding="utf-8")
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 1)
assert_equal(res.DESCR, "test")
assert_equal(res.data, [u("Hello World!\n")])
@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_load_files_wo_load_content():
res = load_files(LOAD_FILES_ROOT, load_content=False)
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 2)
assert_equal(res.DESCR, None)
assert_equal(res.get('data'), None)
def test_load_sample_images():
try:
res = load_sample_images()
assert_equal(len(res.images), 2)
assert_equal(len(res.filenames), 2)
assert_true(res.DESCR)
except ImportError:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_digits():
digits = load_digits()
assert_equal(digits.data.shape, (1797, 64))
assert_equal(numpy.unique(digits.target).size, 10)
def test_load_digits_n_class_lt_10():
digits = load_digits(9)
assert_equal(digits.data.shape, (1617, 64))
assert_equal(numpy.unique(digits.target).size, 9)
def test_load_sample_image():
try:
china = load_sample_image('china.jpg')
assert_equal(china.dtype, 'uint8')
assert_equal(china.shape, (427, 640, 3))
except ImportError:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_missing_sample_image_error():
have_PIL = True
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
except ImportError:
have_PIL = False
if have_PIL:
assert_raises(AttributeError, load_sample_image,
'blop.jpg')
else:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_diabetes():
res = load_diabetes()
assert_equal(res.data.shape, (442, 10))
    assert_equal(res.target.size, 442)
def test_load_linnerud():
res = load_linnerud()
assert_equal(res.data.shape, (20, 3))
assert_equal(res.target.shape, (20, 3))
assert_equal(len(res.target_names), 3)
assert_true(res.DESCR)
def test_load_iris():
res = load_iris()
assert_equal(res.data.shape, (150, 4))
assert_equal(res.target.size, 150)
assert_equal(res.target_names.size, 3)
assert_true(res.DESCR)
def test_load_breast_cancer():
res = load_breast_cancer()
assert_equal(res.data.shape, (569, 30))
assert_equal(res.target.size, 569)
assert_equal(res.target_names.size, 2)
assert_true(res.DESCR)
def test_load_boston():
res = load_boston()
assert_equal(res.data.shape, (506, 13))
assert_equal(res.target.size, 506)
assert_equal(res.feature_names.size, 13)
assert_true(res.DESCR)
def test_loads_dumps_bunch():
bunch = Bunch(x="x")
bunch_from_pkl = loads(dumps(bunch))
bunch_from_pkl.x = "y"
assert_equal(bunch_from_pkl['x'], bunch_from_pkl.x)
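# Illustrative sketch (not part of the original test module): Bunch exposes the
# same entries through attribute access and dict-style access, which is what
# the pickle round-trip test above relies on. The helper name is ours and is
# never run by the test suite.
def _example_bunch_access_sketch():
    b = Bunch(data=[1, 2, 3], target_names=['a', 'b'])
    assert b.data == b['data']
    b.description = "toy bunch"      # attribute writes show up as dict keys
    assert b['description'] == "toy bunch"
    return b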
| bsd-3-clause |
jorik041/scikit-learn | sklearn/linear_model/logistic.py | 105 | 56686 | """
Logistic Regression
"""
# Author: Gael Varoquaux <[email protected]>
# Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Manoj Kumar <[email protected]>
# Lars Buitinck
# Simon Wu <[email protected]>
import numbers
import warnings
import numpy as np
from scipy import optimize, sparse
from .base import LinearClassifierMixin, SparseCoefMixin, BaseEstimator
from ..feature_selection.from_model import _LearntSelectorMixin
from ..preprocessing import LabelEncoder, LabelBinarizer
from ..svm.base import _fit_liblinear
from ..utils import check_array, check_consistent_length, compute_class_weight
from ..utils import check_random_state
from ..utils.extmath import (logsumexp, log_logistic, safe_sparse_dot,
squared_norm)
from ..utils.optimize import newton_cg
from ..utils.validation import (as_float_array, DataConversionWarning,
check_X_y)
from ..utils.fixes import expit
from ..externals.joblib import Parallel, delayed
from ..cross_validation import check_cv
from ..externals import six
from ..metrics import SCORERS
# .. some helper functions for logistic_regression_path ..
def _intercept_dot(w, X, y):
"""Computes y * np.dot(X, w).
It takes into consideration if the intercept should be fit or not.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
"""
c = 0.
if w.size == X.shape[1] + 1:
c = w[-1]
w = w[:-1]
z = safe_sparse_dot(X, w) + c
return w, c, y * z
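# Small illustrative sketch (not part of the original module): with a weight
# vector of size n_features + 1, _intercept_dot treats the last entry as the
# intercept, so yz comes back as y * (X.dot(w[:-1]) + w[-1]). The helper name
# is ours and is never called by the library.
def _example_intercept_dot_sketch():
    X = np.array([[1., 2.], [3., 4.]])
    y = np.array([1., -1.])
    w = np.array([0.5, -0.25, 2.0])          # last entry is the intercept
    w_out, c, yz = _intercept_dot(w, X, y)
    expected = y * (X.dot(w[:2]) + 2.0)      # same quantity computed by hand
    assert np.allclose(yz, expected) and c == 2.0
    return w_out, c, yz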
def _logistic_loss_and_grad(w, X, y, alpha, sample_weight=None):
"""Computes the logistic loss and gradient.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : ndarray, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
out : float
Logistic loss.
grad : ndarray, shape (n_features,) or (n_features + 1,)
Logistic gradient.
"""
_, n_features = X.shape
grad = np.empty_like(w)
w, c, yz = _intercept_dot(w, X, y)
if sample_weight is None:
sample_weight = np.ones(y.shape[0])
# Logistic loss is the negative of the log of the logistic function.
out = -np.sum(sample_weight * log_logistic(yz)) + .5 * alpha * np.dot(w, w)
z = expit(yz)
z0 = sample_weight * (z - 1) * y
grad[:n_features] = safe_sparse_dot(X.T, z0) + alpha * w
# Case where we fit the intercept.
if grad.shape[0] > n_features:
grad[-1] = z0.sum()
return out, grad
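# Illustrative sketch (not part of the original module): a finite-difference
# check that the analytic gradient returned by _logistic_loss_and_grad matches
# the numerical gradient of _logistic_loss (defined just below) on a tiny
# random problem. The helper is ours and is never invoked by sklearn.
def _example_logistic_grad_check_sketch():
    rng = np.random.RandomState(0)
    X = rng.randn(6, 3)
    y = np.sign(rng.randn(6))
    w = rng.randn(4)                         # last entry acts as the intercept
    alpha = 1.0
    loss, grad = _logistic_loss_and_grad(w, X, y, alpha)
    eps = 1e-6
    num_grad = np.empty_like(w)
    for i in range(w.size):
        w_eps = w.copy()
        w_eps[i] += eps
        num_grad[i] = (_logistic_loss(w_eps, X, y, alpha) - loss) / eps
    assert np.allclose(grad, num_grad, rtol=1e-3, atol=1e-3)
    return grad, num_grad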
def _logistic_loss(w, X, y, alpha, sample_weight=None):
"""Computes the logistic loss.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : ndarray, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
out : float
Logistic loss.
"""
w, c, yz = _intercept_dot(w, X, y)
if sample_weight is None:
sample_weight = np.ones(y.shape[0])
# Logistic loss is the negative of the log of the logistic function.
out = -np.sum(sample_weight * log_logistic(yz)) + .5 * alpha * np.dot(w, w)
return out
def _logistic_grad_hess(w, X, y, alpha, sample_weight=None):
"""Computes the gradient and the Hessian, in the case of a logistic loss.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : ndarray, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
grad : ndarray, shape (n_features,) or (n_features + 1,)
Logistic gradient.
Hs : callable
Function that takes the gradient as a parameter and returns the
matrix product of the Hessian and gradient.
"""
n_samples, n_features = X.shape
grad = np.empty_like(w)
fit_intercept = grad.shape[0] > n_features
w, c, yz = _intercept_dot(w, X, y)
if sample_weight is None:
sample_weight = np.ones(y.shape[0])
z = expit(yz)
z0 = sample_weight * (z - 1) * y
grad[:n_features] = safe_sparse_dot(X.T, z0) + alpha * w
# Case where we fit the intercept.
if fit_intercept:
grad[-1] = z0.sum()
# The mat-vec product of the Hessian
d = sample_weight * z * (1 - z)
if sparse.issparse(X):
dX = safe_sparse_dot(sparse.dia_matrix((d, 0),
shape=(n_samples, n_samples)), X)
else:
# Precompute as much as possible
dX = d[:, np.newaxis] * X
if fit_intercept:
# Calculate the double derivative with respect to intercept
# In the case of sparse matrices this returns a matrix object.
dd_intercept = np.squeeze(np.array(dX.sum(axis=0)))
def Hs(s):
ret = np.empty_like(s)
ret[:n_features] = X.T.dot(dX.dot(s[:n_features]))
ret[:n_features] += alpha * s[:n_features]
# For the fit intercept case.
if fit_intercept:
ret[:n_features] += s[-1] * dd_intercept
ret[-1] = dd_intercept.dot(s[:n_features])
ret[-1] += d.sum() * s[-1]
return ret
return grad, Hs
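# Illustrative sketch (not part of the original module): checks the
# Hessian-vector product returned by _logistic_grad_hess against a finite
# difference of the gradient, i.e. Hs(s) ~ (g(w + eps * s) - g(w)) / eps.
# The helper is ours and is never invoked by sklearn.
def _example_logistic_hessp_check_sketch():
    rng = np.random.RandomState(1)
    X = rng.randn(8, 3)
    y = np.sign(rng.randn(8))
    w = rng.randn(4)                         # with intercept term
    alpha = 0.5
    grad, Hs = _logistic_grad_hess(w, X, y, alpha)
    s = rng.randn(4)
    eps = 1e-6
    _, grad_eps = _logistic_loss_and_grad(w + eps * s, X, y, alpha)
    assert np.allclose(Hs(s), (grad_eps - grad) / eps, rtol=1e-3, atol=1e-3)
    return Hs(s)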
def _multinomial_loss(w, X, Y, alpha, sample_weight):
"""Computes multinomial loss and class probabilities.
Parameters
----------
w : ndarray, shape (n_classes * n_features,) or (n_classes * (n_features + 1),)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Y : ndarray, shape (n_samples, n_classes)
Transformed labels according to the output of LabelBinarizer.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : ndarray, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
loss : float
Multinomial loss.
p : ndarray, shape (n_samples, n_classes)
Estimated class probabilities.
w : ndarray, shape (n_classes, n_features)
Reshaped param vector excluding intercept terms.
"""
n_classes = Y.shape[1]
n_features = X.shape[1]
fit_intercept = w.size == (n_classes * (n_features + 1))
w = w.reshape(n_classes, -1)
sample_weight = sample_weight[:, np.newaxis]
if fit_intercept:
intercept = w[:, -1]
w = w[:, :-1]
else:
intercept = 0
p = safe_sparse_dot(X, w.T)
p += intercept
p -= logsumexp(p, axis=1)[:, np.newaxis]
loss = -(sample_weight * Y * p).sum()
loss += 0.5 * alpha * squared_norm(w)
p = np.exp(p, p)
return loss, p, w
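# Illustrative sketch (not part of the original module): with a one-hot Y the
# probabilities returned by _multinomial_loss form a proper distribution over
# classes (each row sums to one) and the returned w has the intercept column
# stripped. The helper is ours and is never called by sklearn.
def _example_multinomial_loss_sketch():
    rng = np.random.RandomState(2)
    n_samples, n_features, n_classes = 5, 3, 4
    X = rng.randn(n_samples, n_features)
    Y = np.eye(n_classes)[rng.randint(n_classes, size=n_samples)]  # one-hot
    w = rng.randn(n_classes * (n_features + 1))                    # with intercepts
    loss, p, w_out = _multinomial_loss(w, X, Y, 1.0, np.ones(n_samples))
    assert np.allclose(p.sum(axis=1), 1.0)
    assert w_out.shape == (n_classes, n_features)
    return loss, p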
def _multinomial_loss_grad(w, X, Y, alpha, sample_weight):
"""Computes the multinomial loss, gradient and class probabilities.
Parameters
----------
w : ndarray, shape (n_classes * n_features,) or (n_classes * (n_features + 1),)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Y : ndarray, shape (n_samples, n_classes)
Transformed labels according to the output of LabelBinarizer.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : ndarray, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
Returns
-------
loss : float
Multinomial loss.
grad : ndarray, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Ravelled gradient of the multinomial loss.
p : ndarray, shape (n_samples, n_classes)
Estimated class probabilities
"""
n_classes = Y.shape[1]
n_features = X.shape[1]
fit_intercept = (w.size == n_classes * (n_features + 1))
grad = np.zeros((n_classes, n_features + bool(fit_intercept)))
loss, p, w = _multinomial_loss(w, X, Y, alpha, sample_weight)
sample_weight = sample_weight[:, np.newaxis]
diff = sample_weight * (p - Y)
grad[:, :n_features] = safe_sparse_dot(diff.T, X)
grad[:, :n_features] += alpha * w
if fit_intercept:
grad[:, -1] = diff.sum(axis=0)
return loss, grad.ravel(), p
def _multinomial_grad_hess(w, X, Y, alpha, sample_weight):
"""
Computes the gradient and the Hessian, in the case of a multinomial loss.
Parameters
----------
w : ndarray, shape (n_classes * n_features,) or (n_classes * (n_features + 1),)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Y : ndarray, shape (n_samples, n_classes)
Transformed labels according to the output of LabelBinarizer.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : ndarray, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
Returns
-------
grad : array, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Ravelled gradient of the multinomial loss.
hessp : callable
Function that takes in a vector input of shape (n_classes * n_features)
or (n_classes * (n_features + 1)) and returns matrix-vector product
with hessian.
References
----------
Barak A. Pearlmutter (1993). Fast Exact Multiplication by the Hessian.
http://www.bcl.hamilton.ie/~barak/papers/nc-hessian.pdf
"""
n_features = X.shape[1]
n_classes = Y.shape[1]
fit_intercept = w.size == (n_classes * (n_features + 1))
# `loss` is unused. Refactoring to avoid computing it does not
# significantly speed up the computation and decreases readability
loss, grad, p = _multinomial_loss_grad(w, X, Y, alpha, sample_weight)
sample_weight = sample_weight[:, np.newaxis]
# Hessian-vector product derived by applying the R-operator on the gradient
# of the multinomial loss function.
def hessp(v):
v = v.reshape(n_classes, -1)
if fit_intercept:
inter_terms = v[:, -1]
v = v[:, :-1]
else:
inter_terms = 0
# r_yhat holds the result of applying the R-operator on the multinomial
# estimator.
r_yhat = safe_sparse_dot(X, v.T)
r_yhat += inter_terms
r_yhat += (-p * r_yhat).sum(axis=1)[:, np.newaxis]
r_yhat *= p
r_yhat *= sample_weight
hessProd = np.zeros((n_classes, n_features + bool(fit_intercept)))
hessProd[:, :n_features] = safe_sparse_dot(r_yhat.T, X)
hessProd[:, :n_features] += v * alpha
if fit_intercept:
hessProd[:, -1] = r_yhat.sum(axis=0)
return hessProd.ravel()
return grad, hessp
def _check_solver_option(solver, multi_class, penalty, dual):
if solver not in ['liblinear', 'newton-cg', 'lbfgs']:
raise ValueError("Logistic Regression supports only liblinear,"
" newton-cg and lbfgs solvers, got %s" % solver)
if multi_class not in ['multinomial', 'ovr']:
raise ValueError("multi_class should be either multinomial or "
"ovr, got %s" % multi_class)
if multi_class == 'multinomial' and solver == 'liblinear':
raise ValueError("Solver %s does not support "
"a multinomial backend." % solver)
if solver != 'liblinear':
if penalty != 'l2':
raise ValueError("Solver %s supports only l2 penalties, "
"got %s penalty." % (solver, penalty))
if dual:
raise ValueError("Solver %s supports only "
"dual=False, got dual=%s" % (solver, dual))
def logistic_regression_path(X, y, pos_class=None, Cs=10, fit_intercept=True,
max_iter=100, tol=1e-4, verbose=0,
solver='lbfgs', coef=None, copy=True,
class_weight=None, dual=False, penalty='l2',
intercept_scaling=1., multi_class='ovr',
random_state=None):
"""Compute a Logistic Regression model for a list of regularization
parameters.
This is an implementation that uses the result of the previous model
to speed up computations along the set of solutions, making it faster
than sequentially calling LogisticRegression for the different parameters.
Read more in the :ref:`User Guide <logistic_regression>`.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,)
Input data, target values.
Cs : int | array-like, shape (n_cs,)
List of values for the regularization parameter or integer specifying
the number of regularization parameters that should be used. In this
case, the parameters will be chosen in a logarithmic scale between
1e-4 and 1e4.
pos_class : int, None
The class with respect to which we perform a one-vs-all fit.
If None, then it is assumed that the given problem is binary.
fit_intercept : bool
Whether to fit an intercept for the model. In this case the shape of
the returned array is (n_cs, n_features + 1).
max_iter : int
Maximum number of iterations for the solver.
tol : float
Stopping criterion. For the newton-cg and lbfgs solvers, the iteration
will stop when ``max{|g_i | i = 1, ..., n} <= tol``
where ``g_i`` is the i-th component of the gradient.
verbose : int
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
solver : {'lbfgs', 'newton-cg', 'liblinear'}
Numerical solver to use.
coef : array-like, shape (n_features,), default None
Initialization value for coefficients of logistic regression.
copy : bool, default True
Whether or not to produce a copy of the data. Setting this to
        True will be useful in cases when logistic_regression_path
is called repeatedly with the same data, as y is modified
along the path.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The newton-cg and
lbfgs solvers support only l2 penalties.
intercept_scaling : float, default 1.
This parameter is useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
multi_class : str, {'ovr', 'multinomial'}
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
the entire probability distribution. Works only for the 'lbfgs' and
'newton-cg' solvers.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
Returns
-------
coefs : ndarray, shape (n_cs, n_features) or (n_cs, n_features + 1)
List of coefficients for the Logistic Regression model. If
fit_intercept is set to True then the second dimension will be
n_features + 1, where the last item represents the intercept.
Cs : ndarray
Grid of Cs used for cross-validation.
Notes
-----
    You might get slightly different results with the solver liblinear than
with the others since this uses LIBLINEAR which penalizes the intercept.
"""
if isinstance(Cs, numbers.Integral):
Cs = np.logspace(-4, 4, Cs)
_check_solver_option(solver, multi_class, penalty, dual)
# Preprocessing.
X = check_array(X, accept_sparse='csr', dtype=np.float64)
y = check_array(y, ensure_2d=False, copy=copy, dtype=None)
_, n_features = X.shape
check_consistent_length(X, y)
classes = np.unique(y)
random_state = check_random_state(random_state)
if pos_class is None and multi_class != 'multinomial':
if (classes.size > 2):
raise ValueError('To fit OvR, use the pos_class argument')
# np.unique(y) gives labels in sorted order.
pos_class = classes[1]
# If class_weights is a dict (provided by the user), the weights
# are assigned to the original labels. If it is "auto", then
    # the class_weights are assigned after masking the labels with an OvR.
sample_weight = np.ones(X.shape[0])
le = LabelEncoder()
if isinstance(class_weight, dict):
if solver == "liblinear":
if classes.size == 2:
# Reconstruct the weights with keys 1 and -1
temp = {1: class_weight[pos_class],
-1: class_weight[classes[0]]}
class_weight = temp.copy()
else:
raise ValueError("In LogisticRegressionCV the liblinear "
"solver cannot handle multiclass with "
"class_weight of type dict. Use the lbfgs, "
"newton-cg solvers or set "
"class_weight='auto'")
else:
class_weight_ = compute_class_weight(class_weight, classes, y)
sample_weight = class_weight_[le.fit_transform(y)]
# For doing a ovr, we need to mask the labels first. for the
# multinomial case this is not necessary.
if multi_class == 'ovr':
w0 = np.zeros(n_features + int(fit_intercept))
mask_classes = [-1, 1]
mask = (y == pos_class)
y[mask] = 1
y[~mask] = -1
# To take care of object dtypes, i.e 1 and -1 are in the form of
# strings.
y = as_float_array(y, copy=False)
else:
lbin = LabelBinarizer()
Y_bin = lbin.fit_transform(y)
if Y_bin.shape[1] == 1:
Y_bin = np.hstack([1 - Y_bin, Y_bin])
w0 = np.zeros((Y_bin.shape[1], n_features + int(fit_intercept)),
order='F')
mask_classes = classes
if class_weight == "auto":
class_weight_ = compute_class_weight(class_weight, mask_classes, y)
sample_weight = class_weight_[le.fit_transform(y)]
if coef is not None:
# it must work both giving the bias term and not
if multi_class == 'ovr':
if coef.size not in (n_features, w0.size):
raise ValueError(
'Initialization coef is of shape %d, expected shape '
'%d or %d' % (coef.size, n_features, w0.size))
w0[:coef.size] = coef
else:
# For binary problems coef.shape[0] should be 1, otherwise it
# should be classes.size.
n_vectors = classes.size
if n_vectors == 2:
n_vectors = 1
if (coef.shape[0] != n_vectors or
coef.shape[1] not in (n_features, n_features + 1)):
raise ValueError(
'Initialization coef is of shape (%d, %d), expected '
'shape (%d, %d) or (%d, %d)' % (
coef.shape[0], coef.shape[1], classes.size,
n_features, classes.size, n_features + 1))
w0[:, :coef.shape[1]] = coef
if multi_class == 'multinomial':
# fmin_l_bfgs_b and newton-cg accepts only ravelled parameters.
w0 = w0.ravel()
target = Y_bin
if solver == 'lbfgs':
func = lambda x, *args: _multinomial_loss_grad(x, *args)[0:2]
elif solver == 'newton-cg':
func = lambda x, *args: _multinomial_loss(x, *args)[0]
grad = lambda x, *args: _multinomial_loss_grad(x, *args)[1]
hess = _multinomial_grad_hess
else:
target = y
if solver == 'lbfgs':
func = _logistic_loss_and_grad
elif solver == 'newton-cg':
func = _logistic_loss
grad = lambda x, *args: _logistic_loss_and_grad(x, *args)[1]
hess = _logistic_grad_hess
coefs = list()
for C in Cs:
if solver == 'lbfgs':
try:
w0, loss, info = optimize.fmin_l_bfgs_b(
func, w0, fprime=None,
args=(X, target, 1. / C, sample_weight),
iprint=(verbose > 0) - 1, pgtol=tol, maxiter=max_iter)
except TypeError:
# old scipy doesn't have maxiter
w0, loss, info = optimize.fmin_l_bfgs_b(
func, w0, fprime=None,
args=(X, target, 1. / C, sample_weight),
iprint=(verbose > 0) - 1, pgtol=tol)
if info["warnflag"] == 1 and verbose > 0:
warnings.warn("lbfgs failed to converge. Increase the number "
"of iterations.")
elif solver == 'newton-cg':
args = (X, target, 1. / C, sample_weight)
w0 = newton_cg(hess, func, grad, w0, args=args, maxiter=max_iter,
tol=tol)
elif solver == 'liblinear':
coef_, intercept_, _, = _fit_liblinear(
X, y, C, fit_intercept, intercept_scaling, class_weight,
penalty, dual, verbose, max_iter, tol, random_state)
if fit_intercept:
w0 = np.concatenate([coef_.ravel(), intercept_])
else:
w0 = coef_.ravel()
else:
raise ValueError("solver must be one of {'liblinear', 'lbfgs', "
"'newton-cg'}, got '%s' instead" % solver)
if multi_class == 'multinomial':
multi_w0 = np.reshape(w0, (classes.size, -1))
if classes.size == 2:
multi_w0 = multi_w0[1][np.newaxis, :]
coefs.append(multi_w0)
else:
coefs.append(w0)
return coefs, np.array(Cs)
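# Illustrative usage sketch (not part of the original module): runs
# logistic_regression_path on a toy binary problem for three values of C and
# checks the shape of the returned coefficients. The data and C values below
# are arbitrary.
def _example_logistic_regression_path_sketch():
    rng = np.random.RandomState(3)
    X = rng.randn(20, 4)
    y = (X[:, 0] + 0.1 * rng.randn(20) > 0).astype(np.float64)
    coefs, Cs = logistic_regression_path(X, y, Cs=[0.1, 1.0, 10.0],
                                         fit_intercept=True, solver='lbfgs')
    # One coefficient vector per C, with one extra entry for the intercept.
    assert len(coefs) == 3 and coefs[0].shape == (5,)
    return coefs, Cs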
# helper function for LogisticCV
def _log_reg_scoring_path(X, y, train, test, pos_class=None, Cs=10,
scoring=None, fit_intercept=False,
max_iter=100, tol=1e-4, class_weight=None,
verbose=0, solver='lbfgs', penalty='l2',
dual=False, copy=True, intercept_scaling=1.,
multi_class='ovr'):
"""Computes scores across logistic_regression_path
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target labels.
train : list of indices
The indices of the train set.
test : list of indices
The indices of the test set.
pos_class : int, None
The class with respect to which we perform a one-vs-all fit.
If None, then it is assumed that the given problem is binary.
Cs : list of floats | int
Each of the values in Cs describes the inverse of
        regularization strength. If Cs is an int, then a grid of Cs
        values is chosen in a logarithmic scale between 1e-4 and 1e4.
        If not provided, then a fixed set of values for Cs is used.
scoring : callable
For a list of scoring functions that can be used, look at
:mod:`sklearn.metrics`. The default scoring option used is
accuracy_score.
fit_intercept : bool
If False, then the bias term is set to zero. Else the last
term of each coef_ gives us the intercept.
max_iter : int
Maximum number of iterations for the solver.
tol : float
Tolerance for stopping criteria.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
verbose : int
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
solver : {'lbfgs', 'newton-cg', 'liblinear'}
Decides which solver to use.
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The newton-cg and
lbfgs solvers support only l2 penalties.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
intercept_scaling : float, default 1.
This parameter is useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
multi_class : str, {'ovr', 'multinomial'}
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
the entire probability distribution. Works only for the 'lbfgs'
solver.
copy : bool, default True
Whether or not to produce a copy of the data. Setting this to
        True will be useful in cases when ``_log_reg_scoring_path`` is called
repeatedly with the same data, as y is modified along the path.
Returns
-------
coefs : ndarray, shape (n_cs, n_features) or (n_cs, n_features + 1)
List of coefficients for the Logistic Regression model. If
fit_intercept is set to True then the second dimension will be
n_features + 1, where the last item represents the intercept.
Cs : ndarray
Grid of Cs used for cross-validation.
scores : ndarray, shape (n_cs,)
Scores obtained for each Cs.
"""
_check_solver_option(solver, multi_class, penalty, dual)
log_reg = LogisticRegression(fit_intercept=fit_intercept)
X_train = X[train]
X_test = X[test]
y_train = y[train]
y_test = y[test]
# The score method of Logistic Regression has a classes_ attribute.
if multi_class == 'ovr':
log_reg.classes_ = np.array([-1, 1])
elif multi_class == 'multinomial':
log_reg.classes_ = np.unique(y_train)
else:
raise ValueError("multi_class should be either multinomial or ovr, "
"got %d" % multi_class)
if pos_class is not None:
mask = (y_test == pos_class)
y_test[mask] = 1
y_test[~mask] = -1
# To deal with object dtypes, we need to convert into an array of floats.
y_test = as_float_array(y_test, copy=False)
coefs, Cs = logistic_regression_path(X_train, y_train, Cs=Cs,
fit_intercept=fit_intercept,
solver=solver,
max_iter=max_iter,
class_weight=class_weight,
copy=copy, pos_class=pos_class,
multi_class=multi_class,
tol=tol, verbose=verbose,
dual=dual, penalty=penalty,
intercept_scaling=intercept_scaling)
scores = list()
if isinstance(scoring, six.string_types):
scoring = SCORERS[scoring]
for w in coefs:
if multi_class == 'ovr':
w = w[np.newaxis, :]
if fit_intercept:
log_reg.coef_ = w[:, :-1]
log_reg.intercept_ = w[:, -1]
else:
log_reg.coef_ = w
log_reg.intercept_ = 0.
if scoring is None:
scores.append(log_reg.score(X_test, y_test))
else:
scores.append(scoring(log_reg, X_test, y_test))
return coefs, Cs, np.array(scores)
class LogisticRegression(BaseEstimator, LinearClassifierMixin,
_LearntSelectorMixin, SparseCoefMixin):
"""Logistic Regression (aka logit, MaxEnt) classifier.
In the multiclass case, the training algorithm uses the one-vs-rest (OvR)
scheme if the 'multi_class' option is set to 'ovr' and uses the
cross-entropy loss, if the 'multi_class' option is set to 'multinomial'.
(Currently the 'multinomial' option is supported only by the 'lbfgs' and
'newton-cg' solvers.)
This class implements regularized logistic regression using the
`liblinear` library, newton-cg and lbfgs solvers. It can handle both
dense and sparse input. Use C-ordered arrays or CSR matrices containing
64-bit floats for optimal performance; any other input format will be
converted (and copied).
The newton-cg and lbfgs solvers support only L2 regularization with primal
formulation. The liblinear solver supports both L1 and L2 regularization,
with a dual formulation only for the L2 penalty.
Read more in the :ref:`User Guide <logistic_regression>`.
Parameters
----------
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The newton-cg and
lbfgs solvers support only l2 penalties.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
C : float, optional (default=1.0)
Inverse of regularization strength; must be a positive float.
Like in support vector machines, smaller values specify stronger
regularization.
fit_intercept : bool, default: True
Specifies if a constant (a.k.a. bias or intercept) should be
        added to the decision function.
intercept_scaling : float, default: 1
Useful only if solver is liblinear.
when self.fit_intercept is True, instance vector x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
max_iter : int
Useful only for the newton-cg and lbfgs solvers. Maximum number of
iterations taken for the solvers to converge.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
solver : {'newton-cg', 'lbfgs', 'liblinear'}
Algorithm to use in the optimization problem.
tol : float, optional
Tolerance for stopping criteria.
multi_class : str, {'ovr', 'multinomial'}
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
        the entire probability distribution. Works only for the 'lbfgs'
        and 'newton-cg' solvers.
verbose : int
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
Attributes
----------
coef_ : array, shape (n_classes, n_features)
Coefficient of the features in the decision function.
intercept_ : array, shape (n_classes,)
Intercept (a.k.a. bias) added to the decision function.
If `fit_intercept` is set to False, the intercept is set to zero.
n_iter_ : int
Maximum of the actual number of iterations across all classes.
Valid only for the liblinear solver.
See also
--------
SGDClassifier : incrementally trained logistic regression (when given
the parameter ``loss="log"``).
sklearn.svm.LinearSVC : learns SVM models using the same algorithm.
Notes
-----
The underlying C implementation uses a random number generator to
select features when fitting the model. It is thus not uncommon,
to have slightly different results for the same input data. If
that happens, try with a smaller tol parameter.
Predict output may not match that of standalone liblinear in certain
cases. See :ref:`differences from liblinear <liblinear_differences>`
in the narrative documentation.
References
----------
LIBLINEAR -- A Library for Large Linear Classification
http://www.csie.ntu.edu.tw/~cjlin/liblinear/
Hsiang-Fu Yu, Fang-Lan Huang, Chih-Jen Lin (2011). Dual coordinate descent
methods for logistic regression and maximum entropy models.
Machine Learning 85(1-2):41-75.
http://www.csie.ntu.edu.tw/~cjlin/papers/maxent_dual.pdf
"""
def __init__(self, penalty='l2', dual=False, tol=1e-4, C=1.0,
fit_intercept=True, intercept_scaling=1, class_weight=None,
random_state=None, solver='liblinear', max_iter=100,
multi_class='ovr', verbose=0):
self.penalty = penalty
self.dual = dual
self.tol = tol
self.C = C
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.class_weight = class_weight
self.random_state = random_state
self.solver = solver
self.max_iter = max_iter
self.multi_class = multi_class
self.verbose = verbose
def fit(self, X, y):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target vector relative to X.
Returns
-------
self : object
Returns self.
"""
if not isinstance(self.C, numbers.Number) or self.C < 0:
raise ValueError("Penalty term must be positive; got (C=%r)"
% self.C)
if not isinstance(self.max_iter, numbers.Number) or self.max_iter < 0:
raise ValueError("Maximum number of iteration must be positive;"
" got (max_iter=%r)" % self.max_iter)
if not isinstance(self.tol, numbers.Number) or self.tol < 0:
raise ValueError("Tolerance for stopping criteria must be "
"positive; got (tol=%r)" % self.tol)
X, y = check_X_y(X, y, accept_sparse='csr', dtype=np.float64, order="C")
self.classes_ = np.unique(y)
_check_solver_option(self.solver, self.multi_class, self.penalty,
self.dual)
if self.solver == 'liblinear':
self.coef_, self.intercept_, self.n_iter_ = _fit_liblinear(
X, y, self.C, self.fit_intercept, self.intercept_scaling,
self.class_weight, self.penalty, self.dual, self.verbose,
self.max_iter, self.tol, self.random_state)
return self
n_classes = len(self.classes_)
classes_ = self.classes_
if n_classes < 2:
raise ValueError("This solver needs samples of at least 2 classes"
" in the data, but the data contains only one"
" class: %r" % classes_[0])
if len(self.classes_) == 2:
n_classes = 1
classes_ = classes_[1:]
self.coef_ = list()
self.intercept_ = np.zeros(n_classes)
# Hack so that we iterate only once for the multinomial case.
if self.multi_class == 'multinomial':
classes_ = [None]
for ind, class_ in enumerate(classes_):
coef_, _ = logistic_regression_path(
X, y, pos_class=class_, Cs=[self.C],
fit_intercept=self.fit_intercept, tol=self.tol,
verbose=self.verbose, solver=self.solver,
multi_class=self.multi_class, max_iter=self.max_iter,
class_weight=self.class_weight)
self.coef_.append(coef_[0])
self.coef_ = np.squeeze(self.coef_)
# For the binary case, this get squeezed to a 1-D array.
if self.coef_.ndim == 1:
self.coef_ = self.coef_[np.newaxis, :]
self.coef_ = np.asarray(self.coef_)
if self.fit_intercept:
self.intercept_ = self.coef_[:, -1]
self.coef_ = self.coef_[:, :-1]
return self
def predict_proba(self, X):
"""Probability estimates.
The returned estimates for all classes are ordered by the
label of classes.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
T : array-like, shape = [n_samples, n_classes]
Returns the probability of the sample for each class in the model,
where classes are ordered as they are in ``self.classes_``.
"""
return self._predict_proba_lr(X)
def predict_log_proba(self, X):
"""Log of probability estimates.
The returned estimates for all classes are ordered by the
label of classes.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
T : array-like, shape = [n_samples, n_classes]
Returns the log-probability of the sample for each class in the
model, where classes are ordered as they are in ``self.classes_``.
"""
return np.log(self.predict_proba(X))
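# Illustrative usage sketch (not part of the original module): fits
# LogisticRegression on a toy binary problem and checks that predict_proba
# rows sum to one. The data and settings below are arbitrary.
def _example_logistic_regression_usage_sketch():
    rng = np.random.RandomState(4)
    X = rng.randn(30, 3)
    y = (X[:, 0] > 0).astype(int)
    clf = LogisticRegression(C=1.0, solver='liblinear').fit(X, y)
    proba = clf.predict_proba(X)
    assert proba.shape == (30, 2) and np.allclose(proba.sum(axis=1), 1.0)
    return clf.coef_, clf.intercept_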
class LogisticRegressionCV(LogisticRegression, BaseEstimator,
LinearClassifierMixin, _LearntSelectorMixin):
"""Logistic Regression CV (aka logit, MaxEnt) classifier.
This class implements logistic regression using liblinear, newton-cg or
LBFGS optimizer. The newton-cg and lbfgs solvers support only L2
regularization with primal formulation. The liblinear solver supports both
L1 and L2 regularization, with a dual formulation only for the L2 penalty.
For the grid of Cs values (that are set by default to be ten values in
a logarithmic scale between 1e-4 and 1e4), the best hyperparameter is
selected by the cross-validator StratifiedKFold, but it can be changed
using the cv parameter. In the case of newton-cg and lbfgs solvers,
we warm start along the path i.e guess the initial coefficients of the
present fit to be the coefficients got after convergence in the previous
fit, so it is supposed to be faster for high-dimensional dense data.
For a multiclass problem, the hyperparameters for each class are computed
using the best scores got by doing a one-vs-rest in parallel across all
folds and classes. Hence this is not the true multinomial loss.
Read more in the :ref:`User Guide <logistic_regression>`.
Parameters
----------
Cs : list of floats | int
Each of the values in Cs describes the inverse of regularization
        strength. If Cs is an int, then a grid of Cs values is chosen
in a logarithmic scale between 1e-4 and 1e4.
Like in support vector machines, smaller values specify stronger
regularization.
fit_intercept : bool, default: True
Specifies if a constant (a.k.a. bias or intercept) should be
        added to the decision function.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
cv : integer or cross-validation generator
The default cross-validation generator used is Stratified K-Folds.
If an integer is provided, then it is the number of folds used.
See the module :mod:`sklearn.cross_validation` module for the
list of possible cross-validation objects.
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The newton-cg and
lbfgs solvers support only l2 penalties.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
    scoring : callable
Scoring function to use as cross-validation criteria. For a list of
scoring functions that can be used, look at :mod:`sklearn.metrics`.
The default scoring option used is accuracy_score.
solver : {'newton-cg', 'lbfgs', 'liblinear'}
Algorithm to use in the optimization problem.
tol : float, optional
Tolerance for stopping criteria.
max_iter : int, optional
Maximum number of iterations of the optimization algorithm.
n_jobs : int, optional
Number of CPU cores used during the cross-validation loop. If given
a value of -1, all cores are used.
verbose : int
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
refit : bool
If set to True, the scores are averaged across all folds, and the
coefs and the C that corresponds to the best score is taken, and a
final refit is done using these parameters.
Otherwise the coefs, intercepts and C that correspond to the
best scores across folds are averaged.
multi_class : str, {'ovr', 'multinomial'}
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
        the entire probability distribution. Works only for the 'lbfgs'
        and 'newton-cg' solvers.
intercept_scaling : float, default 1.
Useful only if solver is liblinear.
This parameter is useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
Attributes
----------
coef_ : array, shape (1, n_features) or (n_classes, n_features)
Coefficient of the features in the decision function.
`coef_` is of shape (1, n_features) when the given problem
is binary.
`coef_` is readonly property derived from `raw_coef_` that
follows the internal memory layout of liblinear.
intercept_ : array, shape (1,) or (n_classes,)
Intercept (a.k.a. bias) added to the decision function.
It is available only when parameter intercept is set to True
and is of shape(1,) when the problem is binary.
Cs_ : array
Array of C i.e. inverse of regularization parameter values used
for cross-validation.
coefs_paths_ : array, shape ``(n_folds, len(Cs_), n_features)`` or \
``(n_folds, len(Cs_), n_features + 1)``
dict with classes as the keys, and the path of coefficients obtained
during cross-validating across each fold and then across each Cs
after doing an OvR for the corresponding class as values.
If the 'multi_class' option is set to 'multinomial', then
the coefs_paths are the coefficients corresponding to each class.
Each dict value has shape ``(n_folds, len(Cs_), n_features)`` or
``(n_folds, len(Cs_), n_features + 1)`` depending on whether the
intercept is fit or not.
scores_ : dict
dict with classes as the keys, and the values as the
grid of scores obtained during cross-validating each fold, after doing
an OvR for the corresponding class. If the 'multi_class' option
given is 'multinomial' then the same scores are repeated across
all classes, since this is the multinomial class.
Each dict value has shape (n_folds, len(Cs))
C_ : array, shape (n_classes,) or (n_classes - 1,)
Array of C that maps to the best scores across every class. If refit is
set to False, then for each class, the best C is the average of the
C's that correspond to the best scores for each fold.
See also
--------
LogisticRegression
"""
def __init__(self, Cs=10, fit_intercept=True, cv=None, dual=False,
penalty='l2', scoring=None, solver='lbfgs', tol=1e-4,
max_iter=100, class_weight=None, n_jobs=1, verbose=0,
refit=True, intercept_scaling=1., multi_class='ovr'):
self.Cs = Cs
self.fit_intercept = fit_intercept
self.cv = cv
self.dual = dual
self.penalty = penalty
self.scoring = scoring
self.tol = tol
self.max_iter = max_iter
self.class_weight = class_weight
self.n_jobs = n_jobs
self.verbose = verbose
self.solver = solver
self.refit = refit
self.intercept_scaling = intercept_scaling
self.multi_class = multi_class
def fit(self, X, y):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target vector relative to X.
Returns
-------
self : object
Returns self.
"""
_check_solver_option(self.solver, self.multi_class, self.penalty,
self.dual)
if not isinstance(self.max_iter, numbers.Number) or self.max_iter < 0:
raise ValueError("Maximum number of iteration must be positive;"
" got (max_iter=%r)" % self.max_iter)
if not isinstance(self.tol, numbers.Number) or self.tol < 0:
raise ValueError("Tolerance for stopping criteria must be "
"positive; got (tol=%r)" % self.tol)
X = check_array(X, accept_sparse='csr', dtype=np.float64)
y = check_array(y, ensure_2d=False, dtype=None)
if y.ndim == 2 and y.shape[1] == 1:
warnings.warn(
"A column-vector y was passed when a 1d array was"
" expected. Please change the shape of y to "
"(n_samples, ), for example using ravel().",
DataConversionWarning)
y = np.ravel(y)
check_consistent_length(X, y)
# init cross-validation generator
cv = check_cv(self.cv, X, y, classifier=True)
folds = list(cv)
self._enc = LabelEncoder()
self._enc.fit(y)
labels = self.classes_ = np.unique(y)
n_classes = len(labels)
if n_classes < 2:
raise ValueError("This solver needs samples of at least 2 classes"
" in the data, but the data contains only one"
" class: %r" % self.classes_[0])
if n_classes == 2:
# OvR in case of binary problems is as good as fitting
# the higher label
n_classes = 1
labels = labels[1:]
# We need this hack to iterate only once over labels, in the case of
# multi_class = multinomial, without changing the value of the labels.
iter_labels = labels
if self.multi_class == 'multinomial':
iter_labels = [None]
if self.class_weight and not(isinstance(self.class_weight, dict) or
self.class_weight in ['balanced', 'auto']):
raise ValueError("class_weight provided should be a "
"dict or 'balanced'")
path_func = delayed(_log_reg_scoring_path)
fold_coefs_ = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(
path_func(X, y, train, test, pos_class=label, Cs=self.Cs,
fit_intercept=self.fit_intercept, penalty=self.penalty,
dual=self.dual, solver=self.solver, tol=self.tol,
max_iter=self.max_iter, verbose=self.verbose,
class_weight=self.class_weight, scoring=self.scoring,
multi_class=self.multi_class,
intercept_scaling=self.intercept_scaling
)
for label in iter_labels
for train, test in folds)
if self.multi_class == 'multinomial':
multi_coefs_paths, Cs, multi_scores = zip(*fold_coefs_)
multi_coefs_paths = np.asarray(multi_coefs_paths)
multi_scores = np.asarray(multi_scores)
# This is just to maintain API similarity between the ovr and
# multinomial option.
            # Coefs_paths is now n_folds X len(Cs) X n_classes X n_features
# we need it to be n_classes X len(Cs) X n_folds X n_features
# to be similar to "ovr".
coefs_paths = np.rollaxis(multi_coefs_paths, 2, 0)
# Multinomial has a true score across all labels. Hence the
# shape is n_folds X len(Cs). We need to repeat this score
# across all labels for API similarity.
scores = np.tile(multi_scores, (n_classes, 1, 1))
self.Cs_ = Cs[0]
else:
coefs_paths, Cs, scores = zip(*fold_coefs_)
self.Cs_ = Cs[0]
coefs_paths = np.reshape(coefs_paths, (n_classes, len(folds),
len(self.Cs_), -1))
self.coefs_paths_ = dict(zip(labels, coefs_paths))
scores = np.reshape(scores, (n_classes, len(folds), -1))
self.scores_ = dict(zip(labels, scores))
self.C_ = list()
self.coef_ = np.empty((n_classes, X.shape[1]))
self.intercept_ = np.zeros(n_classes)
# hack to iterate only once for multinomial case.
if self.multi_class == 'multinomial':
scores = multi_scores
coefs_paths = multi_coefs_paths
for index, label in enumerate(iter_labels):
if self.multi_class == 'ovr':
scores = self.scores_[label]
coefs_paths = self.coefs_paths_[label]
if self.refit:
best_index = scores.sum(axis=0).argmax()
C_ = self.Cs_[best_index]
self.C_.append(C_)
if self.multi_class == 'multinomial':
coef_init = np.mean(coefs_paths[:, best_index, :, :],
axis=0)
else:
coef_init = np.mean(coefs_paths[:, best_index, :], axis=0)
w, _ = logistic_regression_path(
X, y, pos_class=label, Cs=[C_], solver=self.solver,
fit_intercept=self.fit_intercept, coef=coef_init,
max_iter=self.max_iter, tol=self.tol,
penalty=self.penalty,
class_weight=self.class_weight,
multi_class=self.multi_class,
verbose=max(0, self.verbose - 1))
w = w[0]
else:
# Take the best scores across every fold and the average of all
# coefficients corresponding to the best scores.
best_indices = np.argmax(scores, axis=1)
w = np.mean([coefs_paths[i][best_indices[i]]
for i in range(len(folds))], axis=0)
self.C_.append(np.mean(self.Cs_[best_indices]))
if self.multi_class == 'multinomial':
self.C_ = np.tile(self.C_, n_classes)
self.coef_ = w[:, :X.shape[1]]
if self.fit_intercept:
self.intercept_ = w[:, -1]
else:
self.coef_[index] = w[: X.shape[1]]
if self.fit_intercept:
self.intercept_[index] = w[-1]
self.C_ = np.asarray(self.C_)
return self
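# Illustrative usage sketch (not part of the original module): cross-validated
# search over C on a toy binary problem; cv=3 and Cs=5 are arbitrary choices.
# The helper is ours and is never called by sklearn.
def _example_logistic_regression_cv_usage_sketch():
    rng = np.random.RandomState(5)
    X = rng.randn(60, 4)
    y = (X[:, 0] + 0.5 * X[:, 1] > 0).astype(int)
    clf = LogisticRegressionCV(Cs=5, cv=3, solver='lbfgs').fit(X, y)
    # For a binary problem a single best C is reported.
    assert clf.C_.shape == (1,)
    return clf.C_, clf.scores_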
| bsd-3-clause |
odellus/year_of_code | observations_gym.py | 1 | 2013 | #! /usr/bin/env python
import gym
import numpy as np
from matplotlib import pyplot as plt
import time
def random_sample(environment):
env = gym.make(environment)
img = env.render(mode='rgb_array')
observation = env.reset()
n_obs = observation.shape[0]
ih, iw, n_channels = img.shape
n_episodes = 20
n_timesteps = 100
n_images = n_episodes * n_timesteps
# Create empty arrays to store information from environment.
images = np.zeros((n_images, ih, iw, n_channels))
obs = np.zeros((n_images, n_obs))
rewards = np.zeros((n_images,))
actions = np.zeros((n_images,))
for i_episode in range(n_episodes):
observation = env.reset()
for t in range(n_timesteps):
# Keeps track of the total number of observations, rewards, etc...
k = n_timesteps*i_episode + t
# Get the image from the screen. This will be part of observed quantity.
img = env.render(mode='rgb_array')
images[k,:,:,:] = img
# We're only randomly sampling right now. Later this will be
            # replaced by the argmax of a final softmax layer from a CNN of the image
# combined in some way with the observation given directly by gym.
action = env.action_space.sample()
# Step forward the environment with agent taking the sampled action.
# Collect new observations
observation, reward, done, info = env.step(action)
obs[k,:] = observation
rewards[k] = reward
actions[k] = action
if done:
print("Episode finished after {} timesteps".format(t+1))
break
return obs, rewards, actions, images
def test_random_sample():
obs, rewards, actions, images = random_sample('Boxing-ram-v0')
print(images.shape)
rando = np.random.randint(0,len(images))
plt.imshow(images[rando,:,:,:])
plt.show()
if __name__ == "__main__":
test_random_sample()
| mit |
gandalfgreyheme/TMFFAP | LISM_Text_Iterative.py | 1 | 2862 | # -*- coding: utf-8 -*-
"""
Created on Thu Nov 17 17:10:39 2016
Logical Item Set Mining (LISM) V 0.0
LISM Implementation in Python.
Read more here: http://cvit.iiit.ac.in/papers/Chandrashekar2012Logical.pdf
@author: saurabh.choudhary
"""
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer
import pandas as pd
import os
#Setting Hyperparameters
#Threshold below which the co-occurrences are set to 0
#ThetaCooc>=1; int
ThetaCooc = 1
#Threshold below which the normalised PMI is set to 0
#ThetaConsy ->{0,1}
ThetaConsy = 0.3
#Number of iterations for denoising
nIter=10
#Stage 0 - Get Data
path = r"D:\Data Experiments\VH Verbatim Analysis"
os.chdir(path)
text=os.path.join(path, "TDM_test.csv")
tDF=pd.read_csv(text)
vec=CountVectorizer(stop_words='english',binary=True)
tdm=vec.fit_transform(tDF['Comments'])
tdm=tdm.toarray()
words=vec.get_feature_names()
words=np.array(words)
#nTDM = pd.DataFrame(tdm, columns=words) #named TDM
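# Illustrative sketch (ours, not part of the original script): normalised PMI
# for a single word pair, using the same formulas applied matrix-wise below:
# PMI = max(0, ln(P(a,b) / (P(a) * P(b)))) and nPMI = PMI / (-ln(P(a,b))).
# The function is only defined for reference and is never called.
def _npmi_pair_example(p_ab, p_a, p_b):
    pmi = max(0.0, np.log(p_ab / (p_a * p_b)))
    return pmi / (-np.log(p_ab))
# e.g. _npmi_pair_example(0.05, 0.1, 0.1) is about 0.54: the pair co-occurs
# five times more often than independence would predict.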
#Stage 1: LISM counting
# 1.1: Cooccurance Counts {psi (alpha, beta)}
cooc_raw = np.dot(tdm.transpose(),tdm) #Calculate Co-occurance Matrix
np.fill_diagonal(cooc_raw, 0) #make the cooc matrix diagnol 0 P1 != P2
cooc_zero=cooc_raw>=ThetaCooc
cooc_zero=cooc_zero*1 #Applying delta -> 1 if cooc, 0 if otherwise
cooc_cnt=cooc_zero
for i in range(0,nIter):
    # 1.2: Marginal Counts {psi (alpha)}
marginal_cnt = np.sum(cooc_cnt, axis=1)
# 1.3: Total Counts {psi0}
total_cnt=0.5*np.sum(marginal_cnt)
    #Calculate co-occurrence and marginal probabilities
cooc_prob=cooc_cnt/total_cnt #P(a,b)
marginal_prob=marginal_cnt/total_cnt #P(a)
#Stage 2: LISM consistency
    #Calculate pointwise mutual information = max{0, ln(P(a,b)/(P(a)*P(b)))}
PMI = cooc_prob/np.outer(marginal_prob,marginal_prob)
# outer product produces an MxM matrix for an M dimensional vector giving us
# a vectorised implementation of P(a)*P(b)
PMI=np.log(PMI)
PMI[PMI<=0]=0
    #Calculate normalised PMI = PMI/(-ln(P(a,b)))
ln_cooc_prob = -np.log(cooc_prob)
nPMI=np.divide(PMI,ln_cooc_prob)
nPMI[np.isnan(nPMI)==True]=0
nPMI[nPMI<ThetaConsy]=0
nPMImask=nPMI>0
cooc_count_t0=np.sum(cooc_cnt)
cooc_cnt=np.multiply(cooc_zero,nPMImask)
quality=np.sum(np.multiply(cooc_prob,nPMI))
print("Quality = {}".format(quality))
if(np.sum(cooc_cnt)==cooc_count_t0):
print("Iterations taken = {}".format(i+1))
print("Number of latent structures found = {}".format(np.sum(cooc_cnt)/2))
break
    elif i == nIter - 1:
print("Did not converge in {} iterations".format(nIter)) | gpl-3.0 |
romanz/electrum | gui/qt/history_list.py | 1 | 17382 | #!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2015 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import webbrowser
import datetime
from electrum.wallet import AddTransactionException, TX_HEIGHT_LOCAL
from .util import *
from electrum.i18n import _
from electrum.util import block_explorer_URL, profiler
try:
from electrum.plot import plot_history, NothingToPlotException
except:
plot_history = None
# note: this list needs to be kept in sync with another in kivy
TX_ICONS = [
"warning.png",
"warning.png",
"unconfirmed.png",
"unconfirmed.png",
"offline_tx.png",
"clock1.png",
"clock2.png",
"clock3.png",
"clock4.png",
"clock5.png",
"confirmed.png",
]
class HistoryList(MyTreeWidget, AcceptFileDragDrop):
filter_columns = [2, 3, 4] # Date, Description, Amount
def __init__(self, parent=None):
MyTreeWidget.__init__(self, parent, self.create_menu, [], 3)
AcceptFileDragDrop.__init__(self, ".txn")
self.refresh_headers()
self.setColumnHidden(1, True)
self.start_timestamp = None
self.end_timestamp = None
self.years = []
def refresh_headers(self):
headers = ['', '', _('Date'), _('Description'), _('Amount'), _('Balance')]
fx = self.parent.fx
if fx and fx.show_history():
headers.extend(['%s '%fx.ccy + _('Value')])
headers.extend(['%s '%fx.ccy + _('Acquisition price')])
headers.extend(['%s '%fx.ccy + _('Capital Gains')])
self.editable_columns |= {6}
else:
self.editable_columns -= {6}
self.update_headers(headers)
def get_domain(self):
'''Replaced in address_dialog.py'''
return self.wallet.get_addresses()
def on_combo(self, x):
s = self.period_combo.itemText(x)
if s == _('All'):
self.start_timestamp = None
self.end_timestamp = None
elif s == _('Custom'):
start_date = self.select_date()
else:
try:
year = int(s)
except:
return
start_date = datetime.datetime(year, 1, 1)
end_date = datetime.datetime(year+1, 1, 1)
self.start_timestamp = time.mktime(start_date.timetuple())
self.end_timestamp = time.mktime(end_date.timetuple())
self.update()
def get_list_header(self):
self.period_combo = QComboBox()
self.period_combo.addItems([_('All'), _('Custom')])
self.period_combo.activated.connect(self.on_combo)
self.summary_button = QPushButton(_('Summary'))
self.summary_button.pressed.connect(self.show_summary)
self.export_button = QPushButton(_('Export'))
self.export_button.pressed.connect(self.export_history_dialog)
self.plot_button = QPushButton(_('Plot'))
self.plot_button.pressed.connect(self.plot_history_dialog)
return self.period_combo, self.summary_button, self.export_button, self.plot_button
def select_date(self):
h = self.summary
d = WindowModalDialog(self, _("Custom dates"))
d.setMinimumSize(600, 150)
d.b = True
d.start_date = None
d.end_date = None
vbox = QVBoxLayout()
grid = QGridLayout()
start_edit = QPushButton()
def on_start():
start_edit.setText('')
d.b = True
d.start_date = None
start_edit.pressed.connect(on_start)
def on_end():
end_edit.setText('')
d.b = False
d.end_date = None
end_edit = QPushButton()
end_edit.pressed.connect(on_end)
grid.addWidget(QLabel(_("Start date")), 0, 0)
grid.addWidget(start_edit, 0, 1)
grid.addWidget(QLabel(_("End date")), 1, 0)
grid.addWidget(end_edit, 1, 1)
def on_date(date):
ts = time.mktime(date.toPyDate().timetuple())
if d.b:
d.start_date = ts
start_edit.setText(date.toString())
else:
d.end_date = ts
end_edit.setText(date.toString())
cal = QCalendarWidget()
cal.setGridVisible(True)
cal.clicked[QDate].connect(on_date)
vbox.addLayout(grid)
vbox.addWidget(cal)
vbox.addLayout(Buttons(OkButton(d), CancelButton(d)))
d.setLayout(vbox)
if d.exec_():
self.start_timestamp = d.start_date
self.end_timestamp = d.end_date
self.update()
def show_summary(self):
h = self.summary
start_date = h.get('start_date')
end_date = h.get('end_date')
if start_date is None or end_date is None:
self.parent.show_message(_("Nothing to summarize."))
return
format_amount = lambda x: self.parent.format_amount(x) + ' ' + self.parent.base_unit()
d = WindowModalDialog(self, _("Summary"))
d.setMinimumSize(600, 150)
vbox = QVBoxLayout()
grid = QGridLayout()
grid.addWidget(QLabel(_("Start")), 0, 0)
grid.addWidget(QLabel(start_date.isoformat(' ')), 0, 1)
grid.addWidget(QLabel(_("End")), 1, 0)
grid.addWidget(QLabel(end_date.isoformat(' ')), 1, 1)
grid.addWidget(QLabel(_("Initial balance")), 2, 0)
grid.addWidget(QLabel(format_amount(h['start_balance'].value)), 2, 1)
grid.addWidget(QLabel(str(h.get('start_fiat_balance'))), 2, 2)
grid.addWidget(QLabel(_("Final balance")), 4, 0)
grid.addWidget(QLabel(format_amount(h['end_balance'].value)), 4, 1)
grid.addWidget(QLabel(str(h.get('end_fiat_balance'))), 4, 2)
grid.addWidget(QLabel(_("Income")), 6, 0)
grid.addWidget(QLabel(str(h.get('fiat_income'))), 6, 2)
grid.addWidget(QLabel(_("Capital gains")), 7, 0)
grid.addWidget(QLabel(str(h.get('capital_gains'))), 7, 2)
grid.addWidget(QLabel(_("Unrealized gains")), 8, 0)
grid.addWidget(QLabel(str(h.get('unrealized_gains', ''))), 8, 2)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
d.exec_()
def plot_history_dialog(self):
if plot_history is None:
self.parent.show_message(
_("Can't plot history.") + '\n' +
_("Perhaps some dependencies are missing...") + " (matplotlib?)")
return
try:
plt = plot_history(self.transactions)
plt.show()
except NothingToPlotException as e:
self.parent.show_message(str(e))
@profiler
def on_update(self):
self.wallet = self.parent.wallet
fx = self.parent.fx
r = self.wallet.get_full_history(domain=self.get_domain(), from_timestamp=self.start_timestamp, to_timestamp=self.end_timestamp, fx=fx)
self.transactions = r['transactions']
self.summary = r['summary']
if not self.years and self.start_timestamp is None and self.end_timestamp is None:
start_date = self.summary.get('start_date')
end_date = self.summary.get('end_date')
if start_date and end_date:
self.years = [str(i) for i in range(start_date.year, end_date.year + 1)]
self.period_combo.insertItems(1, self.years)
item = self.currentItem()
current_tx = item.data(0, Qt.UserRole) if item else None
self.clear()
if fx: fx.history_used_spot = False
for tx_item in self.transactions:
tx_hash = tx_item['txid']
height = tx_item['height']
conf = tx_item['confirmations']
timestamp = tx_item['timestamp']
value = tx_item['value'].value
balance = tx_item['balance'].value
label = tx_item['label']
status, status_str = self.wallet.get_tx_status(tx_hash, height, conf, timestamp)
has_invoice = self.wallet.invoices.paid.get(tx_hash)
icon = QIcon(":icons/" + TX_ICONS[status])
v_str = self.parent.format_amount(value, True, whitespaces=True)
balance_str = self.parent.format_amount(balance, whitespaces=True)
entry = ['', tx_hash, status_str, label, v_str, balance_str]
fiat_value = None
if value is not None and fx and fx.show_history():
fiat_value = tx_item['fiat_value'].value
value_str = fx.format_fiat(fiat_value)
entry.append(value_str)
# fixme: should use is_mine
if value < 0:
entry.append(fx.format_fiat(tx_item['acquisition_price'].value))
entry.append(fx.format_fiat(tx_item['capital_gain'].value))
item = QTreeWidgetItem(entry)
item.setIcon(0, icon)
item.setToolTip(0, str(conf) + " confirmation" + ("s" if conf != 1 else ""))
if has_invoice:
item.setIcon(3, QIcon(":icons/seal"))
for i in range(len(entry)):
if i>3:
item.setTextAlignment(i, Qt.AlignRight)
if i!=2:
item.setFont(i, QFont(MONOSPACE_FONT))
if value and value < 0:
item.setForeground(3, QBrush(QColor("#BC1E1E")))
item.setForeground(4, QBrush(QColor("#BC1E1E")))
if fiat_value and not tx_item['fiat_default']:
item.setForeground(6, QBrush(QColor("#1E1EFF")))
if tx_hash:
item.setData(0, Qt.UserRole, tx_hash)
self.insertTopLevelItem(0, item)
if current_tx == tx_hash:
self.setCurrentItem(item)
def on_edited(self, item, column, prior):
'''Called only when the text actually changes'''
key = item.data(0, Qt.UserRole)
text = item.text(column)
# fixme
if column == 3:
self.parent.wallet.set_label(key, text)
self.update_labels()
self.parent.update_completions()
elif column == 6:
self.parent.wallet.set_fiat_value(key, self.parent.fx.ccy, text)
self.on_update()
def on_doubleclick(self, item, column):
if self.permit_edit(item, column):
super(HistoryList, self).on_doubleclick(item, column)
else:
tx_hash = item.data(0, Qt.UserRole)
tx = self.wallet.transactions.get(tx_hash)
self.parent.show_transaction(tx)
def update_labels(self):
root = self.invisibleRootItem()
child_count = root.childCount()
for i in range(child_count):
item = root.child(i)
txid = item.data(0, Qt.UserRole)
label = self.wallet.get_label(txid)
item.setText(3, label)
def update_item(self, tx_hash, height, conf, timestamp):
status, status_str = self.wallet.get_tx_status(tx_hash, height, conf, timestamp)
icon = QIcon(":icons/" + TX_ICONS[status])
items = self.findItems(tx_hash, Qt.UserRole|Qt.MatchContains|Qt.MatchRecursive, column=1)
if items:
item = items[0]
item.setIcon(0, icon)
item.setText(2, status_str)
def create_menu(self, position):
self.selectedIndexes()
item = self.currentItem()
if not item:
return
column = self.currentColumn()
tx_hash = item.data(0, Qt.UserRole)
if not tx_hash:
return
if column == 0:
column_title = "ID"
column_data = tx_hash
else:
column_title = self.headerItem().text(column)
column_data = item.text(column)
tx_URL = block_explorer_URL(self.config, 'tx', tx_hash)
height, conf, timestamp = self.wallet.get_tx_height(tx_hash)
tx = self.wallet.transactions.get(tx_hash)
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
is_unconfirmed = height <= 0
pr_key = self.wallet.invoices.paid.get(tx_hash)
menu = QMenu()
if height == TX_HEIGHT_LOCAL:
menu.addAction(_("Remove"), lambda: self.remove_local_tx(tx_hash))
menu.addAction(_("Copy {}").format(column_title), lambda: self.parent.app.clipboard().setText(column_data))
for c in self.editable_columns:
menu.addAction(_("Edit {}").format(self.headerItem().text(c)), lambda c=c: self.editItem(item, c))
menu.addAction(_("Details"), lambda: self.parent.show_transaction(tx))
if is_unconfirmed and tx:
rbf = is_mine and not tx.is_final()
if rbf:
menu.addAction(_("Increase fee"), lambda: self.parent.bump_fee_dialog(tx))
else:
child_tx = self.wallet.cpfp(tx, 0)
if child_tx:
menu.addAction(_("Child pays for parent"), lambda: self.parent.cpfp(tx, child_tx))
if pr_key:
menu.addAction(QIcon(":icons/seal"), _("View invoice"), lambda: self.parent.show_invoice(pr_key))
if tx_URL:
menu.addAction(_("View on block explorer"), lambda: webbrowser.open(tx_URL))
menu.exec_(self.viewport().mapToGlobal(position))
def remove_local_tx(self, delete_tx):
to_delete = {delete_tx}
to_delete |= self.wallet.get_depending_transactions(delete_tx)
question = _("Are you sure you want to remove this transaction?")
if len(to_delete) > 1:
question = _(
"Are you sure you want to remove this transaction and {} child transactions?"
).format(len(to_delete) - 1)
answer = QMessageBox.question(self.parent, _("Please confirm"), question, QMessageBox.Yes, QMessageBox.No)
if answer == QMessageBox.No:
return
for tx in to_delete:
self.wallet.remove_transaction(tx)
self.wallet.save_transactions(write=True)
# need to update at least: history_list, utxo_list, address_list
self.parent.need_update.set()
def onFileAdded(self, fn):
try:
with open(fn) as f:
tx = self.parent.tx_from_text(f.read())
self.parent.save_transaction_into_wallet(tx)
except IOError as e:
self.parent.show_error(e)
def export_history_dialog(self):
d = WindowModalDialog(self, _('Export History'))
d.setMinimumSize(400, 200)
vbox = QVBoxLayout(d)
defaultname = os.path.expanduser('~/electrum-history.csv')
select_msg = _('Select file to export your wallet transactions to')
hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
vbox.addLayout(hbox)
vbox.addStretch(1)
hbox = Buttons(CancelButton(d), OkButton(d, _('Export')))
vbox.addLayout(hbox)
#run_hook('export_history_dialog', self, hbox)
self.update()
if not d.exec_():
return
filename = filename_e.text()
if not filename:
return
try:
self.do_export_history(self.wallet, filename, csv_button.isChecked())
except (IOError, os.error) as reason:
export_error_label = _("Electrum was unable to produce a transaction export.")
self.parent.show_critical(export_error_label + "\n" + str(reason), title=_("Unable to export history"))
return
self.parent.show_message(_("Your wallet history has been successfully exported."))
def do_export_history(self, wallet, fileName, is_csv):
history = self.transactions
lines = []
for item in history:
if is_csv:
lines.append([item['txid'], item.get('label', ''), item['confirmations'], item['value'], item['date']])
else:
lines.append(item)
with open(fileName, "w+") as f:
if is_csv:
import csv
transaction = csv.writer(f, lineterminator='\n')
transaction.writerow(["transaction_hash","label", "confirmations", "value", "timestamp"])
for line in lines:
transaction.writerow(line)
else:
from electrum.util import json_encode
f.write(json_encode(history))
| mit |
nukui-s/sscomdetection | compare_nmi.py | 1 | 5704 | import sys
import networkx as nx
import csv
import random
import numpy as np
import os
import time
from itertools import combinations
import pandas as pd
from sscd import SSCD
from ssnmf import SSNMF
import scipy.sparse as ssp
from sklearn.metrics import normalized_mutual_info_score
from ssscd import SynmetricSSCD, KLSynmetricSSCD
from settings import *
np.random.seed(42)
"""Functions definitions"""
def make_matrix_from_elist(elist, nn):
mat = ssp.lil_matrix((nn,nn))
ind = np.array(elist)
mat[ind[:,0],ind[:,1]] = 1
return mat.toarray()
def expand_synmetric(elist):
elist = elist + [(j, i) for i, j in elist]
return elist
def calculate_loss(edge_list, W, H, mlambda, const):
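# Sketch of the objective computed below (derived from this function's body):
# loss = ||X - H H^T||_F^2 + mlambda * (tr(W^T L W) + tr(H^T L H)),
# where L = D - O is the Laplacian of the must-link constraint graph O.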
nn = H.shape[0]
edge_list = expand_synmetric(edge_list)
if const:
const = expand_synmetric(const)
X = make_matrix_from_elist(edge_list, nn)
O = make_matrix_from_elist(const, nn)
D = np.diag(O.sum(axis=1))
L = D - O
H = np.mat(H)
W = np.mat(W)
L = np.mat(L)
lse = np.sum(np.power(X - H*H.T,2))
reg_H = (H.T * L * H).trace()
reg_W = (W.T * L * W).trace()
reg = reg_W + reg_H
loss = lse + mlambda * reg
return loss
def calculate_nmi(H, correct_label):
com = H.argmax(axis=1)
nmi = normalized_mutual_info_score(com, correct_label)
return nmi
def export_cost(path, cost_list):
with open(path, "w") as f:
for c in cost_list:
f.write(str(c) + "\n")
if __name__=="__main__":
os.system("rm -rf log")
for density in densities:
dense_label = str(density)
data_path = "data/%s_edge.pkl"%(data_label)
label_path = "data/%s_label.pkl"%(data_label)
const_path = const_base.format(data_label, density)
edge_list = pd.read_pickle(data_path)
correct_label = pd.read_pickle(label_path)
const = pd.read_pickle(const_path)
print(const_path)
if len(const) == 0:
const = [(0, 0)]
abs_adam = SynmetricSSCD(K, method="adam", positivate="abs", mlambda=mlambda, learning_rate=lr_adam, threads=threads)
kl_adam = KLSynmetricSSCD(K, method="adam", positivate="abs", mlambda=mlambda, learning_rate=lr_adam, threads=threads)
clip_adam = SynmetricSSCD(K, method="adam", positivate="clip", mlambda=mlambda, learning_rate=lr_adam, threads=threads)
abs_sgd = SynmetricSSCD(K, method="sgd", positivate="abs", mlambda=mlambda, learning_rate=lr_sgd, threads=threads)
clip_sgd = SynmetricSSCD(K, method="sgd", positivate="clip", mlambda=mlambda, learning_rate=lr_sgd, threads=threads)
#abs_adam = SSCD(K, method="adam", positivate="abs", mlambda=mlambda, learning_rate=lr_adam, threads=threads)
#clip_adam = SSCD(K, method="adam", positivate="clip", mlambda=mlambda, learning_rate=lr_adam, threads=threads)
#abs_sgd = SSCD(K, method="sgd", positivate="abs", mlambda=mlambda, learning_rate=lr_sgd, threads=threads)
#clip_sgd = SSCD(K, method="sgd", positivate="clip", mlambda=mlambda, learning_rate=lr_sgd, threads=threads)
update_rule = SSCD(K, mlambda=mlambda, method="mult", threads=threads)
all_models = {"update_rule":update_rule, "abs_adam": abs_adam, "abs_sgd": abs_sgd,
"clip_adam":clip_adam, "clip_sgd":clip_sgd, "kl_adam": kl_adam}
#models = {"update_rule":update_rule, "abs_adam": abs_adam, "clip_adam":clip_adam}
#models = {"abs_adam": abs_adam, "update_rule":update_rule}
models = {name: all_models[name] for name in used_models}
times = {}
nmis = {}
best_costs = {}
best_times = {}
for _ in range(trials):
for name, model in models.items():
print("******************************")
print(name)
times.setdefault(name, [])
nmis.setdefault(name, [])
best_costs.setdefault(name, [])
start = time.time()
W, H, best_cost, cost_list, H_list = model.fit_and_transform(edge_list, const,
threshold=threshold, steps=max_iters)
elapsed = time.time() - start
loss = calculate_loss(edge_list, W, H, mlambda, const)
nmi = calculate_nmi(H, correct_label)
path = "cost/" + name + "_" + data_label + ".csv"
export_cost(path, cost_list)
print("density: " + str(density))
print("Time: " + str(elapsed))
print("Loss: " + str(loss))
print("NMI: " + str(nmi))
times[name].append(elapsed)
nmis[name].append(nmi)
best_costs[name].append(best_cost)
#import pdb; pdb.set_trace()
result_path = "result/{}_{}.csv".format(exp_name, str(dense_label))
model_names = sorted(list(models.keys()))
with open(result_path, "w") as f:
writer = csv.writer(f)
header = ["model", "mean time", "std time", "mean nmi", "std nmi", "best_cost", "best_nmi","best_time"]
writer.writerow(header)
for name in model_names:
mean_time = np.mean(times[name])
std_time = np.std(times[name])
mean_nmi = np.mean(nmis[name])
std_nmi = np.std(nmis[name])
best_cost = min(best_costs[name])
best_ind = best_costs[name].index(best_cost)
best_nmi = nmis[name][best_ind]
best_time = times[name][best_ind]
result = [name, mean_time, std_time, mean_nmi, std_nmi, best_cost, best_nmi, best_time]
writer.writerow(result)
| apache-2.0 |
Adai0808/scikit-learn | sklearn/manifold/tests/test_spectral_embedding.py | 216 | 8091 | from nose.tools import assert_true
from nose.tools import assert_equal
from scipy.sparse import csr_matrix
from scipy.sparse import csc_matrix
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_array_equal
from nose.tools import assert_raises
from nose.plugins.skip import SkipTest
from sklearn.manifold.spectral_embedding_ import SpectralEmbedding
from sklearn.manifold.spectral_embedding_ import _graph_is_connected
from sklearn.manifold import spectral_embedding
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.metrics import normalized_mutual_info_score
from sklearn.cluster import KMeans
from sklearn.datasets.samples_generator import make_blobs
# non centered, sparse centers to check the
centers = np.array([
[0.0, 5.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 4.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 5.0, 1.0],
])
n_samples = 1000
n_clusters, n_features = centers.shape
S, true_labels = make_blobs(n_samples=n_samples, centers=centers,
cluster_std=1., random_state=42)
def _check_with_col_sign_flipping(A, B, tol=0.0):
""" Check array A and B are equal with possible sign flipping on
each column"""
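# Illustrative usage (not part of the original tests): flipping the sign of
# every column still compares equal, e.g. _check_with_col_sign_flipping(A, -A)
# returns True, since for each column either the sum or the difference has
# zero mean squared error.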
sign = True
for column_idx in range(A.shape[1]):
sign = sign and ((((A[:, column_idx] -
B[:, column_idx]) ** 2).mean() <= tol ** 2) or
(((A[:, column_idx] +
B[:, column_idx]) ** 2).mean() <= tol ** 2))
if not sign:
return False
return True
def test_spectral_embedding_two_components(seed=36):
# Test spectral embedding with two components
random_state = np.random.RandomState(seed)
n_sample = 100
affinity = np.zeros(shape=[n_sample * 2,
n_sample * 2])
# first component
affinity[0:n_sample,
0:n_sample] = np.abs(random_state.randn(n_sample, n_sample)) + 2
# second component
affinity[n_sample::,
n_sample::] = np.abs(random_state.randn(n_sample, n_sample)) + 2
# connection
affinity[0, n_sample + 1] = 1
affinity[n_sample + 1, 0] = 1
affinity.flat[::2 * n_sample + 1] = 0
affinity = 0.5 * (affinity + affinity.T)
true_label = np.zeros(shape=2 * n_sample)
true_label[0:n_sample] = 1
se_precomp = SpectralEmbedding(n_components=1, affinity="precomputed",
random_state=np.random.RandomState(seed))
embedded_coordinate = se_precomp.fit_transform(affinity)
# Some numpy versions are touchy with types
embedded_coordinate = \
se_precomp.fit_transform(affinity.astype(np.float32))
# thresholding on the first components using 0.
label_ = np.array(embedded_coordinate.ravel() < 0, dtype="float")
assert_equal(normalized_mutual_info_score(true_label, label_), 1.0)
def test_spectral_embedding_precomputed_affinity(seed=36):
# Test spectral embedding with precomputed kernel
gamma = 1.0
se_precomp = SpectralEmbedding(n_components=2, affinity="precomputed",
random_state=np.random.RandomState(seed))
se_rbf = SpectralEmbedding(n_components=2, affinity="rbf",
gamma=gamma,
random_state=np.random.RandomState(seed))
embed_precomp = se_precomp.fit_transform(rbf_kernel(S, gamma=gamma))
embed_rbf = se_rbf.fit_transform(S)
assert_array_almost_equal(
se_precomp.affinity_matrix_, se_rbf.affinity_matrix_)
assert_true(_check_with_col_sign_flipping(embed_precomp, embed_rbf, 0.05))
def test_spectral_embedding_callable_affinity(seed=36):
# Test spectral embedding with callable affinity
gamma = 0.9
kern = rbf_kernel(S, gamma=gamma)
se_callable = SpectralEmbedding(n_components=2,
affinity=(
lambda x: rbf_kernel(x, gamma=gamma)),
gamma=gamma,
random_state=np.random.RandomState(seed))
se_rbf = SpectralEmbedding(n_components=2, affinity="rbf",
gamma=gamma,
random_state=np.random.RandomState(seed))
embed_rbf = se_rbf.fit_transform(S)
embed_callable = se_callable.fit_transform(S)
assert_array_almost_equal(
se_callable.affinity_matrix_, se_rbf.affinity_matrix_)
assert_array_almost_equal(kern, se_rbf.affinity_matrix_)
assert_true(
_check_with_col_sign_flipping(embed_rbf, embed_callable, 0.05))
def test_spectral_embedding_amg_solver(seed=36):
# Test spectral embedding with amg solver
try:
from pyamg import smoothed_aggregation_solver
except ImportError:
raise SkipTest("pyamg not available.")
se_amg = SpectralEmbedding(n_components=2, affinity="nearest_neighbors",
eigen_solver="amg", n_neighbors=5,
random_state=np.random.RandomState(seed))
se_arpack = SpectralEmbedding(n_components=2, affinity="nearest_neighbors",
eigen_solver="arpack", n_neighbors=5,
random_state=np.random.RandomState(seed))
embed_amg = se_amg.fit_transform(S)
embed_arpack = se_arpack.fit_transform(S)
assert_true(_check_with_col_sign_flipping(embed_amg, embed_arpack, 0.05))
def test_pipeline_spectral_clustering(seed=36):
# Test using pipeline to do spectral clustering
random_state = np.random.RandomState(seed)
se_rbf = SpectralEmbedding(n_components=n_clusters,
affinity="rbf",
random_state=random_state)
se_knn = SpectralEmbedding(n_components=n_clusters,
affinity="nearest_neighbors",
n_neighbors=5,
random_state=random_state)
for se in [se_rbf, se_knn]:
km = KMeans(n_clusters=n_clusters, random_state=random_state)
km.fit(se.fit_transform(S))
assert_array_almost_equal(
normalized_mutual_info_score(
km.labels_,
true_labels), 1.0, 2)
def test_spectral_embedding_unknown_eigensolver(seed=36):
# Test that SpectralClustering fails with an unknown eigensolver
se = SpectralEmbedding(n_components=1, affinity="precomputed",
random_state=np.random.RandomState(seed),
eigen_solver="<unknown>")
assert_raises(ValueError, se.fit, S)
def test_spectral_embedding_unknown_affinity(seed=36):
# Test that SpectralClustering fails with an unknown affinity type
se = SpectralEmbedding(n_components=1, affinity="<unknown>",
random_state=np.random.RandomState(seed))
assert_raises(ValueError, se.fit, S)
def test_connectivity(seed=36):
# Test that graph connectivity test works as expected
graph = np.array([[1, 0, 0, 0, 0],
[0, 1, 1, 0, 0],
[0, 1, 1, 1, 0],
[0, 0, 1, 1, 1],
[0, 0, 0, 1, 1]])
assert_equal(_graph_is_connected(graph), False)
assert_equal(_graph_is_connected(csr_matrix(graph)), False)
assert_equal(_graph_is_connected(csc_matrix(graph)), False)
graph = np.array([[1, 1, 0, 0, 0],
[1, 1, 1, 0, 0],
[0, 1, 1, 1, 0],
[0, 0, 1, 1, 1],
[0, 0, 0, 1, 1]])
assert_equal(_graph_is_connected(graph), True)
assert_equal(_graph_is_connected(csr_matrix(graph)), True)
assert_equal(_graph_is_connected(csc_matrix(graph)), True)
def test_spectral_embedding_deterministic():
# Test that Spectral Embedding is deterministic
random_state = np.random.RandomState(36)
data = random_state.randn(10, 30)
sims = rbf_kernel(data)
embedding_1 = spectral_embedding(sims)
embedding_2 = spectral_embedding(sims)
assert_array_almost_equal(embedding_1, embedding_2)
| bsd-3-clause |
PyRsw/PyRsw | src/Plot_tools/update_save_1D.py | 1 | 2231 | # Update plot objects if saving
# Assume the field is 1-dimensional
import matplotlib.pyplot as plt
import numpy as np
from smart_time import smart_time
def update_save_1D(sim):
sim.fig.suptitle(smart_time(sim.time))
for var_cnt in range(len(sim.plot_vars)):
var = sim.plot_vars[var_cnt]
# Update plot
for L in range(sim.Nz):
if var == 'u':
if sim.method.lower() == 'sadourny':
to_plot = sim.soln.u[0:sim.Nx,0:sim.Ny+1,L]
elif sim.method.lower() == 'spectral':
to_plot = sim.soln.u[0:sim.Nx,0:sim.Ny,L]
elif var == 'v':
if sim.method.lower() == 'sadourny':
to_plot = sim.soln.v[0:sim.Nx+1,0:sim.Ny,L]
elif sim.method.lower() == 'spectral':
to_plot = sim.soln.v[0:sim.Nx,0:sim.Ny,L]
elif var == 'h':
if sim.method.lower() == 'sadourny':
to_plot = sim.soln.h[0:sim.Nx+1,0:sim.Ny+1,L] - sim.Hs[L]
elif sim.method.lower() == 'spectral':
to_plot = sim.soln.h[0:sim.Nx,0:sim.Ny,L] - sim.Hs[L]
elif var == 'vort':
to_plot = sim.ddx_v(sim.soln.v[:,:,L],sim) \
- sim.ddy_u(sim.soln.u[:,:,L],sim)
to_plot = to_plot.ravel()
if sim.f0 != 0:
to_plot *= 1./sim.f0
elif var == 'div':
h = sim.soln.h[:,:,L]
to_plot = sim.ddx_u(h*sim.soln.u[:,:,L],sim) \
+ sim.ddy_v(h*sim.soln.v[:,:,L],sim)
to_plot = to_plot.ravel()
if sim.f0 != 0:
to_plot *= 1./sim.f0
sim.Qs[var_cnt][L].set_ydata(to_plot)
if len(sim.ylims[var_cnt]) != 2:
sim.axs[var_cnt][L].relim()
tmp = sim.axs[var_cnt][L].get_ylim()
sim.axs[var_cnt][L].set_ylim([-np.max(np.abs(tmp)), np.max(np.abs(tmp))]);
sim.axs[var_cnt][L].autoscale_view()
plt.draw()
sim.fig.savefig('Outputs/{0:s}/Frames/frame_{1:05d}.png'.format(sim.run_name,sim.frame_count))
sim.frame_count += 1
| mit |
ycaihua/scikit-learn | sklearn/cluster/birch.py | 6 | 22656 | # Authors: Manoj Kumar <[email protected]>
# Alexandre Gramfort <[email protected]>
# Joel Nothman <[email protected]>
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
from scipy import sparse
from math import sqrt
from ..metrics.pairwise import euclidean_distances
from ..base import TransformerMixin, ClusterMixin, BaseEstimator
from ..externals.six.moves import xrange
from ..utils import check_array
from ..utils.extmath import row_norms, safe_sparse_dot
from ..utils.validation import NotFittedError, check_is_fitted
from .hierarchical import AgglomerativeClustering
def _iterate_sparse_X(X):
"""This little hack returns a densified row when iterating over a sparse
matrix, instead of constructing a sparse matrix for every row, which is
expensive.
"""
n_samples = X.shape[0]
X_indices = X.indices
X_data = X.data
X_indptr = X.indptr
for i in xrange(n_samples):
row = np.zeros(X.shape[1])
startptr, endptr = X_indptr[i], X_indptr[i + 1]
nonzero_indices = X_indices[startptr:endptr]
row[nonzero_indices] = X_data[startptr:endptr]
yield row
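# Usage sketch for _iterate_sparse_X above (added comment, not in the original
# source): for a CSR matrix X, ``for row in _iterate_sparse_X(X)`` yields one
# dense 1-D ndarray of length X.shape[1] per sample, which the CF-tree
# construction loop in Birch._fit consumes.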
def _split_node(node, threshold, branching_factor):
"""The node has to be split if there is no place for a new subcluster
in the node.
1. Two empty nodes and two empty subclusters are initialized.
2. The pair of distant subclusters are found.
3. The properties of the empty subclusters and nodes are updated
according to the nearest distance between the subclusters to the
pair of distant subclusters.
4. The two nodes are set as children to the two subclusters.
"""
new_subcluster1 = _CFSubcluster()
new_subcluster2 = _CFSubcluster()
new_node1 = _CFNode(
threshold, branching_factor, is_leaf=node.is_leaf,
n_features=node.n_features
)
new_node2 = _CFNode(
threshold, branching_factor, is_leaf=node.is_leaf,
n_features=node.n_features
)
new_subcluster1.child_ = new_node1
new_subcluster2.child_ = new_node2
if node.is_leaf:
if node.prev_leaf_ is not None:
node.prev_leaf_.next_leaf_ = new_node1
new_node1.prev_leaf_ = node.prev_leaf_
new_node1.next_leaf_ = new_node2
new_node2.prev_leaf_ = new_node1
new_node2.next_leaf_ = node.next_leaf_
if node.next_leaf_ is not None:
node.next_leaf_.prev_leaf_ = new_node2
dist = euclidean_distances(
node.centroids_, Y_norm_squared=node.squared_norm_, squared=True)
n_clusters = dist.shape[0]
farthest_idx = np.unravel_index(
dist.argmax(), (n_clusters, n_clusters))
node1_dist, node2_dist = dist[[farthest_idx]]
node1_closer = node1_dist < node2_dist
for idx, subcluster in enumerate(node.subclusters_):
if node1_closer[idx]:
new_node1.append_subcluster(subcluster)
new_subcluster1.update(subcluster)
else:
new_node2.append_subcluster(subcluster)
new_subcluster2.update(subcluster)
return new_subcluster1, new_subcluster2
class _CFNode(object):
"""Each node in a CFTree is called a CFNode.
The CFNode can have a maximum of branching_factor
number of CFSubclusters.
Parameters
----------
threshold : float
Threshold needed for a new subcluster to enter a CFSubcluster.
branching_factor : int
Maximum number of CF subclusters in each node.
is_leaf : bool
We need to know if the CFNode is a leaf or not, in order to
retrieve the final subclusters.
n_features : int
The number of features.
Attributes
----------
subclusters_ : array-like
list of subclusters for a particular CFNode.
prev_leaf_ : _CFNode
prev_leaf. Useful only if is_leaf is True.
next_leaf_ : _CFNode
next_leaf. Useful only if is_leaf is True, for retrieving the final subclusters.
init_centroids_ : ndarray, shape (branching_factor + 1, n_features)
manipulate init_centroids_ throughout rather than centroids_ since
the centroids are just a view of the init_centroids_ .
init_sq_norm_ : ndarray, shape (branching_factor + 1,)
manipulate init_sq_norm_ throughout. similar to init_centroids_.
centroids_ : ndarray
view of init_centroids_.
squared_norm_ : ndarray
view of init_sq_norm_.
"""
def __init__(self, threshold, branching_factor, is_leaf, n_features):
self.threshold = threshold
self.branching_factor = branching_factor
self.is_leaf = is_leaf
self.n_features = n_features
# The list of subclusters, centroids and squared norms
# to manipulate throughout.
self.subclusters_ = []
self.init_centroids_ = np.zeros((branching_factor + 1, n_features))
self.init_sq_norm_ = np.zeros((branching_factor + 1))
self.squared_norm_ = []
self.prev_leaf_ = None
self.next_leaf_ = None
def append_subcluster(self, subcluster):
n_samples = len(self.subclusters_)
self.subclusters_.append(subcluster)
self.init_centroids_[n_samples] = subcluster.centroid_
self.init_sq_norm_[n_samples] = subcluster.sq_norm_
# Keep centroids and squared norm as views. In this way
# if we change init_centroids and init_sq_norm_, it is
# sufficient,
self.centroids_ = self.init_centroids_[:n_samples + 1, :]
self.squared_norm_ = self.init_sq_norm_[:n_samples + 1]
def update_split_subclusters(self, subcluster,
new_subcluster1, new_subcluster2):
"""Remove a subcluster from a node and update it with the
split subclusters.
"""
ind = self.subclusters_.index(subcluster)
self.subclusters_[ind] = new_subcluster1
self.init_centroids_[ind] = new_subcluster1.centroid_
self.init_sq_norm_[ind] = new_subcluster1.sq_norm_
self.append_subcluster(new_subcluster2)
def insert_cf_subcluster(self, subcluster):
"""Insert a new subcluster into the node."""
if not self.subclusters_:
self.append_subcluster(subcluster)
return False
threshold = self.threshold
branching_factor = self.branching_factor
# We need to find the closest subcluster among all the
# subclusters so that we can insert our new subcluster.
dist_matrix = np.dot(self.centroids_, subcluster.centroid_)
dist_matrix *= -2.
dist_matrix += self.squared_norm_
closest_index = np.argmin(dist_matrix)
closest_subcluster = self.subclusters_[closest_index]
# If the subcluster has a child, we need a recursive strategy.
if closest_subcluster.child_ is not None:
split_child = closest_subcluster.child_.insert_cf_subcluster(
subcluster)
if not split_child:
# If it is determined that the child need not be split, we
# can just update the closest_subcluster
closest_subcluster.update(subcluster)
self.init_centroids_[closest_index] = \
self.subclusters_[closest_index].centroid_
self.init_sq_norm_[closest_index] = \
self.subclusters_[closest_index].sq_norm_
return False
# things not too good. we need to redistribute the subclusters in
# our child node, and add a new subcluster in the parent
# subcluster to accommodate the new child.
else:
new_subcluster1, new_subcluster2 = _split_node(
closest_subcluster.child_, threshold, branching_factor)
self.update_split_subclusters(
closest_subcluster, new_subcluster1, new_subcluster2)
if len(self.subclusters_) > self.branching_factor:
return True
return False
# good to go!
else:
merged = closest_subcluster.merge_subcluster(
subcluster, self.threshold)
if merged:
self.init_centroids_[closest_index] = \
closest_subcluster.centroid_
self.init_sq_norm_[closest_index] = \
closest_subcluster.sq_norm_
return False
# not close to any other subclusters, and we still
# have space, so add.
elif len(self.subclusters_) < self.branching_factor:
self.append_subcluster(subcluster)
return False
# We do not have enough space nor is it closer to an
# other subcluster. We need to split.
else:
self.append_subcluster(subcluster)
return True
class _CFSubcluster(object):
"""Each subcluster in a CFNode is called a CFSubcluster.
A CFSubcluster can have a CFNode as its child.
Parameters
----------
linear_sum : ndarray, shape (n_features,), optional
Sample. This is kept optional to allow initialization of empty
subclusters.
Attributes
----------
n_samples_ : int
Number of samples that belong to each subcluster.
linear_sum_ : ndarray
Linear sum of all the samples in a subcluster. Prevents holding
all sample data in memory.
squared_sum_ : float
Sum of the squared l2 norms of all samples belonging to a subcluster.
centroid_ : ndarray
Centroid of the subcluster. Prevent recomputing of centroids when
CFNode.centroids_ is called.
child_ : _CFNode
Child Node of the subcluster. Once a given _CFNode is set as the child
of the _CFNode, it is set to self.child_.
sq_norm_ : ndarray
Squared norm of the subcluster. Used to prevent recomputing when
pairwise minimum distances are computed.
"""
def __init__(self, linear_sum=None):
if linear_sum is None:
self.n_samples_ = 0
self.squared_sum_ = 0.0
self.linear_sum_ = 0
else:
self.n_samples_ = 1
self.centroid_ = self.linear_sum_ = linear_sum
self.squared_sum_ = self.sq_norm_ = np.dot(
self.linear_sum_, self.linear_sum_)
self.child_ = None
def update(self, subcluster):
self.n_samples_ += subcluster.n_samples_
self.linear_sum_ += subcluster.linear_sum_
self.squared_sum_ += subcluster.squared_sum_
self.centroid_ = self.linear_sum_ / self.n_samples_
self.sq_norm_ = np.dot(self.centroid_, self.centroid_)
def merge_subcluster(self, nominee_cluster, threshold):
"""Check if a cluster is worthy enough to be merged. If
yes then merge.
"""
new_ss = self.squared_sum_ + nominee_cluster.squared_sum_
new_ls = self.linear_sum_ + nominee_cluster.linear_sum_
new_n = self.n_samples_ + nominee_cluster.n_samples_
new_centroid = (1 / new_n) * new_ls
new_norm = np.dot(new_centroid, new_centroid)
dot_product = (-2 * new_n) * new_norm
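# Note on the radius check below: with n = new_n, SS = new_ss and c the new
# centroid, sq_radius simplifies to SS / n - ||c||^2, i.e. the mean squared
# distance of the merged samples from their centroid; the merge is accepted
# only if this does not exceed threshold ** 2.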
sq_radius = (new_ss + dot_product) / new_n + new_norm
if sq_radius <= threshold ** 2:
(self.n_samples_, self.linear_sum_, self.squared_sum_,
self.centroid_, self.sq_norm_) = \
new_n, new_ls, new_ss, new_centroid, new_norm
return True
return False
@property
def radius(self):
"""Return radius of the subcluster"""
dot_product = -2 * np.dot(self.linear_sum_, self.centroid_)
return sqrt(
((self.squared_sum_ + dot_product) / self.n_samples_) +
self.sq_norm_
)
class Birch(BaseEstimator, TransformerMixin, ClusterMixin):
"""Implements the Birch clustering algorithm.
Every new sample is inserted into the root of the Clustering Feature
Tree. It is then clubbed together with the subcluster that has the
centroid closest to the new sample. This is done recursively till it
ends up at the leaf subcluster of the tree that has the closest centroid.
Parameters
----------
threshold : float, default 0.5
The radius of the subcluster obtained by merging a new sample and the
closest subcluster should be less than the threshold. Otherwise a new
subcluster is started.
branching_factor : int, default 50
Maximum number of CF subclusters in each node. If a new sample enters
such that the number of subclusters exceeds the branching_factor then
the node has to be split. The corresponding parent also has to be
split and if the number of subclusters in the parent is greater than
the branching factor, then it has to be split recursively.
n_clusters : int, instance of sklearn.cluster model, default None
Number of clusters after the final clustering step, which treats the
subclusters from the leaves as new samples. By default, this final
clustering step is not performed and the subclusters are returned
as they are. If a model is provided, the model is fit treating
the subclusters as new samples and the initial data is mapped to the
label of the closest subcluster. If an int is provided, the model
fit is AgglomerativeClustering with n_clusters set to the int.
compute_labels : bool, default True
Whether or not to compute labels for each fit.
copy : bool, default True
Whether or not to make a copy of the given data. If set to False,
the initial data will be overwritten.
Attributes
----------
root_ : _CFNode
Root of the CFTree.
dummy_leaf_ : _CFNode
Start pointer to all the leaves.
subcluster_centers_ : ndarray,
Centroids of all subclusters read directly from the leaves.
subcluster_labels_ : ndarray,
Labels assigned to the centroids of the subclusters after
they are clustered globally.
labels_ : ndarray, shape (n_samples,)
Array of labels assigned to the input data.
if partial_fit is used instead of fit, they are assigned to the
last batch of data.
Examples
--------
>>> from sklearn.cluster import Birch
>>> X = [[0, 1], [0.3, 1], [-0.3, 1], [0, -1], [0.3, -1], [-0.3, -1]]
>>> brc = Birch(branching_factor=50, n_clusters=None, threshold=0.5,
... compute_labels=True)
>>> brc.fit(X)
Birch(branching_factor=50, compute_labels=True, copy=True, n_clusters=None,
threshold=0.5)
>>> brc.predict(X)
array([0, 0, 0, 1, 1, 1])
References
----------
* Tian Zhang, Raghu Ramakrishnan, Maron Livny
BIRCH: An efficient data clustering method for large databases.
http://www.cs.sfu.ca/CourseCentral/459/han/papers/zhang96.pdf
* Roberto Perdisci
JBirch - Java implementation of BIRCH clustering algorithm
https://code.google.com/p/jbirch/
"""
def __init__(self, threshold=0.5, branching_factor=50, n_clusters=3,
compute_labels=True, copy=True):
self.threshold = threshold
self.branching_factor = branching_factor
self.n_clusters = n_clusters
self.compute_labels = compute_labels
self.copy = copy
def fit(self, X, y=None):
"""
Build a CF Tree for the input data.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data.
"""
self.fit_, self.partial_fit_ = True, False
return self._fit(X)
def _fit(self, X):
X = check_array(X, accept_sparse='csr', copy=self.copy)
threshold = self.threshold
branching_factor = self.branching_factor
if branching_factor <= 1:
raise ValueError("Branching_factor should be greater than one.")
n_samples, n_features = X.shape
# If partial_fit is called for the first time or fit is called, we
# start a new tree.
partial_fit = getattr(self, 'partial_fit_')
has_root = getattr(self, 'root_', None)
if getattr(self, 'fit_') or (partial_fit and not has_root):
# The first root is the leaf. Manipulate this object throughout.
self.root_ = _CFNode(threshold, branching_factor, is_leaf=True,
n_features=n_features)
# To enable getting back subclusters.
self.dummy_leaf_ = _CFNode(threshold, branching_factor,
is_leaf=True, n_features=n_features)
self.dummy_leaf_.next_leaf_ = self.root_
self.root_.prev_leaf_ = self.dummy_leaf_
# Cannot vectorize. Enough to convince to use cython.
if not sparse.issparse(X):
iter_func = iter
else:
iter_func = _iterate_sparse_X
for sample in iter_func(X):
subcluster = _CFSubcluster(linear_sum=sample)
split = self.root_.insert_cf_subcluster(subcluster)
if split:
new_subcluster1, new_subcluster2 = _split_node(
self.root_, threshold, branching_factor)
del self.root_
self.root_ = _CFNode(threshold, branching_factor,
is_leaf=False,
n_features=n_features)
self.root_.append_subcluster(new_subcluster1)
self.root_.append_subcluster(new_subcluster2)
centroids = np.concatenate([
leaf.centroids_ for leaf in self._get_leaves()])
self.subcluster_centers_ = centroids
self._global_clustering(X)
return self
def _get_leaves(self):
"""
Retrieve the leaves of the CF Node.
Returns
-------
leaves: array-like
List of the leaf nodes.
"""
leaf_ptr = self.dummy_leaf_.next_leaf_
leaves = []
while leaf_ptr is not None:
leaves.append(leaf_ptr)
leaf_ptr = leaf_ptr.next_leaf_
return leaves
def partial_fit(self, X=None, y=None):
"""
Online learning. Prevents rebuilding of CFTree from scratch.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features), None
Input data. If X is not provided, only the global clustering
step is done.
"""
self.partial_fit_, self.fit_ = True, False
if X is None:
# Perform just the final global clustering step.
self._global_clustering()
return self
else:
self._check_fit(X)
return self._fit(X)
def _check_fit(self, X):
is_fitted = hasattr(self, 'subcluster_centers_')
# Called by partial_fit, before fitting.
has_partial_fit = hasattr(self, 'partial_fit_')
# Should raise an error if one does not fit before predicting.
if not (is_fitted or has_partial_fit):
raise NotFittedError("Fit training data before predicting")
if is_fitted and X.shape[1] != self.subcluster_centers_.shape[1]:
raise ValueError(
"Training data and predicted data do "
"not have same number of features.")
def predict(self, X):
"""
Predict data using the centroids_ of subclusters.
Avoid computation of the row norms of X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data.
Returns
-------
labels: ndarray, shape(n_samples)
Labelled data.
"""
X = check_array(X, accept_sparse='csr')
self._check_fit(X)
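# Implementation note: argmin_j ||x - c_j||^2 is found via the expansion
# ||x||^2 - 2 x . c_j + ||c_j||^2; the ||x||^2 term is constant per sample,
# so only the "reduced distance" -2 * X . C^T + ||c_j||^2 is computed below.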
reduced_distance = safe_sparse_dot(X, self.subcluster_centers_.T)
reduced_distance *= -2
reduced_distance += self._subcluster_norms
return self.subcluster_labels_[np.argmin(reduced_distance, axis=1)]
def transform(self, X, y=None):
"""
Transform X into subcluster centroids dimension.
Each dimension represents the distance from the sample point to each
cluster centroid.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data.
Returns
-------
X_trans : {array-like, sparse matrix}, shape (n_samples, n_clusters)
Transformed data.
"""
check_is_fitted(self, 'subcluster_centers_')
return euclidean_distances(X, self.subcluster_centers_)
def _global_clustering(self, X=None):
"""
Global clustering for the subclusters obtained after fitting
"""
clusterer = self.n_clusters
centroids = self.subcluster_centers_
compute_labels = (X is not None) and self.compute_labels
# Preprocessing for the global clustering.
not_enough_centroids = False
if isinstance(clusterer, int):
clusterer = AgglomerativeClustering(
n_clusters=self.n_clusters)
# There is no need to perform the global clustering step.
if len(centroids) < self.n_clusters:
not_enough_centroids = True
elif (clusterer is not None and not
hasattr(clusterer, 'fit_predict')):
raise ValueError("n_clusters should be an instance of "
"ClusterMixin or an int")
# To use in predict to avoid recalculation.
self._subcluster_norms = row_norms(
self.subcluster_centers_, squared=True)
if clusterer is None or not_enough_centroids:
self.subcluster_labels_ = np.arange(len(centroids))
if not_enough_centroids:
warnings.warn(
"Number of subclusters found (%d) by Birch is less "
"than (%d). Decrease the threshold."
% (len(centroids), self.n_clusters))
else:
# The global clustering step that clusters the subclusters of
# the leaves. It assumes the centroids of the subclusters as
# samples and finds the final centroids.
self.subcluster_labels_ = clusterer.fit_predict(
self.subcluster_centers_)
if compute_labels:
self.labels_ = self.predict(X)
| bsd-3-clause |
kshedstrom/pyroms | examples/Yellow_Sea/Inputs/Initial/remap.py | 1 | 4914 | import numpy as np
import os
try:
import netCDF4 as netCDF
except:
import netCDF3 as netCDF
import matplotlib.pyplot as plt
import time
from datetime import datetime
from matplotlib.dates import date2num, num2date
import pyroms
import pyroms_toolbox
import _remapping
class nctime(object):
pass
def remap(src_file, src_varname, src_grd, dst_grd, dmax=0, cdepth=0, kk=0, dst_dir='./'):
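# Overview (added comment): build a ROMS initial-condition file on the YELLOW
# grid by (1) flooding the SODA source field over masked/missing points,
# (2) horizontal interpolation with precomputed SCRIP weights, and, for 3-D
# fields, (3) vertical interpolation from z levels onto the ROMS sigma grid.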
# YELLOW grid sub-sample
xrange=(225, 275); yrange=(190, 240)
# get time
nctime.long_name = 'time'
nctime.units = 'days since 1900-01-01 00:00:00'
# time reference "days since 1900-01-01 00:00:00"
ref = datetime(1900, 1, 1, 0, 0, 0)
ref = date2num(ref)
tag = src_file.rsplit('/')[-1].rsplit('_')[-1].rsplit('-')[0]
year = int(tag[:4])
month = int(tag[4:6])
day = int(tag[6:])
time = datetime(year, month, day, 0, 0, 0)
time = date2num(time)
time = time - ref
time = time + 2.5 # 5-day average
# create IC file
dst_file = src_file.rsplit('/')[-1]
dst_file = dst_dir + dst_file[:-4] + '_' + src_varname + '_ic_' + dst_grd.name + '.nc'
print '\nCreating file', dst_file
if os.path.exists(dst_file) is True:
os.remove(dst_file)
pyroms_toolbox.nc_create_roms_file(dst_file, dst_grd, nctime)
# open IC file
nc = netCDF.Dataset(dst_file, 'a', format='NETCDF3_CLASSIC')
#load var
cdf = netCDF.Dataset(src_file)
src_var = cdf.variables[src_varname]
#get missing value
spval = src_var._FillValue
# determine variable dimension
ndim = len(src_var.dimensions)
# YELLOW grid sub-sample
if ndim == 3:
src_var = src_var[:, yrange[0]:yrange[1]+1, xrange[0]:xrange[1]+1]
elif ndim == 2:
src_var = src_var[yrange[0]:yrange[1]+1, xrange[0]:xrange[1]+1]
if src_varname == 'ssh':
Bpos = 't'
Cpos = 'rho'
z = src_grd.z_t
Mp, Lp = dst_grd.hgrid.mask_rho.shape
wts_file = 'remap_weights_SODA_2.1.6_to_YELLOW_bilinear_t_to_rho.nc'
dst_varname = 'zeta'
dimensions = ('ocean_time', 'eta_rho', 'xi_rho')
long_name = 'free-surface'
units = 'meter'
field = 'free-surface, scalar, series'
elif src_varname == 'temp':
Bpos = 't'
Cpos = 'rho'
z = src_grd.z_t
Mp, Lp = dst_grd.hgrid.mask_rho.shape
wts_file = 'remap_weights_SODA_2.1.6_to_YELLOW_bilinear_t_to_rho.nc'
dst_varname = 'temp'
dimensions = ('ocean_time', 's_rho', 'eta_rho', 'xi_rho')
long_name = 'potential temperature'
units = 'Celsius'
field = 'temperature, scalar, series'
elif src_varname == 'salt':
Bpos = 't'
Cpos = 'rho'
z = src_grd.z_t
Mp, Lp = dst_grd.hgrid.mask_rho.shape
wts_file = 'remap_weights_SODA_2.1.6_to_YELLOW_bilinear_t_to_rho.nc'
dst_varname = 'salt'
dimensions = ('ocean_time', 's_rho', 'eta_rho', 'xi_rho')
long_name = 'salinity'
units = 'PSU'
field = 'salinity, scalar, series'
else:
raise ValueError, 'Undefined src_varname'
if ndim == 3:
# build intermediate zgrid
zlevel = -z[::-1,0,0]
nzlevel = len(zlevel)
dst_zcoord = pyroms.vgrid.z_coordinate(dst_grd.vgrid.h, zlevel, nzlevel)
dst_grdz = pyroms.grid.ROMS_Grid(dst_grd.name+'_Z', dst_grd.hgrid, dst_zcoord)
# create variable in file
print 'Creating variable', dst_varname
nc.createVariable(dst_varname, 'f8', dimensions, fill_value=spval)
nc.variables[dst_varname].long_name = long_name
nc.variables[dst_varname].units = units
nc.variables[dst_varname].field = field
# remapping
print 'remapping', dst_varname, 'from', src_grd.name, \
'to', dst_grd.name
print 'time =', time
if ndim == 3:
# flood the grid
print 'flood the grid'
src_varz = pyroms_toolbox.BGrid_SODA.flood(src_var, src_grd, Bpos=Bpos, spval=spval, \
dmax=dmax, cdepth=cdepth, kk=kk)
else:
src_varz = src_var
# horizontal interpolation using scrip weights
print 'horizontal interpolation using scrip weights'
dst_varz = pyroms.remapping.remap(src_varz, wts_file, \
spval=spval)
if ndim == 3:
# vertical interpolation from standard z level to sigma
print 'vertical interpolation from standard z level to sigma'
dst_var = pyroms.remapping.z2roms(dst_varz[::-1,:,:], dst_grdz, \
dst_grd, Cpos=Cpos, spval=spval, flood=False)
else:
dst_var = dst_varz
# write data in destination file
print 'write data in destination file'
nc.variables['ocean_time'][0] = time
nc.variables[dst_varname][0] = dst_var
# close destination file
nc.close()
if src_varname == 'SSH':
return dst_varz
| bsd-3-clause |
smartscheduling/scikit-learn-categorical-tree | benchmarks/bench_20newsgroups.py | 377 | 3555 | from __future__ import print_function, division
from time import time
import argparse
import numpy as np
from sklearn.dummy import DummyClassifier
from sklearn.datasets import fetch_20newsgroups_vectorized
from sklearn.metrics import accuracy_score
from sklearn.utils.validation import check_array
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import MultinomialNB
ESTIMATORS = {
"dummy": DummyClassifier(),
"random_forest": RandomForestClassifier(n_estimators=100,
max_features="sqrt",
min_samples_split=10),
"extra_trees": ExtraTreesClassifier(n_estimators=100,
max_features="sqrt",
min_samples_split=10),
"logistic_regression": LogisticRegression(),
"naive_bayes": MultinomialNB(),
"adaboost": AdaBoostClassifier(n_estimators=10),
}
###############################################################################
# Data
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-e', '--estimators', nargs="+", required=True,
choices=ESTIMATORS)
args = vars(parser.parse_args())
data_train = fetch_20newsgroups_vectorized(subset="train")
data_test = fetch_20newsgroups_vectorized(subset="test")
X_train = check_array(data_train.data, dtype=np.float32,
accept_sparse="csc")
X_test = check_array(data_test.data, dtype=np.float32, accept_sparse="csr")
y_train = data_train.target
y_test = data_test.target
print("20 newsgroups")
print("=============")
print("X_train.shape = {0}".format(X_train.shape))
print("X_train.format = {0}".format(X_train.format))
print("X_train.dtype = {0}".format(X_train.dtype))
print("X_train density = {0}"
"".format(X_train.nnz / np.product(X_train.shape)))
print("y_train {0}".format(y_train.shape))
print("X_test {0}".format(X_test.shape))
print("X_test.format = {0}".format(X_test.format))
print("X_test.dtype = {0}".format(X_test.dtype))
print("y_test {0}".format(y_test.shape))
print()
print("Classifier Training")
print("===================")
accuracy, train_time, test_time = {}, {}, {}
for name in sorted(args["estimators"]):
clf = ESTIMATORS[name]
try:
clf.set_params(random_state=0)
except (TypeError, ValueError):
pass
print("Training %s ... " % name, end="")
t0 = time()
clf.fit(X_train, y_train)
train_time[name] = time() - t0
t0 = time()
y_pred = clf.predict(X_test)
test_time[name] = time() - t0
accuracy[name] = accuracy_score(y_test, y_pred)
print("done")
print()
print("Classification performance:")
print("===========================")
print()
print("%s %s %s %s" % ("Classifier ", "train-time", "test-time",
"Accuracy"))
print("-" * 44)
for name in sorted(accuracy, key=accuracy.get):
print("%s %s %s %s" % (name.ljust(16),
("%.4fs" % train_time[name]).center(10),
("%.4fs" % test_time[name]).center(10),
("%.4f" % accuracy[name]).center(10)))
print()
| bsd-3-clause |
yorkerlin/shogun | examples/undocumented/python_modular/graphical/so_multiclass_director_BMRM.py | 10 | 4344 | #!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
from modshogun import RealFeatures
from modshogun import MulticlassModel, MulticlassSOLabels, RealNumber, DualLibQPBMSOSVM, DirectorStructuredModel
from modshogun import BMRM, PPBMRM, P3BMRM, ResultSet, RealVector
from modshogun import StructuredAccuracy
class MulticlassStructuredModel(DirectorStructuredModel):
def __init__(self,features,labels):
DirectorStructuredModel.__init__(self)
self.set_features(features)
self.set_labels(labels)
self.dim = features.get_dim_feature_space()*labels.get_num_classes()
self.n_classes = labels.get_num_classes()
self.n_feats = features.get_dim_feature_space()
#self.use_director_risk()
def get_dim(self):
return self.dim
def argmax(self,w,feat_idx,training):
feature_vector = self.get_features().get_feature_vector(feat_idx)
label = None
if training == True:
label = int(RealNumber.obtain_from_generic(self.get_labels().get_label(feat_idx)).value)
ypred = 0
max_score = -1e10
for c in xrange(self.n_classes):
score = 0.0
for i in xrange(self.n_feats):
score += w[i+self.n_feats*c]*feature_vector[i]
if training == True:
score += (c!=label)
if score > max_score:
max_score = score
ypred = c
res = ResultSet()
res.score = max_score
res.psi_pred = RealVector(self.dim)
res.psi_pred.zero()
for i in xrange(self.n_feats):
res.psi_pred[i+self.n_feats*ypred] = feature_vector[i]
res.argmax = RealNumber(ypred)
if training == True:
res.delta = (label!=ypred)
res.psi_truth = RealVector(self.dim)
res.psi_truth.zero()
for i in xrange(self.n_feats):
res.psi_truth[i+self.n_feats*label] = feature_vector[i]
for i in xrange(self.n_feats):
res.score -= w[i+self.n_feats*label]*feature_vector[i]
return res
def fill_data(cnt, minv, maxv):
x1 = np.linspace(minv, maxv, cnt)
a, b = np.meshgrid(x1, x1)
X = np.array((np.ravel(a), np.ravel(b)))
y = np.zeros((1, cnt*cnt))
tmp = cnt*cnt;
y[0, tmp/3:(tmp/3)*2]=1
y[0, tmp/3*2:(tmp/3)*3]=2
return X, y.flatten()
def gen_data():
covs = np.array([[[0., -1. ], [2.5, .7]],
[[3., -1.5], [1.2, .3]],
[[ 2, 0 ], [ .0, 1.5 ]]])
X = np.r_[np.dot(np.random.randn(N, dim), covs[0]) + np.array([0, 10]),
np.dot(np.random.randn(N, dim), covs[1]) + np.array([-10, -10]),
np.dot(np.random.randn(N, dim), covs[2]) + np.array([10, -10])];
Y = np.hstack((np.zeros(N), np.ones(N), 2*np.ones(N)))
return X, Y
def get_so_labels(out):
N = out.get_num_labels()
l = np.zeros(N)
for i in xrange(N):
l[i] = RealNumber.obtain_from_generic(out.get_label(i)).value
return l
# Number of classes
M = 3
# Number of samples of each class
N = 10
# Dimension of the data
dim = 2
X, y = gen_data()
cnt = 50
X2, y2 = fill_data(cnt, np.min(X), np.max(X))
labels = MulticlassSOLabels(y)
features = RealFeatures(X.T)
model = MulticlassStructuredModel(features, labels)
lambda_ = 1e1
sosvm = DualLibQPBMSOSVM(model, labels, lambda_)
sosvm.set_cleanAfter(10) # number of iterations that cutting plane has to be inactive for to be removed
sosvm.set_cleanICP(True) # enables inactive cutting plane removal feature
sosvm.set_TolRel(0.001) # set relative tolerance
sosvm.set_verbose(True) # enables verbosity of the solver
sosvm.set_cp_models(16) # set number of cutting plane models
sosvm.set_solver(BMRM) # select training algorithm
#sosvm.set_solver(PPBMRM)
#sosvm.set_solver(P3BMRM)
sosvm.train()
res = sosvm.get_result()
Fps = res.get_hist_Fp_vector()
Fds = res.get_hist_Fd_vector()
wdists = res.get_hist_wdist_vector()
plt.figure()
plt.subplot(221)
plt.title('Fp and Fd history')
plt.plot(xrange(res.get_n_iters()), Fps, hold=True)
plt.plot(xrange(res.get_n_iters()), Fds, hold=True)
plt.subplot(222)
plt.title('w dist history')
plt.plot(xrange(res.get_n_iters()), wdists)
# Evaluation
out = sosvm.apply()
Evaluation = StructuredAccuracy()
acc = Evaluation.evaluate(out, labels)
print "Correct classification rate: %0.4f%%" % ( 100.0*acc )
# show figure
Z = get_so_labels(sosvm.apply(RealFeatures(X2)))
x = (X2[0,:]).reshape(cnt, cnt)
y = (X2[1,:]).reshape(cnt, cnt)
z = Z.reshape(cnt, cnt)
plt.subplot(223)
plt.pcolor(x, y, z)
plt.contour(x, y, z, linewidths=1, colors='black', hold=True)
plt.plot(X[:,0], X[:,1], 'yo')
plt.axis('tight')
plt.title('Classification')
plt.show()
| gpl-3.0 |
mojones/Axelrod | axelrod/plot.py | 2 | 9522 |
from numpy.linalg import LinAlgError
from numpy import arange, mean, median
from warnings import warn
matplotlib_installed = True
try:
import matplotlib.pyplot as plt
import matplotlib.transforms as transforms
from mpl_toolkits.axes_grid1 import make_axes_locatable
except ImportError:
matplotlib_installed = False
class Plot(object):
def __init__(self, result_set):
self.result_set = result_set
self.matplotlib_installed = matplotlib_installed
## Abstract Box and Violin plots
def _boxplot(self, data, names, title=None):
"""For making boxplots."""
if not self.matplotlib_installed:
return None
nplayers = self.result_set.nplayers
width = max(nplayers / 3, 12)
height = width / 2
figure = plt.figure(figsize=(width, height))
plt.boxplot(data)
plt.xticks(self._boxplot_xticks_locations, names, rotation=90)
plt.tick_params(axis='both', which='both', labelsize=8)
if title:
plt.title(title)
return figure
def _violinplot(self, data, names, title=None):
"""For making violinplots."""
if not self.matplotlib_installed:
return None
nplayers = self.result_set.nplayers
width = max(nplayers / 3, 12)
height = width / 2
figure = plt.figure(figsize=(width, height))
spacing = 4
positions = spacing * arange(1, nplayers + 1, 1)
plt.violinplot(data, positions=positions, widths=spacing/2,
showmedians=True, showextrema=False)
plt.xticks(positions, names, rotation=90)
plt.xlim(0, spacing * (nplayers + 1))
plt.tick_params(axis='both', which='both', labelsize=8)
if title:
plt.title(title)
return figure
## Box and Violin plots for mean score, score differences, and wins
@property
def _boxplot_dataset(self):
return [self.result_set.normalised_scores[ir] for ir in self.result_set.ranking]
@property
def _boxplot_xticks_locations(self):
return list(range(1, len(self.result_set.ranked_names) + 2))
@property
def _boxplot_xticks_labels(self):
return [str(n) for n in self.result_set.ranked_names]
@property
def _boxplot_title(self):
return ("Mean score per stage game over {} "
"turns repeated {} times ({} strategies)").format(
self.result_set.turns,
self.result_set.repetitions,
len(self.result_set.ranking))
def boxplot(self):
"""For the specific mean score boxplot."""
data = self._boxplot_dataset
names = self._boxplot_xticks_labels
title = self._boxplot_title
try:
figure = self._violinplot(data, names, title=title)
except LinAlgError:
# Matplotlib doesn't handle single point distributions well
# in violin plots. Should be fixed in next release:
# https://github.com/matplotlib/matplotlib/pull/4816
# Fall back to boxplot
figure = self._boxplot(data, names, title=title)
return figure
@property
def _winplot_dataset(self):
# Sort wins by median
wins = self.result_set.wins
players = self.result_set.players
medians = map(median, wins)
medians = sorted([(m, i) for (i, m) in enumerate(medians)], reverse=True)
# Reorder and grab names
wins = [wins[x[-1]] for x in medians]
ranked_names = [str(players[x[-1]]) for x in medians]
return wins, ranked_names
@property
def _winplot_title(self):
return ("Distributions of wins:"
" {} turns repeated {} times ({} strategies)").format(
self.result_set.turns,
self.result_set.repetitions,
len(self.result_set.ranking))
def winplot(self):
"""Plots the distributions for the number of wins for each strategy."""
if not self.matplotlib_installed:
return None
data, names = self._winplot_dataset
title = self._winplot_title
try:
figure = self._violinplot(data, names, title)
except LinAlgError:
# Matplotlib doesn't handle single point distributions well
# in violin plots. Should be fixed in next release:
# https://github.com/matplotlib/matplotlib/pull/4816
# Fall back to boxplot
figure = self._boxplot(data, names, title)
# Expand ylim a bit
maximum = max(max(w) for w in data)
plt.ylim(-0.5, 0.5 + maximum)
return figure
@property
def _sdv_plot_title(self):
return ("Distributions of payoff differences per stage game over {} "
"turns repeated {} times ({} strategies)").format(
self.result_set.turns,
self.result_set.repetitions,
len(self.result_set.ranking))
@property
def _sd_ordering(self):
return self.result_set.ranking
## Sort by median then max
#from operator import itemgetter
#diffs = self.result_set.score_diffs
#to_sort = [(median(d), max(d), i) for (i, d) in enumerate(diffs)]
#to_sort.sort(reverse=True, key=itemgetter(0, 1))
#ordering = [x[-1] for x in to_sort]
#return ordering
@property
def _sdv_plot_dataset(self):
ordering = self._sd_ordering
diffs = self.result_set.score_diffs
players = self.result_set.players
# Reorder and grab names
diffs = [diffs[i] for i in ordering]
ranked_names = [str(players[i]) for i in ordering]
return diffs, ranked_names
def sdvplot(self):
"""Score difference violinplots to visualize the distributions of how
players attain their payoffs."""
diffs, ranked_names = self._sdv_plot_dataset
title = self._sdv_plot_title
figure = self._violinplot(diffs, ranked_names, title)
return figure
## Payoff heatmaps
@property
def _payoff_dataset(self):
return [[self.result_set.payoff_matrix[r1][r2]
for r2 in self.result_set.ranking]
for r1 in self.result_set.ranking]
@property
def _pdplot_dataset(self):
# Order like the sdv_plot
ordering = self._sd_ordering
pdm = self.result_set.payoff_diffs_means
# Reorder and grab names
matrix = [[pdm[r1][r2] for r2 in ordering]
for r1 in ordering]
players = self.result_set.players
ranked_names = [str(players[i]) for i in ordering]
return matrix, ranked_names
def _payoff_heatmap(self, data, names, title=None):
"""Generic heatmap plot"""
if not self.matplotlib_installed:
return None
nplayers = self.result_set.nplayers
width = max(nplayers / 4, 12)
height = width
figure, ax = plt.subplots()
figure.set_figwidth(width)
figure.set_figheight(height)
mat = ax.matshow(data, cmap='YlGnBu')
plt.xticks(range(self.result_set.nplayers))
plt.yticks(range(self.result_set.nplayers))
ax.set_xticklabels(names, rotation=90)
ax.set_yticklabels(names)
plt.tick_params(axis='both', which='both', labelsize=16)
# Make the colorbar match up with the plot
divider = make_axes_locatable(plt.gca())
cax = divider.append_axes("right", "5%", pad="3%")
plt.colorbar(mat, cax=cax)
if title:
plt.title(title)
return figure
def pdplot(self):
"""Payoff difference heatmap to visualize the distributions of how
players attain their payoffs."""
matrix, names = self._pdplot_dataset
return self._payoff_heatmap(matrix, names)
def payoff(self):
"""Payoff heatmap to visualize the distributions of how
players attain their payoffs."""
data = self._payoff_dataset
names = self.result_set.ranked_names
return self._payoff_heatmap(data, names)
## Ecological Plot
def stackplot(self, eco):
if not self.matplotlib_installed:
return None
if type(eco) is list:
warn("""Passing the population sizes as an argument is deprecated and will be removed, please pass the Ecosystem directly""")
populations = eco
else:
populations = eco.population_sizes
figure, ax = plt.subplots()
turns = range(len(populations))
pops = [[populations[iturn][ir] for iturn in turns] for ir in self.result_set.ranking]
ax.stackplot(turns, *pops)
ax.yaxis.tick_left()
ax.yaxis.set_label_position("right")
ax.yaxis.labelpad = 25.0
plt.ylim([0.0, 1.0])
plt.ylabel('Relative population size')
plt.xlabel('Turn')
plt.title("Strategy population dynamics based on average payoffs")
trans = transforms.blended_transform_factory(ax.transAxes, ax.transData)
ticks = []
for i, n in enumerate(self.result_set.ranked_names):
x = -0.01
y = (i + 0.5) * 1.0 / self.result_set.nplayers
ax.annotate(n, xy=(x, y), xycoords=trans, clip_on=False, va='center', ha='right', fontsize=5)
ticks.append(y)
ax.set_yticks(ticks)
ax.tick_params(direction='out')
ax.set_yticklabels([])
ax.set_xscale('log')
return figure
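# ----------------------------------------------------------------------------
# Usage sketch added for exposition; it is not part of the original file and
# assumes the Axelrod package's Tournament/ResultSet API of the same era. The
# player list and tournament settings below are illustrative values only.
if __name__ == "__main__":
    import axelrod as axl  # only needed for this sketch
    players = [axl.Cooperator(), axl.Defector(), axl.TitForTat(), axl.Grudger()]
    tournament = axl.Tournament(players, turns=200, repetitions=10)
    results = tournament.play()
    plot = Plot(results)
    fig = plot.boxplot()  # falls back from violin to box on LinAlgError
    fig.savefig("mean_scores.png")
    plot.payoff().savefig("payoff_matrix.png")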
| mit |
cbertinato/pandas | pandas/tests/dtypes/test_inference.py | 1 | 49345 | """
These test the public routines exposed in types/common.py
related to inference and not otherwise tested in types/test_common.py
"""
import collections
from datetime import date, datetime, time, timedelta
from decimal import Decimal
from fractions import Fraction
from io import StringIO
from numbers import Number
import re
import numpy as np
import pytest
import pytz
from pandas._libs import iNaT, lib, missing as libmissing
import pandas.util._test_decorators as td
from pandas.core.dtypes import inference
from pandas.core.dtypes.common import (
ensure_categorical, ensure_int32, is_bool, is_datetime64_any_dtype,
is_datetime64_dtype, is_datetime64_ns_dtype, is_datetime64tz_dtype,
is_float, is_integer, is_number, is_scalar, is_scipy_sparse,
is_timedelta64_dtype, is_timedelta64_ns_dtype)
import pandas as pd
from pandas import (
Categorical, DataFrame, DateOffset, DatetimeIndex, Index, Interval, Period,
Series, Timedelta, TimedeltaIndex, Timestamp, isna)
from pandas.util import testing as tm
@pytest.fixture(params=[True, False], ids=str)
def coerce(request):
return request.param
# collect all objects to be tested for list-like-ness; use tuples of objects,
# whether they are list-like or not (special casing for sets), and their ID
ll_params = [
([1], True, 'list'), # noqa: E241
([], True, 'list-empty'), # noqa: E241
((1, ), True, 'tuple'), # noqa: E241
(tuple(), True, 'tuple-empty'), # noqa: E241
({'a': 1}, True, 'dict'), # noqa: E241
(dict(), True, 'dict-empty'), # noqa: E241
({'a', 1}, 'set', 'set'), # noqa: E241
(set(), 'set', 'set-empty'), # noqa: E241
(frozenset({'a', 1}), 'set', 'frozenset'), # noqa: E241
(frozenset(), 'set', 'frozenset-empty'), # noqa: E241
(iter([1, 2]), True, 'iterator'), # noqa: E241
(iter([]), True, 'iterator-empty'), # noqa: E241
((x for x in [1, 2]), True, 'generator'), # noqa: E241
((x for x in []), True, 'generator-empty'), # noqa: E241
(Series([1]), True, 'Series'), # noqa: E241
(Series([]), True, 'Series-empty'), # noqa: E241
(Series(['a']).str, True, 'StringMethods'), # noqa: E241
(Series([], dtype='O').str, True, 'StringMethods-empty'), # noqa: E241
(Index([1]), True, 'Index'), # noqa: E241
(Index([]), True, 'Index-empty'), # noqa: E241
(DataFrame([[1]]), True, 'DataFrame'), # noqa: E241
(DataFrame(), True, 'DataFrame-empty'), # noqa: E241
(np.ndarray((2,) * 1), True, 'ndarray-1d'), # noqa: E241
(np.array([]), True, 'ndarray-1d-empty'), # noqa: E241
(np.ndarray((2,) * 2), True, 'ndarray-2d'), # noqa: E241
(np.array([[]]), True, 'ndarray-2d-empty'), # noqa: E241
(np.ndarray((2,) * 3), True, 'ndarray-3d'), # noqa: E241
(np.array([[[]]]), True, 'ndarray-3d-empty'), # noqa: E241
(np.ndarray((2,) * 4), True, 'ndarray-4d'), # noqa: E241
(np.array([[[[]]]]), True, 'ndarray-4d-empty'), # noqa: E241
(np.array(2), False, 'ndarray-0d'), # noqa: E241
(1, False, 'int'), # noqa: E241
(b'123', False, 'bytes'), # noqa: E241
(b'', False, 'bytes-empty'), # noqa: E241
('123', False, 'string'), # noqa: E241
('', False, 'string-empty'), # noqa: E241
(str, False, 'string-type'), # noqa: E241
(object(), False, 'object'), # noqa: E241
(np.nan, False, 'NaN'), # noqa: E241
(None, False, 'None') # noqa: E241
]
objs, expected, ids = zip(*ll_params)
@pytest.fixture(params=zip(objs, expected), ids=ids)
def maybe_list_like(request):
return request.param
def test_is_list_like(maybe_list_like):
obj, expected = maybe_list_like
expected = True if expected == 'set' else expected
assert inference.is_list_like(obj) == expected
def test_is_list_like_disallow_sets(maybe_list_like):
obj, expected = maybe_list_like
expected = False if expected == 'set' else expected
assert inference.is_list_like(obj, allow_sets=False) == expected
def test_is_sequence():
is_seq = inference.is_sequence
assert (is_seq((1, 2)))
assert (is_seq([1, 2]))
assert (not is_seq("abcd"))
assert (not is_seq(np.int64))
class A:
def __getitem__(self):
return 1
assert (not is_seq(A()))
def test_is_array_like():
assert inference.is_array_like(Series([]))
assert inference.is_array_like(Series([1, 2]))
assert inference.is_array_like(np.array(["a", "b"]))
assert inference.is_array_like(Index(["2016-01-01"]))
class DtypeList(list):
dtype = "special"
assert inference.is_array_like(DtypeList())
assert not inference.is_array_like([1, 2, 3])
assert not inference.is_array_like(tuple())
assert not inference.is_array_like("foo")
assert not inference.is_array_like(123)
@pytest.mark.parametrize('inner', [
[], [1], (1, ), (1, 2), {'a': 1}, {1, 'a'}, Series([1]),
Series([]), Series(['a']).str, (x for x in range(5))
])
@pytest.mark.parametrize('outer', [
list, Series, np.array, tuple
])
def test_is_nested_list_like_passes(inner, outer):
result = outer([inner for _ in range(5)])
assert inference.is_list_like(result)
@pytest.mark.parametrize('obj', [
'abc', [], [1], (1,), ['a'], 'a', {'a'},
[1, 2, 3], Series([1]), DataFrame({"A": [1]}),
([1, 2] for _ in range(5)),
])
def test_is_nested_list_like_fails(obj):
assert not inference.is_nested_list_like(obj)
@pytest.mark.parametrize(
"ll", [{}, {'A': 1}, Series([1]), collections.defaultdict()])
def test_is_dict_like_passes(ll):
assert inference.is_dict_like(ll)
@pytest.mark.parametrize("ll", [
'1', 1, [1, 2], (1, 2), range(2), Index([1]),
dict, collections.defaultdict, Series
])
def test_is_dict_like_fails(ll):
assert not inference.is_dict_like(ll)
@pytest.mark.parametrize("has_keys", [True, False])
@pytest.mark.parametrize("has_getitem", [True, False])
@pytest.mark.parametrize("has_contains", [True, False])
def test_is_dict_like_duck_type(has_keys, has_getitem, has_contains):
class DictLike:
def __init__(self, d):
self.d = d
if has_keys:
def keys(self):
return self.d.keys()
if has_getitem:
def __getitem__(self, key):
return self.d.__getitem__(key)
if has_contains:
def __contains__(self, key):
return self.d.__contains__(key)
d = DictLike({1: 2})
result = inference.is_dict_like(d)
expected = has_keys and has_getitem and has_contains
assert result is expected
def test_is_file_like():
class MockFile:
pass
is_file = inference.is_file_like
data = StringIO("data")
assert is_file(data)
# No read / write attributes
# No iterator attributes
m = MockFile()
assert not is_file(m)
MockFile.write = lambda self: 0
# Write attribute but not an iterator
m = MockFile()
assert not is_file(m)
# gh-16530: Valid iterator just means we have the
# __iter__ attribute for our purposes.
MockFile.__iter__ = lambda self: self
# Valid write-only file
m = MockFile()
assert is_file(m)
del MockFile.write
MockFile.read = lambda self: 0
# Valid read-only file
m = MockFile()
assert is_file(m)
# Iterator but no read / write attributes
data = [1, 2, 3]
assert not is_file(data)
@pytest.mark.parametrize(
"ll", [collections.namedtuple('Test', list('abc'))(1, 2, 3)])
def test_is_names_tuple_passes(ll):
assert inference.is_named_tuple(ll)
@pytest.mark.parametrize(
"ll", [(1, 2, 3), 'a', Series({'pi': 3.14})])
def test_is_names_tuple_fails(ll):
assert not inference.is_named_tuple(ll)
def test_is_hashable():
# all new-style classes are hashable by default
class HashableClass:
pass
class UnhashableClass1:
__hash__ = None
class UnhashableClass2:
def __hash__(self):
raise TypeError("Not hashable")
hashable = (1,
3.14,
np.float64(3.14),
'a',
tuple(),
(1, ),
HashableClass(), )
not_hashable = ([], UnhashableClass1(), )
abc_hashable_not_really_hashable = (([], ), UnhashableClass2(), )
for i in hashable:
assert inference.is_hashable(i)
for i in not_hashable:
assert not inference.is_hashable(i)
for i in abc_hashable_not_really_hashable:
assert not inference.is_hashable(i)
# numpy.array is no longer collections.abc.Hashable as of
# https://github.com/numpy/numpy/pull/5326, just test
# is_hashable()
assert not inference.is_hashable(np.array([]))
@pytest.mark.parametrize(
"ll", [re.compile('ad')])
def test_is_re_passes(ll):
assert inference.is_re(ll)
@pytest.mark.parametrize(
"ll", ['x', 2, 3, object()])
def test_is_re_fails(ll):
assert not inference.is_re(ll)
@pytest.mark.parametrize(
"ll", [r'a', 'x',
r'asdf',
re.compile('adsf'),
r'\u2233\s*',
re.compile(r'')])
def test_is_recompilable_passes(ll):
assert inference.is_re_compilable(ll)
@pytest.mark.parametrize(
"ll", [1, [], object()])
def test_is_recompilable_fails(ll):
assert not inference.is_re_compilable(ll)
class TestInference:
def test_infer_dtype_bytes(self):
compare = 'bytes'
# string array of bytes
arr = np.array(list('abc'), dtype='S1')
assert lib.infer_dtype(arr, skipna=True) == compare
# object array of bytes
arr = arr.astype(object)
assert lib.infer_dtype(arr, skipna=True) == compare
# object array of bytes with missing values
assert lib.infer_dtype([b'a', np.nan, b'c'], skipna=True) == compare
def test_isinf_scalar(self):
# GH 11352
assert libmissing.isposinf_scalar(float('inf'))
assert libmissing.isposinf_scalar(np.inf)
assert not libmissing.isposinf_scalar(-np.inf)
assert not libmissing.isposinf_scalar(1)
assert not libmissing.isposinf_scalar('a')
assert libmissing.isneginf_scalar(float('-inf'))
assert libmissing.isneginf_scalar(-np.inf)
assert not libmissing.isneginf_scalar(np.inf)
assert not libmissing.isneginf_scalar(1)
assert not libmissing.isneginf_scalar('a')
def test_maybe_convert_numeric_infinities(self):
# see gh-13274
infinities = ['inf', 'inF', 'iNf', 'Inf',
'iNF', 'InF', 'INf', 'INF']
na_values = {'', 'NULL', 'nan'}
pos = np.array(['inf'], dtype=np.float64)
neg = np.array(['-inf'], dtype=np.float64)
msg = "Unable to parse string"
for infinity in infinities:
for maybe_int in (True, False):
out = lib.maybe_convert_numeric(
np.array([infinity], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, pos)
out = lib.maybe_convert_numeric(
np.array(['-' + infinity], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, neg)
out = lib.maybe_convert_numeric(
np.array([infinity], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, pos)
out = lib.maybe_convert_numeric(
np.array(['+' + infinity], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, pos)
# too many characters
with pytest.raises(ValueError, match=msg):
lib.maybe_convert_numeric(
np.array(['foo_' + infinity], dtype=object),
na_values, maybe_int)
def test_maybe_convert_numeric_post_floatify_nan(self, coerce):
# see gh-13314
data = np.array(['1.200', '-999.000', '4.500'], dtype=object)
expected = np.array([1.2, np.nan, 4.5], dtype=np.float64)
nan_values = {-999, -999.0}
out = lib.maybe_convert_numeric(data, nan_values, coerce)
tm.assert_numpy_array_equal(out, expected)
def test_convert_infs(self):
arr = np.array(['inf', 'inf', 'inf'], dtype='O')
result = lib.maybe_convert_numeric(arr, set(), False)
assert result.dtype == np.float64
arr = np.array(['-inf', '-inf', '-inf'], dtype='O')
result = lib.maybe_convert_numeric(arr, set(), False)
assert result.dtype == np.float64
def test_scientific_no_exponent(self):
# See PR 12215
arr = np.array(['42E', '2E', '99e', '6e'], dtype='O')
result = lib.maybe_convert_numeric(arr, set(), False, True)
assert np.all(np.isnan(result))
def test_convert_non_hashable(self):
# GH13324
# make sure that we are handing non-hashables
arr = np.array([[10.0, 2], 1.0, 'apple'])
result = lib.maybe_convert_numeric(arr, set(), False, True)
tm.assert_numpy_array_equal(result, np.array([np.nan, 1.0, np.nan]))
def test_convert_numeric_uint64(self):
arr = np.array([2**63], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set()), exp)
arr = np.array([str(2**63)], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set()), exp)
arr = np.array([np.uint64(2**63)], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set()), exp)
@pytest.mark.parametrize("arr", [
np.array([2**63, np.nan], dtype=object),
np.array([str(2**63), np.nan], dtype=object),
np.array([np.nan, 2**63], dtype=object),
np.array([np.nan, str(2**63)], dtype=object)])
def test_convert_numeric_uint64_nan(self, coerce, arr):
expected = arr.astype(float) if coerce else arr.copy()
result = lib.maybe_convert_numeric(arr, set(),
coerce_numeric=coerce)
tm.assert_almost_equal(result, expected)
def test_convert_numeric_uint64_nan_values(self, coerce):
arr = np.array([2**63, 2**63 + 1], dtype=object)
na_values = {2**63}
expected = (np.array([np.nan, 2**63 + 1], dtype=float)
if coerce else arr.copy())
result = lib.maybe_convert_numeric(arr, na_values,
coerce_numeric=coerce)
tm.assert_almost_equal(result, expected)
@pytest.mark.parametrize("case", [
np.array([2**63, -1], dtype=object),
np.array([str(2**63), -1], dtype=object),
np.array([str(2**63), str(-1)], dtype=object),
np.array([-1, 2**63], dtype=object),
np.array([-1, str(2**63)], dtype=object),
np.array([str(-1), str(2**63)], dtype=object)])
def test_convert_numeric_int64_uint64(self, case, coerce):
expected = case.astype(float) if coerce else case.copy()
result = lib.maybe_convert_numeric(case, set(), coerce_numeric=coerce)
tm.assert_almost_equal(result, expected)
@pytest.mark.parametrize("value", [-2**63 - 1, 2**64])
def test_convert_int_overflow(self, value):
# see gh-18584
arr = np.array([value], dtype=object)
result = lib.maybe_convert_objects(arr)
tm.assert_numpy_array_equal(arr, result)
def test_maybe_convert_objects_uint64(self):
# see gh-4471
arr = np.array([2**63], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
# NumPy bug: can't compare uint64 to int64, as that
# results in both casting to float64, so we should
        # make sure that this function is robust against it (a short
        # promotion check follows this test)
arr = np.array([np.uint64(2**63)], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
arr = np.array([2, -1], dtype=object)
exp = np.array([2, -1], dtype=np.int64)
tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
arr = np.array([2**63, -1], dtype=object)
exp = np.array([2**63, -1], dtype=object)
tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
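    # Added illustration (not part of the original test suite) of the promotion
    # behaviour referenced in the comment above: mixing uint64 and int64 makes
    # NumPy fall back to float64, which cannot represent values near 2**63
    # exactly -- hence maybe_convert_objects keeps the object dtype instead.
    def test_uint64_int64_promotion_sketch(self):
        assert np.promote_types(np.uint64, np.int64) == np.float64
        # float64 has only 53 bits of mantissa, so nearby large ints collide
        assert np.float64(2**63) == np.float64(2**63 + 1)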
def test_mixed_dtypes_remain_object_array(self):
# GH14956
array = np.array([datetime(2015, 1, 1, tzinfo=pytz.utc), 1],
dtype=object)
result = lib.maybe_convert_objects(array, convert_datetime=1)
tm.assert_numpy_array_equal(result, array)
class TestTypeInference:
# Dummy class used for testing with Python objects
class Dummy:
pass
def test_inferred_dtype_fixture(self, any_skipna_inferred_dtype):
# see pandas/conftest.py
inferred_dtype, values = any_skipna_inferred_dtype
# make sure the inferred dtype of the fixture is as requested
assert inferred_dtype == lib.infer_dtype(values, skipna=True)
@pytest.mark.parametrize('skipna', [True, False])
def test_length_zero(self, skipna):
result = lib.infer_dtype(np.array([], dtype='i4'), skipna=skipna)
assert result == 'integer'
result = lib.infer_dtype([], skipna=skipna)
assert result == 'empty'
# GH 18004
arr = np.array([np.array([], dtype=object),
np.array([], dtype=object)])
result = lib.infer_dtype(arr, skipna=skipna)
assert result == 'empty'
def test_integers(self):
arr = np.array([1, 2, 3, np.int64(4), np.int32(5)], dtype='O')
result = lib.infer_dtype(arr, skipna=True)
assert result == 'integer'
arr = np.array([1, 2, 3, np.int64(4), np.int32(5), 'foo'], dtype='O')
result = lib.infer_dtype(arr, skipna=True)
assert result == 'mixed-integer'
arr = np.array([1, 2, 3, 4, 5], dtype='i4')
result = lib.infer_dtype(arr, skipna=True)
assert result == 'integer'
def test_deprecation(self):
# GH 24050
arr = np.array([1, 2, 3], dtype=object)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = lib.infer_dtype(arr) # default: skipna=None -> warn
assert result == 'integer'
def test_bools(self):
arr = np.array([True, False, True, True, True], dtype='O')
result = lib.infer_dtype(arr, skipna=True)
assert result == 'boolean'
arr = np.array([np.bool_(True), np.bool_(False)], dtype='O')
result = lib.infer_dtype(arr, skipna=True)
assert result == 'boolean'
arr = np.array([True, False, True, 'foo'], dtype='O')
result = lib.infer_dtype(arr, skipna=True)
assert result == 'mixed'
arr = np.array([True, False, True], dtype=bool)
result = lib.infer_dtype(arr, skipna=True)
assert result == 'boolean'
arr = np.array([True, np.nan, False], dtype='O')
result = lib.infer_dtype(arr, skipna=True)
assert result == 'boolean'
result = lib.infer_dtype(arr, skipna=False)
assert result == 'mixed'
def test_floats(self):
arr = np.array([1., 2., 3., np.float64(4), np.float32(5)], dtype='O')
result = lib.infer_dtype(arr, skipna=True)
assert result == 'floating'
arr = np.array([1, 2, 3, np.float64(4), np.float32(5), 'foo'],
dtype='O')
result = lib.infer_dtype(arr, skipna=True)
assert result == 'mixed-integer'
arr = np.array([1, 2, 3, 4, 5], dtype='f4')
result = lib.infer_dtype(arr, skipna=True)
assert result == 'floating'
arr = np.array([1, 2, 3, 4, 5], dtype='f8')
result = lib.infer_dtype(arr, skipna=True)
assert result == 'floating'
def test_decimals(self):
# GH15690
arr = np.array([Decimal(1), Decimal(2), Decimal(3)])
result = lib.infer_dtype(arr, skipna=True)
assert result == 'decimal'
arr = np.array([1.0, 2.0, Decimal(3)])
result = lib.infer_dtype(arr, skipna=True)
assert result == 'mixed'
arr = np.array([Decimal(1), Decimal('NaN'), Decimal(3)])
result = lib.infer_dtype(arr, skipna=True)
assert result == 'decimal'
arr = np.array([Decimal(1), np.nan, Decimal(3)], dtype='O')
result = lib.infer_dtype(arr, skipna=True)
assert result == 'decimal'
# complex is compatible with nan, so skipna has no effect
@pytest.mark.parametrize('skipna', [True, False])
def test_complex(self, skipna):
# gets cast to complex on array construction
arr = np.array([1.0, 2.0, 1 + 1j])
result = lib.infer_dtype(arr, skipna=skipna)
assert result == 'complex'
arr = np.array([1.0, 2.0, 1 + 1j], dtype='O')
result = lib.infer_dtype(arr, skipna=skipna)
assert result == 'mixed'
# gets cast to complex on array construction
arr = np.array([1, np.nan, 1 + 1j])
result = lib.infer_dtype(arr, skipna=skipna)
assert result == 'complex'
arr = np.array([1.0, np.nan, 1 + 1j], dtype='O')
result = lib.infer_dtype(arr, skipna=skipna)
assert result == 'mixed'
# complex with nans stays complex
arr = np.array([1 + 1j, np.nan, 3 + 3j], dtype='O')
result = lib.infer_dtype(arr, skipna=skipna)
assert result == 'complex'
# test smaller complex dtype; will pass through _try_infer_map fastpath
arr = np.array([1 + 1j, np.nan, 3 + 3j], dtype=np.complex64)
result = lib.infer_dtype(arr, skipna=skipna)
assert result == 'complex'
def test_string(self):
pass
def test_unicode(self):
arr = ['a', np.nan, 'c']
result = lib.infer_dtype(arr, skipna=False)
assert result == 'mixed'
arr = ['a', np.nan, 'c']
result = lib.infer_dtype(arr, skipna=True)
expected = 'string'
assert result == expected
@pytest.mark.parametrize('dtype, missing, skipna, expected', [
(float, np.nan, False, 'floating'),
(float, np.nan, True, 'floating'),
(object, np.nan, False, 'floating'),
(object, np.nan, True, 'empty'),
(object, None, False, 'mixed'),
(object, None, True, 'empty')
])
@pytest.mark.parametrize('box', [pd.Series, np.array])
def test_object_empty(self, box, missing, dtype, skipna, expected):
# GH 23421
arr = box([missing, missing], dtype=dtype)
result = lib.infer_dtype(arr, skipna=skipna)
assert result == expected
def test_datetime(self):
dates = [datetime(2012, 1, x) for x in range(1, 20)]
index = Index(dates)
assert index.inferred_type == 'datetime64'
def test_infer_dtype_datetime(self):
arr = np.array([Timestamp('2011-01-01'),
Timestamp('2011-01-02')])
assert lib.infer_dtype(arr, skipna=True) == 'datetime'
arr = np.array([np.datetime64('2011-01-01'),
np.datetime64('2011-01-01')], dtype=object)
assert lib.infer_dtype(arr, skipna=True) == 'datetime64'
arr = np.array([datetime(2011, 1, 1), datetime(2012, 2, 1)])
assert lib.infer_dtype(arr, skipna=True) == 'datetime'
# starts with nan
for n in [pd.NaT, np.nan]:
arr = np.array([n, pd.Timestamp('2011-01-02')])
assert lib.infer_dtype(arr, skipna=True) == 'datetime'
arr = np.array([n, np.datetime64('2011-01-02')])
assert lib.infer_dtype(arr, skipna=True) == 'datetime64'
arr = np.array([n, datetime(2011, 1, 1)])
assert lib.infer_dtype(arr, skipna=True) == 'datetime'
arr = np.array([n, pd.Timestamp('2011-01-02'), n])
assert lib.infer_dtype(arr, skipna=True) == 'datetime'
arr = np.array([n, np.datetime64('2011-01-02'), n])
assert lib.infer_dtype(arr, skipna=True) == 'datetime64'
arr = np.array([n, datetime(2011, 1, 1), n])
assert lib.infer_dtype(arr, skipna=True) == 'datetime'
# different type of nat
arr = np.array([np.timedelta64('nat'),
np.datetime64('2011-01-02')], dtype=object)
assert lib.infer_dtype(arr, skipna=False) == 'mixed'
arr = np.array([np.datetime64('2011-01-02'),
np.timedelta64('nat')], dtype=object)
assert lib.infer_dtype(arr, skipna=False) == 'mixed'
# mixed datetime
arr = np.array([datetime(2011, 1, 1),
pd.Timestamp('2011-01-02')])
assert lib.infer_dtype(arr, skipna=True) == 'datetime'
# should be datetime?
arr = np.array([np.datetime64('2011-01-01'),
pd.Timestamp('2011-01-02')])
assert lib.infer_dtype(arr, skipna=True) == 'mixed'
arr = np.array([pd.Timestamp('2011-01-02'),
np.datetime64('2011-01-01')])
assert lib.infer_dtype(arr, skipna=True) == 'mixed'
arr = np.array([np.nan, pd.Timestamp('2011-01-02'), 1])
assert lib.infer_dtype(arr, skipna=True) == 'mixed-integer'
arr = np.array([np.nan, pd.Timestamp('2011-01-02'), 1.1])
assert lib.infer_dtype(arr, skipna=True) == 'mixed'
arr = np.array([np.nan, '2011-01-01', pd.Timestamp('2011-01-02')])
assert lib.infer_dtype(arr, skipna=True) == 'mixed'
def test_infer_dtype_timedelta(self):
arr = np.array([pd.Timedelta('1 days'),
pd.Timedelta('2 days')])
assert lib.infer_dtype(arr, skipna=True) == 'timedelta'
arr = np.array([np.timedelta64(1, 'D'),
np.timedelta64(2, 'D')], dtype=object)
assert lib.infer_dtype(arr, skipna=True) == 'timedelta'
arr = np.array([timedelta(1), timedelta(2)])
assert lib.infer_dtype(arr, skipna=True) == 'timedelta'
# starts with nan
for n in [pd.NaT, np.nan]:
arr = np.array([n, Timedelta('1 days')])
assert lib.infer_dtype(arr, skipna=True) == 'timedelta'
arr = np.array([n, np.timedelta64(1, 'D')])
assert lib.infer_dtype(arr, skipna=True) == 'timedelta'
arr = np.array([n, timedelta(1)])
assert lib.infer_dtype(arr, skipna=True) == 'timedelta'
arr = np.array([n, pd.Timedelta('1 days'), n])
assert lib.infer_dtype(arr, skipna=True) == 'timedelta'
arr = np.array([n, np.timedelta64(1, 'D'), n])
assert lib.infer_dtype(arr, skipna=True) == 'timedelta'
arr = np.array([n, timedelta(1), n])
assert lib.infer_dtype(arr, skipna=True) == 'timedelta'
# different type of nat
arr = np.array([np.datetime64('nat'), np.timedelta64(1, 'D')],
dtype=object)
assert lib.infer_dtype(arr, skipna=False) == 'mixed'
arr = np.array([np.timedelta64(1, 'D'), np.datetime64('nat')],
dtype=object)
assert lib.infer_dtype(arr, skipna=False) == 'mixed'
def test_infer_dtype_period(self):
# GH 13664
arr = np.array([pd.Period('2011-01', freq='D'),
pd.Period('2011-02', freq='D')])
assert lib.infer_dtype(arr, skipna=True) == 'period'
arr = np.array([pd.Period('2011-01', freq='D'),
pd.Period('2011-02', freq='M')])
assert lib.infer_dtype(arr, skipna=True) == 'period'
# starts with nan
for n in [pd.NaT, np.nan]:
arr = np.array([n, pd.Period('2011-01', freq='D')])
assert lib.infer_dtype(arr, skipna=True) == 'period'
arr = np.array([n, pd.Period('2011-01', freq='D'), n])
assert lib.infer_dtype(arr, skipna=True) == 'period'
# different type of nat
arr = np.array([np.datetime64('nat'), pd.Period('2011-01', freq='M')],
dtype=object)
assert lib.infer_dtype(arr, skipna=False) == 'mixed'
arr = np.array([pd.Period('2011-01', freq='M'), np.datetime64('nat')],
dtype=object)
assert lib.infer_dtype(arr, skipna=False) == 'mixed'
@pytest.mark.parametrize(
"data",
[
[datetime(2017, 6, 12, 19, 30), datetime(2017, 3, 11, 1, 15)],
[Timestamp("20170612"), Timestamp("20170311")],
[Timestamp("20170612", tz='US/Eastern'),
Timestamp("20170311", tz='US/Eastern')],
[date(2017, 6, 12),
Timestamp("20170311", tz='US/Eastern')],
[np.datetime64("2017-06-12"), np.datetime64("2017-03-11")],
[np.datetime64("2017-06-12"), datetime(2017, 3, 11, 1, 15)]
]
)
def test_infer_datetimelike_array_datetime(self, data):
assert lib.infer_datetimelike_array(data) == "datetime"
@pytest.mark.parametrize(
"data",
[
[timedelta(2017, 6, 12), timedelta(2017, 3, 11)],
[timedelta(2017, 6, 12), date(2017, 3, 11)],
[np.timedelta64(2017, "D"), np.timedelta64(6, "s")],
[np.timedelta64(2017, "D"), timedelta(2017, 3, 11)]
]
)
def test_infer_datetimelike_array_timedelta(self, data):
assert lib.infer_datetimelike_array(data) == "timedelta"
def test_infer_datetimelike_array_date(self):
arr = [date(2017, 6, 12), date(2017, 3, 11)]
assert lib.infer_datetimelike_array(arr) == "date"
@pytest.mark.parametrize(
"data",
[
["2017-06-12", "2017-03-11"],
[20170612, 20170311],
[20170612.5, 20170311.8],
[Dummy(), Dummy()],
[Timestamp("20170612"), Timestamp("20170311", tz='US/Eastern')],
[Timestamp("20170612"), 20170311],
[timedelta(2017, 6, 12), Timestamp("20170311", tz='US/Eastern')]
]
)
def test_infer_datetimelike_array_mixed(self, data):
assert lib.infer_datetimelike_array(data) == "mixed"
@pytest.mark.parametrize(
"first, expected",
[
[[None], "mixed"],
[[np.nan], "mixed"],
[[pd.NaT], "nat"],
[[datetime(2017, 6, 12, 19, 30), pd.NaT], "datetime"],
[[np.datetime64("2017-06-12"), pd.NaT], "datetime"],
[[date(2017, 6, 12), pd.NaT], "date"],
[[timedelta(2017, 6, 12), pd.NaT], "timedelta"],
[[np.timedelta64(2017, "D"), pd.NaT], "timedelta"]
]
)
@pytest.mark.parametrize("second", [None, np.nan])
def test_infer_datetimelike_array_nan_nat_like(self, first, second,
expected):
first.append(second)
assert lib.infer_datetimelike_array(first) == expected
def test_infer_dtype_all_nan_nat_like(self):
arr = np.array([np.nan, np.nan])
assert lib.infer_dtype(arr, skipna=True) == 'floating'
        # a mix of nan and None results in 'mixed' ('empty' when skipna=True)
arr = np.array([np.nan, np.nan, None])
assert lib.infer_dtype(arr, skipna=True) == 'empty'
assert lib.infer_dtype(arr, skipna=False) == 'mixed'
arr = np.array([None, np.nan, np.nan])
assert lib.infer_dtype(arr, skipna=True) == 'empty'
assert lib.infer_dtype(arr, skipna=False) == 'mixed'
# pd.NaT
arr = np.array([pd.NaT])
assert lib.infer_dtype(arr, skipna=False) == 'datetime'
arr = np.array([pd.NaT, np.nan])
assert lib.infer_dtype(arr, skipna=False) == 'datetime'
arr = np.array([np.nan, pd.NaT])
assert lib.infer_dtype(arr, skipna=False) == 'datetime'
arr = np.array([np.nan, pd.NaT, np.nan])
assert lib.infer_dtype(arr, skipna=False) == 'datetime'
arr = np.array([None, pd.NaT, None])
assert lib.infer_dtype(arr, skipna=False) == 'datetime'
# np.datetime64(nat)
arr = np.array([np.datetime64('nat')])
assert lib.infer_dtype(arr, skipna=False) == 'datetime64'
for n in [np.nan, pd.NaT, None]:
arr = np.array([n, np.datetime64('nat'), n])
assert lib.infer_dtype(arr, skipna=False) == 'datetime64'
arr = np.array([pd.NaT, n, np.datetime64('nat'), n])
assert lib.infer_dtype(arr, skipna=False) == 'datetime64'
arr = np.array([np.timedelta64('nat')], dtype=object)
assert lib.infer_dtype(arr, skipna=False) == 'timedelta'
for n in [np.nan, pd.NaT, None]:
arr = np.array([n, np.timedelta64('nat'), n])
assert lib.infer_dtype(arr, skipna=False) == 'timedelta'
arr = np.array([pd.NaT, n, np.timedelta64('nat'), n])
assert lib.infer_dtype(arr, skipna=False) == 'timedelta'
# datetime / timedelta mixed
arr = np.array([pd.NaT, np.datetime64('nat'),
np.timedelta64('nat'), np.nan])
assert lib.infer_dtype(arr, skipna=False) == 'mixed'
arr = np.array([np.timedelta64('nat'), np.datetime64('nat')],
dtype=object)
assert lib.infer_dtype(arr, skipna=False) == 'mixed'
def test_is_datetimelike_array_all_nan_nat_like(self):
arr = np.array([np.nan, pd.NaT, np.datetime64('nat')])
assert lib.is_datetime_array(arr)
assert lib.is_datetime64_array(arr)
assert not lib.is_timedelta_or_timedelta64_array(arr)
arr = np.array([np.nan, pd.NaT, np.timedelta64('nat')])
assert not lib.is_datetime_array(arr)
assert not lib.is_datetime64_array(arr)
assert lib.is_timedelta_or_timedelta64_array(arr)
arr = np.array([np.nan, pd.NaT, np.datetime64('nat'),
np.timedelta64('nat')])
assert not lib.is_datetime_array(arr)
assert not lib.is_datetime64_array(arr)
assert not lib.is_timedelta_or_timedelta64_array(arr)
arr = np.array([np.nan, pd.NaT])
assert lib.is_datetime_array(arr)
assert lib.is_datetime64_array(arr)
assert lib.is_timedelta_or_timedelta64_array(arr)
arr = np.array([np.nan, np.nan], dtype=object)
assert not lib.is_datetime_array(arr)
assert not lib.is_datetime64_array(arr)
assert not lib.is_timedelta_or_timedelta64_array(arr)
assert lib.is_datetime_with_singletz_array(
np.array([pd.Timestamp('20130101', tz='US/Eastern'),
pd.Timestamp('20130102', tz='US/Eastern')],
dtype=object))
assert not lib.is_datetime_with_singletz_array(
np.array([pd.Timestamp('20130101', tz='US/Eastern'),
pd.Timestamp('20130102', tz='CET')],
dtype=object))
@pytest.mark.parametrize(
"func",
[
'is_datetime_array',
'is_datetime64_array',
'is_bool_array',
'is_timedelta_or_timedelta64_array',
'is_date_array',
'is_time_array',
'is_interval_array',
'is_period_array'])
def test_other_dtypes_for_array(self, func):
func = getattr(lib, func)
arr = np.array(['foo', 'bar'])
assert not func(arr)
arr = np.array([1, 2])
assert not func(arr)
def test_date(self):
dates = [date(2012, 1, day) for day in range(1, 20)]
index = Index(dates)
assert index.inferred_type == 'date'
dates = [date(2012, 1, day) for day in range(1, 20)] + [np.nan]
result = lib.infer_dtype(dates, skipna=False)
assert result == 'mixed'
result = lib.infer_dtype(dates, skipna=True)
assert result == 'date'
def test_is_numeric_array(self):
assert lib.is_float_array(np.array([1, 2.0]))
assert lib.is_float_array(np.array([1, 2.0, np.nan]))
assert not lib.is_float_array(np.array([1, 2]))
assert lib.is_integer_array(np.array([1, 2]))
assert not lib.is_integer_array(np.array([1, 2.0]))
def test_is_string_array(self):
assert lib.is_string_array(np.array(['foo', 'bar']))
assert not lib.is_string_array(
np.array(['foo', 'bar', np.nan], dtype=object), skipna=False)
assert lib.is_string_array(
np.array(['foo', 'bar', np.nan], dtype=object), skipna=True)
assert not lib.is_string_array(np.array([1, 2]))
def test_to_object_array_tuples(self):
r = (5, 6)
values = [r]
result = lib.to_object_array_tuples(values)
try:
# make sure record array works
from collections import namedtuple
record = namedtuple('record', 'x y')
r = record(5, 6)
values = [r]
result = lib.to_object_array_tuples(values) # noqa
except ImportError:
pass
def test_object(self):
# GH 7431
# cannot infer more than this as only a single element
arr = np.array([None], dtype='O')
result = lib.infer_dtype(arr, skipna=False)
assert result == 'mixed'
result = lib.infer_dtype(arr, skipna=True)
assert result == 'empty'
def test_to_object_array_width(self):
# see gh-13320
rows = [[1, 2, 3], [4, 5, 6]]
expected = np.array(rows, dtype=object)
out = lib.to_object_array(rows)
tm.assert_numpy_array_equal(out, expected)
expected = np.array(rows, dtype=object)
out = lib.to_object_array(rows, min_width=1)
tm.assert_numpy_array_equal(out, expected)
expected = np.array([[1, 2, 3, None, None],
[4, 5, 6, None, None]], dtype=object)
out = lib.to_object_array(rows, min_width=5)
tm.assert_numpy_array_equal(out, expected)
def test_is_period(self):
assert lib.is_period(pd.Period('2011-01', freq='M'))
assert not lib.is_period(pd.PeriodIndex(['2011-01'], freq='M'))
assert not lib.is_period(pd.Timestamp('2011-01'))
assert not lib.is_period(1)
assert not lib.is_period(np.nan)
def test_categorical(self):
# GH 8974
from pandas import Categorical, Series
arr = Categorical(list('abc'))
result = lib.infer_dtype(arr, skipna=True)
assert result == 'categorical'
result = lib.infer_dtype(Series(arr), skipna=True)
assert result == 'categorical'
arr = Categorical(list('abc'), categories=['cegfab'], ordered=True)
result = lib.infer_dtype(arr, skipna=True)
assert result == 'categorical'
result = lib.infer_dtype(Series(arr), skipna=True)
assert result == 'categorical'
class TestNumberScalar:
def test_is_number(self):
assert is_number(True)
assert is_number(1)
assert is_number(1.1)
assert is_number(1 + 3j)
assert is_number(np.bool(False))
assert is_number(np.int64(1))
assert is_number(np.float64(1.1))
assert is_number(np.complex128(1 + 3j))
assert is_number(np.nan)
assert not is_number(None)
assert not is_number('x')
assert not is_number(datetime(2011, 1, 1))
assert not is_number(np.datetime64('2011-01-01'))
assert not is_number(Timestamp('2011-01-01'))
assert not is_number(Timestamp('2011-01-01', tz='US/Eastern'))
assert not is_number(timedelta(1000))
assert not is_number(Timedelta('1 days'))
# questionable
assert not is_number(np.bool_(False))
assert is_number(np.timedelta64(1, 'D'))
def test_is_bool(self):
assert is_bool(True)
assert is_bool(np.bool(False))
assert is_bool(np.bool_(False))
assert not is_bool(1)
assert not is_bool(1.1)
assert not is_bool(1 + 3j)
assert not is_bool(np.int64(1))
assert not is_bool(np.float64(1.1))
assert not is_bool(np.complex128(1 + 3j))
assert not is_bool(np.nan)
assert not is_bool(None)
assert not is_bool('x')
assert not is_bool(datetime(2011, 1, 1))
assert not is_bool(np.datetime64('2011-01-01'))
assert not is_bool(Timestamp('2011-01-01'))
assert not is_bool(Timestamp('2011-01-01', tz='US/Eastern'))
assert not is_bool(timedelta(1000))
assert not is_bool(np.timedelta64(1, 'D'))
assert not is_bool(Timedelta('1 days'))
def test_is_integer(self):
assert is_integer(1)
assert is_integer(np.int64(1))
assert not is_integer(True)
assert not is_integer(1.1)
assert not is_integer(1 + 3j)
assert not is_integer(np.bool(False))
assert not is_integer(np.bool_(False))
assert not is_integer(np.float64(1.1))
assert not is_integer(np.complex128(1 + 3j))
assert not is_integer(np.nan)
assert not is_integer(None)
assert not is_integer('x')
assert not is_integer(datetime(2011, 1, 1))
assert not is_integer(np.datetime64('2011-01-01'))
assert not is_integer(Timestamp('2011-01-01'))
assert not is_integer(Timestamp('2011-01-01', tz='US/Eastern'))
assert not is_integer(timedelta(1000))
assert not is_integer(Timedelta('1 days'))
# questionable
assert is_integer(np.timedelta64(1, 'D'))
def test_is_float(self):
assert is_float(1.1)
assert is_float(np.float64(1.1))
assert is_float(np.nan)
assert not is_float(True)
assert not is_float(1)
assert not is_float(1 + 3j)
assert not is_float(np.bool(False))
assert not is_float(np.bool_(False))
assert not is_float(np.int64(1))
assert not is_float(np.complex128(1 + 3j))
assert not is_float(None)
assert not is_float('x')
assert not is_float(datetime(2011, 1, 1))
assert not is_float(np.datetime64('2011-01-01'))
assert not is_float(Timestamp('2011-01-01'))
assert not is_float(Timestamp('2011-01-01', tz='US/Eastern'))
assert not is_float(timedelta(1000))
assert not is_float(np.timedelta64(1, 'D'))
assert not is_float(Timedelta('1 days'))
def test_is_datetime_dtypes(self):
ts = pd.date_range('20130101', periods=3)
tsa = pd.date_range('20130101', periods=3, tz='US/Eastern')
assert is_datetime64_dtype('datetime64')
assert is_datetime64_dtype('datetime64[ns]')
assert is_datetime64_dtype(ts)
assert not is_datetime64_dtype(tsa)
assert not is_datetime64_ns_dtype('datetime64')
assert is_datetime64_ns_dtype('datetime64[ns]')
assert is_datetime64_ns_dtype(ts)
assert is_datetime64_ns_dtype(tsa)
assert is_datetime64_any_dtype('datetime64')
assert is_datetime64_any_dtype('datetime64[ns]')
assert is_datetime64_any_dtype(ts)
assert is_datetime64_any_dtype(tsa)
assert not is_datetime64tz_dtype('datetime64')
assert not is_datetime64tz_dtype('datetime64[ns]')
assert not is_datetime64tz_dtype(ts)
assert is_datetime64tz_dtype(tsa)
for tz in ['US/Eastern', 'UTC']:
dtype = 'datetime64[ns, {}]'.format(tz)
assert not is_datetime64_dtype(dtype)
assert is_datetime64tz_dtype(dtype)
assert is_datetime64_ns_dtype(dtype)
assert is_datetime64_any_dtype(dtype)
def test_is_timedelta(self):
assert is_timedelta64_dtype('timedelta64')
assert is_timedelta64_dtype('timedelta64[ns]')
assert not is_timedelta64_ns_dtype('timedelta64')
assert is_timedelta64_ns_dtype('timedelta64[ns]')
tdi = TimedeltaIndex([1e14, 2e14], dtype='timedelta64[ns]')
assert is_timedelta64_dtype(tdi)
assert is_timedelta64_ns_dtype(tdi)
assert is_timedelta64_ns_dtype(tdi.astype('timedelta64[ns]'))
# Conversion to Int64Index:
assert not is_timedelta64_ns_dtype(tdi.astype('timedelta64'))
assert not is_timedelta64_ns_dtype(tdi.astype('timedelta64[h]'))
class TestIsScalar:
def test_is_scalar_builtin_scalars(self):
assert is_scalar(None)
assert is_scalar(True)
assert is_scalar(False)
assert is_scalar(Number())
assert is_scalar(Fraction())
assert is_scalar(0.)
assert is_scalar(np.nan)
assert is_scalar('foobar')
assert is_scalar(b'foobar')
assert is_scalar(datetime(2014, 1, 1))
assert is_scalar(date(2014, 1, 1))
assert is_scalar(time(12, 0))
assert is_scalar(timedelta(hours=1))
assert is_scalar(pd.NaT)
def test_is_scalar_builtin_nonscalars(self):
assert not is_scalar({})
assert not is_scalar([])
assert not is_scalar([1])
assert not is_scalar(())
assert not is_scalar((1, ))
assert not is_scalar(slice(None))
assert not is_scalar(Ellipsis)
def test_is_scalar_numpy_array_scalars(self):
assert is_scalar(np.int64(1))
assert is_scalar(np.float64(1.))
assert is_scalar(np.int32(1))
assert is_scalar(np.object_('foobar'))
assert is_scalar(np.str_('foobar'))
assert is_scalar(np.unicode_('foobar'))
assert is_scalar(np.bytes_(b'foobar'))
assert is_scalar(np.datetime64('2014-01-01'))
assert is_scalar(np.timedelta64(1, 'h'))
def test_is_scalar_numpy_zerodim_arrays(self):
for zerodim in [np.array(1), np.array('foobar'),
np.array(np.datetime64('2014-01-01')),
np.array(np.timedelta64(1, 'h')),
np.array(np.datetime64('NaT'))]:
assert not is_scalar(zerodim)
assert is_scalar(lib.item_from_zerodim(zerodim))
@pytest.mark.filterwarnings("ignore::PendingDeprecationWarning")
def test_is_scalar_numpy_arrays(self):
assert not is_scalar(np.array([]))
assert not is_scalar(np.array([[]]))
assert not is_scalar(np.matrix('1; 2'))
def test_is_scalar_pandas_scalars(self):
assert is_scalar(Timestamp('2014-01-01'))
assert is_scalar(Timedelta(hours=1))
assert is_scalar(Period('2014-01-01'))
assert is_scalar(Interval(left=0, right=1))
assert is_scalar(DateOffset(days=1))
def test_is_scalar_pandas_containers(self):
assert not is_scalar(Series())
assert not is_scalar(Series([1]))
assert not is_scalar(DataFrame())
assert not is_scalar(DataFrame([[1]]))
assert not is_scalar(Index([]))
assert not is_scalar(Index([1]))
def test_datetimeindex_from_empty_datetime64_array():
for unit in ['ms', 'us', 'ns']:
idx = DatetimeIndex(np.array([], dtype='datetime64[%s]' % unit))
assert (len(idx) == 0)
def test_nan_to_nat_conversions():
df = DataFrame(dict({
'A': np.asarray(range(10), dtype='float64'),
'B': Timestamp('20010101')
}))
df.iloc[3:6, :] = np.nan
result = df.loc[4, 'B'].value
assert (result == iNaT)
s = df['B'].copy()
s._data = s._data.setitem(indexer=tuple([slice(8, 9)]), value=np.nan)
assert (isna(s[8]))
assert (s[8].value == np.datetime64('NaT').astype(np.int64))
@td.skip_if_no_scipy
@pytest.mark.filterwarnings("ignore::PendingDeprecationWarning")
def test_is_scipy_sparse(spmatrix): # noqa: F811
assert is_scipy_sparse(spmatrix([[0, 1]]))
assert not is_scipy_sparse(np.array([1]))
def test_ensure_int32():
values = np.arange(10, dtype=np.int32)
result = ensure_int32(values)
assert (result.dtype == np.int32)
values = np.arange(10, dtype=np.int64)
result = ensure_int32(values)
assert (result.dtype == np.int32)
def test_ensure_categorical():
values = np.arange(10, dtype=np.int32)
result = ensure_categorical(values)
assert (result.dtype == 'category')
values = Categorical(values)
result = ensure_categorical(values)
tm.assert_categorical_equal(result, values)
| bsd-3-clause |
yunfeilu/scikit-learn | sklearn/semi_supervised/label_propagation.py | 71 | 15342 | # coding=utf8
"""
Label propagation in the context of this module refers to a set of
semisupervised classification algorithms. In the high level, these algorithms
work by forming a fully-connected graph between all points given and solving
for the steady-state distribution of labels at each point.
These algorithms perform very well in practice. The cost of running can be very
expensive, at approximately O(N^3) where N is the number of (labeled and
unlabeled) points. The theory (why they perform so well) is motivated by
intuitions from random walk algorithms and geometric relationships in the data.
For more information see the references below.
Model Features
--------------
Label clamping:
The algorithm tries to learn distributions of labels over the dataset. In the
"Hard Clamp" mode, the true ground labels are never allowed to change. They
are clamped into position. In the "Soft Clamp" mode, they are allowed some
wiggle room, but some alpha of their original value will always be retained.
Hard clamp is the same as soft clamping with alpha set to 1.
Kernel:
A function which projects a vector into some higher dimensional space. This
implementation supports RBF and KNN kernels. Using the RBF kernel generates
a dense matrix of size O(N^2), while the KNN kernel generates a sparse matrix
of size O(k*N), which runs much faster (a short shape comparison is sketched
just after this docstring). See the documentation for SVMs for more info on
kernels.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelPropagation
>>> label_prop_model = LabelPropagation()
>>> iris = datasets.load_iris()
>>> import numpy as np
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelPropagation(...)
Notes
-----
References:
[1] Yoshua Bengio, Olivier Delalleau, Nicolas Le Roux. In Semi-Supervised
Learning (2006), pp. 193-216
[2] Olivier Delalleau, Yoshua Bengio, Nicolas Le Roux. Efficient
Non-Parametric Function Induction in Semi-Supervised Learning. AISTAT 2005
"""
# Authors: Clay Woolam <[email protected]>
# Licence: BSD
from abc import ABCMeta, abstractmethod
from scipy import sparse
import numpy as np
from ..base import BaseEstimator, ClassifierMixin
from ..metrics.pairwise import rbf_kernel
from ..utils.graph import graph_laplacian
from ..utils.extmath import safe_sparse_dot
from ..utils.validation import check_X_y, check_is_fitted, check_array
from ..externals import six
from ..neighbors.unsupervised import NearestNeighbors
### Helper functions
def _not_converged(y_truth, y_prediction, tol=1e-3):
"""basic convergence check"""
return np.abs(y_truth - y_prediction).sum() > tol
class BaseLabelPropagation(six.with_metaclass(ABCMeta, BaseEstimator,
ClassifierMixin)):
"""Base class for label propagation module.
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
        Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
Parameter for rbf kernel
alpha : float
Clamping factor
max_iter : float
Change maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
n_neighbors : integer > 0
Parameter for knn kernel
"""
def __init__(self, kernel='rbf', gamma=20, n_neighbors=7,
alpha=1, max_iter=30, tol=1e-3):
self.max_iter = max_iter
self.tol = tol
# kernel parameters
self.kernel = kernel
self.gamma = gamma
self.n_neighbors = n_neighbors
# clamping factor
self.alpha = alpha
def _get_kernel(self, X, y=None):
if self.kernel == "rbf":
if y is None:
return rbf_kernel(X, X, gamma=self.gamma)
else:
return rbf_kernel(X, y, gamma=self.gamma)
elif self.kernel == "knn":
if self.nn_fit is None:
self.nn_fit = NearestNeighbors(self.n_neighbors).fit(X)
if y is None:
return self.nn_fit.kneighbors_graph(self.nn_fit._fit_X,
self.n_neighbors,
mode='connectivity')
else:
return self.nn_fit.kneighbors(y, return_distance=False)
else:
raise ValueError("%s is not a valid kernel. Only rbf and knn"
" are supported at this time" % self.kernel)
@abstractmethod
def _build_graph(self):
raise NotImplementedError("Graph construction must be implemented"
" to fit a label propagation model.")
def predict(self, X):
"""Performs inductive inference across the model.
Parameters
----------
X : array_like, shape = [n_samples, n_features]
Returns
-------
y : array_like, shape = [n_samples]
Predictions for input data
"""
probas = self.predict_proba(X)
return self.classes_[np.argmax(probas, axis=1)].ravel()
def predict_proba(self, X):
"""Predict probability for each possible outcome.
Compute the probability estimates for each single sample in X
and each possible outcome seen during training (categorical
distribution).
Parameters
----------
X : array_like, shape = [n_samples, n_features]
Returns
-------
probabilities : array, shape = [n_samples, n_classes]
Normalized probability distributions across
class labels
"""
check_is_fitted(self, 'X_')
X_2d = check_array(X, accept_sparse = ['csc', 'csr', 'coo', 'dok',
'bsr', 'lil', 'dia'])
weight_matrices = self._get_kernel(self.X_, X_2d)
if self.kernel == 'knn':
probabilities = []
for weight_matrix in weight_matrices:
ine = np.sum(self.label_distributions_[weight_matrix], axis=0)
probabilities.append(ine)
probabilities = np.array(probabilities)
else:
weight_matrices = weight_matrices.T
probabilities = np.dot(weight_matrices, self.label_distributions_)
normalizer = np.atleast_2d(np.sum(probabilities, axis=1)).T
probabilities /= normalizer
return probabilities
def fit(self, X, y):
"""Fit a semi-supervised label propagation model based
All the input data is provided matrix X (labeled and unlabeled)
and corresponding label matrix y with a dedicated marker value for
unlabeled samples.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
A {n_samples by n_samples} size matrix will be created from this
y : array_like, shape = [n_samples]
n_labeled_samples (unlabeled points are marked as -1)
All unlabeled samples will be transductively assigned labels
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y)
self.X_ = X
# actual graph construction (implementations should override this)
graph_matrix = self._build_graph()
# label construction
# construct a categorical distribution for classification only
classes = np.unique(y)
classes = (classes[classes != -1])
self.classes_ = classes
n_samples, n_classes = len(y), len(classes)
y = np.asarray(y)
unlabeled = y == -1
clamp_weights = np.ones((n_samples, 1))
clamp_weights[unlabeled, 0] = self.alpha
# initialize distributions
self.label_distributions_ = np.zeros((n_samples, n_classes))
for label in classes:
self.label_distributions_[y == label, classes == label] = 1
y_static = np.copy(self.label_distributions_)
if self.alpha > 0.:
y_static *= 1 - self.alpha
y_static[unlabeled] = 0
l_previous = np.zeros((self.X_.shape[0], n_classes))
remaining_iter = self.max_iter
if sparse.isspmatrix(graph_matrix):
graph_matrix = graph_matrix.tocsr()
while (_not_converged(self.label_distributions_, l_previous, self.tol)
and remaining_iter > 1):
l_previous = self.label_distributions_
self.label_distributions_ = safe_sparse_dot(
graph_matrix, self.label_distributions_)
# clamp
self.label_distributions_ = np.multiply(
clamp_weights, self.label_distributions_) + y_static
remaining_iter -= 1
normalizer = np.sum(self.label_distributions_, axis=1)[:, np.newaxis]
self.label_distributions_ /= normalizer
# set the transduction item
transduction = self.classes_[np.argmax(self.label_distributions_,
axis=1)]
self.transduction_ = transduction.ravel()
self.n_iter_ = self.max_iter - remaining_iter
return self
class LabelPropagation(BaseLabelPropagation):
"""Label Propagation classifier
Read more in the :ref:`User Guide <label_propagation>`.
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
        Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
Parameter for rbf kernel
n_neighbors : integer > 0
Parameter for knn kernel
alpha : float
Clamping factor
max_iter : float
Change maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
Attributes
----------
X_ : array, shape = [n_samples, n_features]
Input array.
classes_ : array, shape = [n_classes]
The distinct labels used in classifying instances.
label_distributions_ : array, shape = [n_samples, n_classes]
Categorical distribution for each item.
transduction_ : array, shape = [n_samples]
Label assigned to each item via the transduction.
n_iter_ : int
Number of iterations run.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelPropagation
>>> label_prop_model = LabelPropagation()
>>> iris = datasets.load_iris()
    >>> import numpy as np
    >>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
    ... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelPropagation(...)
References
----------
Xiaojin Zhu and Zoubin Ghahramani. Learning from labeled and unlabeled data
with label propagation. Technical Report CMU-CALD-02-107, Carnegie Mellon
University, 2002 http://pages.cs.wisc.edu/~jerryzhu/pub/CMU-CALD-02-107.pdf
See Also
--------
LabelSpreading : Alternate label propagation strategy more robust to noise
"""
def _build_graph(self):
"""Matrix representing a fully connected graph between each sample
This basic implementation creates a non-stochastic affinity matrix, so
class distributions will exceed 1 (normalization may be desired).
"""
if self.kernel == 'knn':
self.nn_fit = None
affinity_matrix = self._get_kernel(self.X_)
normalizer = affinity_matrix.sum(axis=0)
if sparse.isspmatrix(affinity_matrix):
affinity_matrix.data /= np.diag(np.array(normalizer))
else:
affinity_matrix /= normalizer[:, np.newaxis]
return affinity_matrix
class LabelSpreading(BaseLabelPropagation):
"""LabelSpreading model for semi-supervised learning
    This model is similar to the basic Label Propagation algorithm,
    but uses an affinity matrix based on the normalized graph Laplacian
    and soft clamping across the labels (a small NumPy sketch of the
    update rule appears at the end of this module).
Read more in the :ref:`User Guide <label_propagation>`.
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
parameter for rbf kernel
n_neighbors : integer > 0
parameter for knn kernel
alpha : float
clamping factor
max_iter : float
maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
Attributes
----------
X_ : array, shape = [n_samples, n_features]
Input array.
classes_ : array, shape = [n_classes]
The distinct labels used in classifying instances.
label_distributions_ : array, shape = [n_samples, n_classes]
Categorical distribution for each item.
transduction_ : array, shape = [n_samples]
Label assigned to each item via the transduction.
n_iter_ : int
Number of iterations run.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelSpreading
>>> label_prop_model = LabelSpreading()
>>> iris = datasets.load_iris()
    >>> import numpy as np
    >>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
    ... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelSpreading(...)
References
----------
Dengyong Zhou, Olivier Bousquet, Thomas Navin Lal, Jason Weston,
Bernhard Schoelkopf. Learning with local and global consistency (2004)
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.115.3219
See Also
--------
LabelPropagation : Unregularized graph based semi-supervised learning
"""
def __init__(self, kernel='rbf', gamma=20, n_neighbors=7, alpha=0.2,
max_iter=30, tol=1e-3):
# this one has different base parameters
super(LabelSpreading, self).__init__(kernel=kernel, gamma=gamma,
n_neighbors=n_neighbors,
alpha=alpha, max_iter=max_iter,
tol=tol)
def _build_graph(self):
"""Graph matrix for Label Spreading computes the graph laplacian"""
# compute affinity matrix (or gram matrix)
if self.kernel == 'knn':
self.nn_fit = None
n_samples = self.X_.shape[0]
affinity_matrix = self._get_kernel(self.X_)
laplacian = graph_laplacian(affinity_matrix, normed=True)
laplacian = -laplacian
if sparse.isspmatrix(laplacian):
diag_mask = (laplacian.row == laplacian.col)
laplacian.data[diag_mask] = 0.0
else:
laplacian.flat[::n_samples + 1] = 0.0 # set diag to 0.0
return laplacian
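    # Illustrative note (not upstream scikit-learn code): given the matrix S
    # returned above (the negated, zero-diagonal normalized Laplacian), the
    # base class iterates, roughly,
    #
    #     Y_{t+1} = alpha * S.dot(Y_t) + (1 - alpha) * Y_0
    #
    # where Y_0 holds the clamped initial label distributions and alpha is the
    # clamping factor, stopping after max_iter steps or once the change in Y
    # falls below tol.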
| bsd-3-clause |
datalayer/zeppelin | python/src/main/resources/grpc/python/zeppelin_python.py | 9 | 4436 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from py4j.java_gateway import java_import, JavaGateway, GatewayClient
from io import BytesIO
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
class PyZeppelinContext(object):
""" A context impl that uses Py4j to communicate to JVM
"""
def __init__(self, z):
self.z = z
self.paramOption = gateway.jvm.org.apache.zeppelin.display.ui.OptionInput.ParamOption
self.javaList = gateway.jvm.java.util.ArrayList
self.max_result = z.getMaxResult()
def input(self, name, defaultValue=""):
return self.z.input(name, defaultValue)
def textbox(self, name, defaultValue=""):
return self.z.textbox(name, defaultValue)
def noteTextbox(self, name, defaultValue=""):
return self.z.noteTextbox(name, defaultValue)
def select(self, name, options, defaultValue=""):
return self.z.select(name, defaultValue, self.getParamOptions(options))
def noteSelect(self, name, options, defaultValue=""):
return self.z.noteSelect(name, defaultValue, self.getParamOptions(options))
def checkbox(self, name, options, defaultChecked=[]):
return self.z.checkbox(name, self.getDefaultChecked(defaultChecked), self.getParamOptions(options))
def noteCheckbox(self, name, options, defaultChecked=[]):
return self.z.noteCheckbox(name, self.getDefaultChecked(defaultChecked), self.getParamOptions(options))
def getParamOptions(self, options):
javaOptions = gateway.new_array(self.paramOption, len(options))
i = 0
for tuple in options:
javaOptions[i] = self.paramOption(tuple[0], tuple[1])
i += 1
return javaOptions
def getDefaultChecked(self, defaultChecked):
javaDefaultChecked = self.javaList()
for check in defaultChecked:
javaDefaultChecked.append(check)
return javaDefaultChecked
def show(self, p, **kwargs):
if type(p).__name__ == "DataFrame": # does not play well with sub-classes
# `isinstance(p, DataFrame)` would req `import pandas.core.frame.DataFrame`
# and so a dependency on pandas
self.show_dataframe(p, **kwargs)
elif hasattr(p, '__call__'):
p() #error reporting
def show_dataframe(self, df, show_index=False, **kwargs):
"""Pretty prints DF using Table Display System
"""
limit = len(df) > self.max_result
header_buf = StringIO("")
if show_index:
idx_name = str(df.index.name) if df.index.name is not None else ""
header_buf.write(idx_name + "\t")
header_buf.write(str(df.columns[0]))
for col in df.columns[1:]:
header_buf.write("\t")
header_buf.write(str(col))
header_buf.write("\n")
body_buf = StringIO("")
rows = df.head(self.max_result).values if limit else df.values
index = df.index.values
for idx, row in zip(index, rows):
if show_index:
body_buf.write("%html <strong>{}</strong>".format(idx))
body_buf.write("\t")
body_buf.write(str(row[0]))
for cell in row[1:]:
body_buf.write("\t")
body_buf.write(str(cell))
body_buf.write("\n")
body_buf.seek(0); header_buf.seek(0)
#TODO(bzz): fix it, so it shows red notice, as in Spark
print("%table " + header_buf.read() + body_buf.read()) # +
# ("\n<font color=red>Results are limited by {}.</font>" \
# .format(self.max_result) if limit else "")
#)
body_buf.close(); header_buf.close()
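# A minimal usage sketch (not part of Zeppelin; the form names and option
# tuples below are invented, and the function is never called here). It shows
# how the dynamic-form helpers defined above are typically driven.
def _example_forms(z):
    """Illustrative only: exercise the dynamic-form helpers defined above."""
    fruit = z.select("fruit", [("apple", "Apple"), ("banana", "Banana")], "apple")
    note = z.textbox("comment", "default text")
    picks = z.checkbox("toppings", [("nuts", "Nuts"), ("choc", "Chocolate")], ["nuts"])
    return fruit, note, picks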
# start JVM gateway
client = GatewayClient(address='127.0.0.1', port=${JVM_GATEWAY_PORT})
gateway = JavaGateway(client)
java_import(gateway.jvm, "org.apache.zeppelin.display.Input")
intp = gateway.entry_point
z = __zeppelin__ = PyZeppelinContext(intp.getZeppelinContext())
| apache-2.0 |
tkaitchuck/nupic | external/darwin64/lib/python2.6/site-packages/matplotlib/artist.py | 69 | 33042 | from __future__ import division
import re, warnings
import matplotlib
import matplotlib.cbook as cbook
from transforms import Bbox, IdentityTransform, TransformedBbox, TransformedPath
from path import Path
## Note, matplotlib artists use the doc strings for set and get
# methods to enable the introspection methods of setp and getp. Every
# set_* method should have a docstring containing the line
#
# ACCEPTS: [ legal | values ]
#
# and aliases for setters and getters should have a docstring that
# starts with 'alias for ', as in 'alias for set_somemethod'
#
# You may wonder why we use so much boiler-plate manually defining the
# set_alias and get_alias functions, rather than using some clever
# python trick. The answer is that I need to be able to manipulate
# the docstring, and there is no clever way to do that in python 2.2,
# as far as I can see - see
# http://groups.google.com/groups?hl=en&lr=&threadm=mailman.5090.1098044946.5135.python-list%40python.org&rnum=1&prev=/groups%3Fq%3D__doc__%2Bauthor%253Ajdhunter%2540ace.bsd.uchicago.edu%26hl%3Den%26btnG%3DGoogle%2BSearch
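#
# An illustrative sketch (hypothetical method, not real matplotlib API): a
# setter that follows this convention would look like
#
#     def set_frobnication(self, frob):
#         """
#         Set whether the artist frobnicates.
#
#         ACCEPTS: [True | False]
#         """
#         self._frobnication = frob
#
# and an alias's docstring would begin with 'alias for set_frobnication' so
# that ArtistInspector (defined below) can pair them up.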
class Artist(object):
"""
Abstract base class for someone who renders into a
:class:`FigureCanvas`.
"""
aname = 'Artist'
zorder = 0
def __init__(self):
self.figure = None
self._transform = None
self._transformSet = False
self._visible = True
self._animated = False
self._alpha = 1.0
self.clipbox = None
self._clippath = None
self._clipon = True
self._lod = False
self._label = ''
self._picker = None
self._contains = None
self.eventson = False # fire events only if eventson
self._oid = 0 # an observer id
self._propobservers = {} # a dict from oids to funcs
self.axes = None
self._remove_method = None
self._url = None
self.x_isdata = True # False to avoid updating Axes.dataLim with x
self.y_isdata = True # with y
self._snap = None
def remove(self):
"""
Remove the artist from the figure if possible. The effect
will not be visible until the figure is redrawn, e.g., with
:meth:`matplotlib.axes.Axes.draw_idle`. Call
:meth:`matplotlib.axes.Axes.relim` to update the axes limits
if desired.
Note: :meth:`~matplotlib.axes.Axes.relim` will not see
collections even if the collection was added to axes with
*autolim* = True.
Note: there is no support for removing the artist's legend entry.
"""
# There is no method to set the callback. Instead the parent should set
# the _remove_method attribute directly. This would be a protected
# attribute if Python supported that sort of thing. The callback
# has one parameter, which is the child to be removed.
if self._remove_method != None:
self._remove_method(self)
else:
raise NotImplementedError('cannot remove artist')
# TODO: the fix for the collections relim problem is to move the
# limits calculation into the artist itself, including the property
# of whether or not the artist should affect the limits. Then there
# will be no distinction between axes.add_line, axes.add_patch, etc.
# TODO: add legend support
def have_units(self):
'Return *True* if units are set on the *x* or *y* axes'
ax = self.axes
if ax is None or ax.xaxis is None:
return False
return ax.xaxis.have_units() or ax.yaxis.have_units()
def convert_xunits(self, x):
"""For artists in an axes, if the xaxis has units support,
convert *x* using xaxis unit type
"""
ax = getattr(self, 'axes', None)
if ax is None or ax.xaxis is None:
#print 'artist.convert_xunits no conversion: ax=%s'%ax
return x
return ax.xaxis.convert_units(x)
def convert_yunits(self, y):
"""For artists in an axes, if the yaxis has units support,
convert *y* using yaxis unit type
"""
ax = getattr(self, 'axes', None)
if ax is None or ax.yaxis is None: return y
return ax.yaxis.convert_units(y)
def set_axes(self, axes):
"""
Set the :class:`~matplotlib.axes.Axes` instance in which the
artist resides, if any.
ACCEPTS: an :class:`~matplotlib.axes.Axes` instance
"""
self.axes = axes
def get_axes(self):
"""
Return the :class:`~matplotlib.axes.Axes` instance the artist
resides in, or *None*
"""
return self.axes
def add_callback(self, func):
"""
Adds a callback function that will be called whenever one of
the :class:`Artist`'s properties changes.
Returns an *id* that is useful for removing the callback with
:meth:`remove_callback` later.
"""
oid = self._oid
self._propobservers[oid] = func
self._oid += 1
return oid
def remove_callback(self, oid):
"""
Remove a callback based on its *id*.
.. seealso::
:meth:`add_callback`
"""
try: del self._propobservers[oid]
except KeyError: pass
def pchanged(self):
"""
Fire an event when property changed, calling all of the
registered callbacks.
"""
for oid, func in self._propobservers.items():
func(self)
def is_transform_set(self):
"""
Returns *True* if :class:`Artist` has a transform explicitly
set.
"""
return self._transformSet
def set_transform(self, t):
"""
Set the :class:`~matplotlib.transforms.Transform` instance
used by this artist.
ACCEPTS: :class:`~matplotlib.transforms.Transform` instance
"""
self._transform = t
self._transformSet = True
self.pchanged()
def get_transform(self):
"""
Return the :class:`~matplotlib.transforms.Transform`
instance used by this artist.
"""
if self._transform is None:
self._transform = IdentityTransform()
return self._transform
def hitlist(self, event):
"""
List the children of the artist which contain the mouse event *event*.
"""
import traceback
L = []
try:
hascursor,info = self.contains(event)
if hascursor:
L.append(self)
except:
traceback.print_exc()
print "while checking",self.__class__
for a in self.get_children():
L.extend(a.hitlist(event))
return L
def get_children(self):
"""
Return a list of the child :class:`Artist`s this
:class:`Artist` contains.
"""
return []
def contains(self, mouseevent):
"""Test whether the artist contains the mouse event.
Returns the truth value and a dictionary of artist specific details of
selection, such as which points are contained in the pick radius. See
individual artists for details.
"""
if callable(self._contains): return self._contains(self,mouseevent)
#raise NotImplementedError,str(self.__class__)+" needs 'contains' method"
warnings.warn("'%s' needs 'contains' method" % self.__class__.__name__)
return False,{}
def set_contains(self,picker):
"""
Replace the contains test used by this artist. The new picker
should be a callable function which determines whether the
artist is hit by the mouse event::
hit, props = picker(artist, mouseevent)
If the mouse event is over the artist, return *hit* = *True*
and *props* is a dictionary of properties you want returned
with the contains test.
ACCEPTS: a callable function
"""
self._contains = picker
def get_contains(self):
"""
Return the _contains test used by the artist, or *None* for default.
"""
return self._contains
def pickable(self):
'Return *True* if :class:`Artist` is pickable.'
return (self.figure is not None and
self.figure.canvas is not None and
self._picker is not None)
def pick(self, mouseevent):
"""
call signature::
pick(mouseevent)
each child artist will fire a pick event if *mouseevent* is over
the artist and the artist has picker set
"""
# Pick self
if self.pickable():
picker = self.get_picker()
if callable(picker):
inside,prop = picker(self,mouseevent)
else:
inside,prop = self.contains(mouseevent)
if inside:
self.figure.canvas.pick_event(mouseevent, self, **prop)
# Pick children
for a in self.get_children():
a.pick(mouseevent)
def set_picker(self, picker):
"""
Set the epsilon for picking used by this artist
*picker* can be one of the following:
* *None*: picking is disabled for this artist (default)
* A boolean: if *True* then picking will be enabled and the
artist will fire a pick event if the mouse event is over
the artist
* A float: if picker is a number it is interpreted as an
epsilon tolerance in points and the artist will fire
off an event if its data is within epsilon of the mouse
event. For some artists like lines and patch collections,
the artist may provide additional data to the pick event
that is generated, e.g. the indices of the data within
epsilon of the pick event
* A function: if picker is callable, it is a user supplied
function which determines whether the artist is hit by the
mouse event::
hit, props = picker(artist, mouseevent)
to determine the hit test. if the mouse event is over the
artist, return *hit=True* and props is a dictionary of
properties you want added to the PickEvent attributes.
ACCEPTS: [None|float|boolean|callable]
"""
self._picker = picker
def get_picker(self):
'Return the picker object used by this artist'
return self._picker
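    # Illustrative sketch (hypothetical picker, not matplotlib API): a callable
    # picker receives (artist, mouseevent) and returns (hit, props), e.g.
    #
    #     def coarse_picker(artist, mouseevent):
    #         hit = mouseevent.xdata is not None  # fire on any in-axes click
    #         return hit, dict(reason='custom tolerance')
    #     some_artist.set_picker(coarse_picker)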
def is_figure_set(self):
"""
Returns True if the artist is assigned to a
:class:`~matplotlib.figure.Figure`.
"""
return self.figure is not None
def get_url(self):
"""
Returns the url
"""
return self._url
def set_url(self, url):
"""
Sets the url for the artist
"""
self._url = url
def get_snap(self):
"""
Returns the snap setting which may be:
* True: snap vertices to the nearest pixel center
* False: leave vertices as-is
* None: (auto) If the path contains only rectilinear line
segments, round to the nearest pixel center
Only supported by the Agg backends.
"""
return self._snap
def set_snap(self, snap):
"""
Sets the snap setting which may be:
* True: snap vertices to the nearest pixel center
* False: leave vertices as-is
* None: (auto) If the path contains only rectilinear line
segments, round to the nearest pixel center
Only supported by the Agg backends.
"""
self._snap = snap
def get_figure(self):
"""
Return the :class:`~matplotlib.figure.Figure` instance the
artist belongs to.
"""
return self.figure
def set_figure(self, fig):
"""
Set the :class:`~matplotlib.figure.Figure` instance the artist
belongs to.
ACCEPTS: a :class:`matplotlib.figure.Figure` instance
"""
self.figure = fig
self.pchanged()
def set_clip_box(self, clipbox):
"""
Set the artist's clip :class:`~matplotlib.transforms.Bbox`.
ACCEPTS: a :class:`matplotlib.transforms.Bbox` instance
"""
self.clipbox = clipbox
self.pchanged()
def set_clip_path(self, path, transform=None):
"""
Set the artist's clip path, which may be:
* a :class:`~matplotlib.patches.Patch` (or subclass) instance
* a :class:`~matplotlib.path.Path` instance, in which case
an optional :class:`~matplotlib.transforms.Transform`
instance may be provided, which will be applied to the
path before using it for clipping.
* *None*, to remove the clipping path
For efficiency, if the path happens to be an axis-aligned
rectangle, this method will set the clipping box to the
corresponding rectangle and set the clipping path to *None*.
ACCEPTS: [ (:class:`~matplotlib.path.Path`,
:class:`~matplotlib.transforms.Transform`) |
:class:`~matplotlib.patches.Patch` | None ]
"""
from patches import Patch, Rectangle
success = False
if transform is None:
if isinstance(path, Rectangle):
self.clipbox = TransformedBbox(Bbox.unit(), path.get_transform())
self._clippath = None
success = True
elif isinstance(path, Patch):
self._clippath = TransformedPath(
path.get_path(),
path.get_transform())
success = True
if path is None:
self._clippath = None
success = True
elif isinstance(path, Path):
self._clippath = TransformedPath(path, transform)
success = True
if not success:
print type(path), type(transform)
raise TypeError("Invalid arguments to set_clip_path")
self.pchanged()
def get_alpha(self):
"""
Return the alpha value used for blending - not supported on all
backends
"""
return self._alpha
def get_visible(self):
"Return the artist's visiblity"
return self._visible
def get_animated(self):
"Return the artist's animated state"
return self._animated
def get_clip_on(self):
'Return whether artist uses clipping'
return self._clipon
def get_clip_box(self):
'Return artist clipbox'
return self.clipbox
def get_clip_path(self):
'Return artist clip path'
return self._clippath
def get_transformed_clip_path_and_affine(self):
'''
Return the clip path with the non-affine part of its
transformation applied, and the remaining affine part of its
transformation.
'''
if self._clippath is not None:
return self._clippath.get_transformed_path_and_affine()
return None, None
def set_clip_on(self, b):
"""
Set whether artist uses clipping.
ACCEPTS: [True | False]
"""
self._clipon = b
self.pchanged()
def _set_gc_clip(self, gc):
'Set the clip properly for the gc'
if self._clipon:
if self.clipbox is not None:
gc.set_clip_rectangle(self.clipbox)
gc.set_clip_path(self._clippath)
else:
gc.set_clip_rectangle(None)
gc.set_clip_path(None)
def draw(self, renderer, *args, **kwargs):
'Derived classes drawing method'
if not self.get_visible(): return
def set_alpha(self, alpha):
"""
Set the alpha value used for blending - not supported on
all backends
ACCEPTS: float (0.0 transparent through 1.0 opaque)
"""
self._alpha = alpha
self.pchanged()
def set_lod(self, on):
"""
Set Level of Detail on or off. If on, the artists may examine
things like the pixel width of the axes and draw a subset of
their contents accordingly
ACCEPTS: [True | False]
"""
self._lod = on
self.pchanged()
def set_visible(self, b):
"""
Set the artist's visibility.
ACCEPTS: [True | False]
"""
self._visible = b
self.pchanged()
def set_animated(self, b):
"""
Set the artist's animation state.
ACCEPTS: [True | False]
"""
self._animated = b
self.pchanged()
def update(self, props):
"""
Update the properties of this :class:`Artist` from the
dictionary *props*.
"""
store = self.eventson
self.eventson = False
changed = False
for k,v in props.items():
func = getattr(self, 'set_'+k, None)
if func is None or not callable(func):
raise AttributeError('Unknown property %s'%k)
func(v)
changed = True
self.eventson = store
if changed: self.pchanged()
def get_label(self):
"""
Get the label used for this artist in the legend.
"""
return self._label
def set_label(self, s):
"""
Set the label to *s* for auto legend.
ACCEPTS: any string
"""
self._label = s
self.pchanged()
def get_zorder(self):
"""
Return the :class:`Artist`'s zorder.
"""
return self.zorder
def set_zorder(self, level):
"""
Set the zorder for the artist. Artists with lower zorder
values are drawn first.
ACCEPTS: any number
"""
self.zorder = level
self.pchanged()
def update_from(self, other):
'Copy properties from *other* to *self*.'
self._transform = other._transform
self._transformSet = other._transformSet
self._visible = other._visible
self._alpha = other._alpha
self.clipbox = other.clipbox
self._clipon = other._clipon
self._clippath = other._clippath
self._lod = other._lod
self._label = other._label
self.pchanged()
def set(self, **kwargs):
"""
A tk-style set command; pass *kwargs* to set properties
"""
ret = []
for k,v in kwargs.items():
k = k.lower()
funcName = "set_%s"%k
func = getattr(self,funcName)
ret.extend( [func(v)] )
return ret
def findobj(self, match=None):
"""
pyplot signature:
findobj(o=gcf(), match=None)
Recursively find all :class:`matplotlib.artist.Artist` instances
contained in self.
*match* can be
- None: return all objects contained in artist (including artist)
- function with signature ``boolean = match(artist)`` used to filter matches
- class instance: eg Line2D. Only return artists of class type
.. plot:: mpl_examples/pylab_examples/findobj_demo.py
"""
if match is None: # always return True
def matchfunc(x): return True
elif cbook.issubclass_safe(match, Artist):
def matchfunc(x):
return isinstance(x, match)
elif callable(match):
matchfunc = match
else:
raise ValueError('match must be None, a matplotlib.artist.Artist subclass, or a callable')
artists = []
for c in self.get_children():
if matchfunc(c):
artists.append(c)
artists.extend([thisc for thisc in c.findobj(matchfunc) if matchfunc(thisc)])
if matchfunc(self):
artists.append(self)
return artists
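    # Illustrative sketch (assumes a Figure instance named 'fig'):
    #
    #     import matplotlib.lines as mlines
    #     lines = fig.findobj(mlines.Line2D)  # every Line2D in the figure
    #     wide = fig.findobj(lambda a: isinstance(a, mlines.Line2D)
    #                        and a.get_linewidth() > 2)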
class ArtistInspector:
"""
A helper class to inspect an :class:`~matplotlib.artist.Artist`
and return information about its settable properties and their
current values.
"""
def __init__(self, o):
"""
Initialize the artist inspector with an
:class:`~matplotlib.artist.Artist` or sequence of
:class:`Artists`. If a sequence is used, we assume it is a
homogeneous sequence (all :class:`Artists` are of the same
type) and it is your responsibility to make sure this is so.
"""
if cbook.iterable(o) and len(o): o = o[0]
self.oorig = o
if not isinstance(o, type):
o = type(o)
self.o = o
self.aliasd = self.get_aliases()
def get_aliases(self):
"""
Get a dict mapping *fullname* -> *alias* for each *alias* in
the :class:`~matplotlib.artist.ArtistInspector`.
Eg., for lines::
{'markerfacecolor': 'mfc',
'linewidth' : 'lw',
}
"""
names = [name for name in dir(self.o) if
(name.startswith('set_') or name.startswith('get_'))
and callable(getattr(self.o,name))]
aliases = {}
for name in names:
func = getattr(self.o, name)
if not self.is_alias(func): continue
docstring = func.__doc__
fullname = docstring[10:]
aliases.setdefault(fullname[4:], {})[name[4:]] = None
return aliases
_get_valid_values_regex = re.compile(r"\n\s*ACCEPTS:\s*((?:.|\n)*?)(?:$|(?:\n\n))")
def get_valid_values(self, attr):
"""
Get the legal arguments for the setter associated with *attr*.
This is done by querying the docstring of the function *set_attr*
for a line that begins with ACCEPTS:
Eg., for a line linestyle, return
[ '-' | '--' | '-.' | ':' | 'steps' | 'None' ]
"""
name = 'set_%s'%attr
if not hasattr(self.o, name):
raise AttributeError('%s has no function %s'%(self.o,name))
func = getattr(self.o, name)
docstring = func.__doc__
if docstring is None: return 'unknown'
if docstring.startswith('alias for '):
return None
match = self._get_valid_values_regex.search(docstring)
if match is not None:
return match.group(1).replace('\n', ' ')
return 'unknown'
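    # Illustrative sketch: for a Line2D instance named 'line',
    #
    #     ArtistInspector(line).get_valid_values('linestyle')
    #
    # returns the bracketed list following ACCEPTS: in set_linestyle's
    # docstring, e.g. "[ '-' | '--' | '-.' | ':' | 'steps' | 'None' ]".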
def _get_setters_and_targets(self):
"""
Get the attribute strings and a full path to where the setter
is defined for all setters in an object.
"""
setters = []
for name in dir(self.o):
if not name.startswith('set_'): continue
o = getattr(self.o, name)
if not callable(o): continue
func = o
if self.is_alias(func): continue
source_class = self.o.__module__ + "." + self.o.__name__
for cls in self.o.mro():
if name in cls.__dict__:
source_class = cls.__module__ + "." + cls.__name__
break
setters.append((name[4:], source_class + "." + name))
return setters
def get_setters(self):
"""
Get the attribute strings with setters for object. Eg., for a line,
return ``['markerfacecolor', 'linewidth', ....]``.
"""
return [prop for prop, target in self._get_setters_and_targets()]
def is_alias(self, o):
"""
Return *True* if method object *o* is an alias for another
function.
"""
ds = o.__doc__
if ds is None: return False
return ds.startswith('alias for ')
def aliased_name(self, s):
"""
return 'PROPNAME or alias' if *s* has an alias, else return
PROPNAME.
E.g. for the line markerfacecolor property, which has an
alias, return 'markerfacecolor or mfc' and for the transform
property, which does not, return 'transform'
"""
if s in self.aliasd:
return s + ''.join([' or %s' % x for x in self.aliasd[s].keys()])
else:
return s
def aliased_name_rest(self, s, target):
"""
return 'PROPNAME or alias' if *s* has an alias, else return
PROPNAME formatted for ReST
E.g. for the line markerfacecolor property, which has an
alias, return 'markerfacecolor or mfc' and for the transform
property, which does not, return 'transform'
"""
if s in self.aliasd:
aliases = ''.join([' or %s' % x for x in self.aliasd[s].keys()])
else:
aliases = ''
return ':meth:`%s <%s>`%s' % (s, target, aliases)
def pprint_setters(self, prop=None, leadingspace=2):
"""
If *prop* is *None*, return a list of strings of all settable properties
and their valid values.
If *prop* is not *None*, it is a valid property name and that
property will be returned as a string of property : valid
values.
"""
if leadingspace:
pad = ' '*leadingspace
else:
pad = ''
if prop is not None:
accepts = self.get_valid_values(prop)
return '%s%s: %s' %(pad, prop, accepts)
attrs = self._get_setters_and_targets()
attrs.sort()
lines = []
for prop, path in attrs:
accepts = self.get_valid_values(prop)
name = self.aliased_name(prop)
lines.append('%s%s: %s' %(pad, name, accepts))
return lines
def pprint_setters_rest(self, prop=None, leadingspace=2):
"""
If *prop* is *None*, return a list of strings of all settable properties
and their valid values. Format the output for ReST
If *prop* is not *None*, it is a valid property name and that
property will be returned as a string of property : valid
values.
"""
if leadingspace:
pad = ' '*leadingspace
else:
pad = ''
if prop is not None:
accepts = self.get_valid_values(prop)
return '%s%s: %s' %(pad, prop, accepts)
attrs = self._get_setters_and_targets()
attrs.sort()
lines = []
########
names = [self.aliased_name_rest(prop, target) for prop, target in attrs]
accepts = [self.get_valid_values(prop) for prop, target in attrs]
col0_len = max([len(n) for n in names])
col1_len = max([len(a) for a in accepts])
table_formatstr = pad + '='*col0_len + ' ' + '='*col1_len
lines.append('')
lines.append(table_formatstr)
lines.append(pad + 'Property'.ljust(col0_len+3) + \
'Description'.ljust(col1_len))
lines.append(table_formatstr)
lines.extend([pad + n.ljust(col0_len+3) + a.ljust(col1_len)
for n, a in zip(names, accepts)])
lines.append(table_formatstr)
lines.append('')
return lines
########
for prop, path in attrs:
accepts = self.get_valid_values(prop)
name = self.aliased_name_rest(prop, path)
lines.append('%s%s: %s' %(pad, name, accepts))
return lines
def pprint_getters(self):
"""
Return the getters and actual values as list of strings.
"""
o = self.oorig
getters = [name for name in dir(o)
if name.startswith('get_')
and callable(getattr(o, name))]
#print getters
getters.sort()
lines = []
for name in getters:
func = getattr(o, name)
if self.is_alias(func): continue
try: val = func()
except: continue
if getattr(val, 'shape', ()) != () and len(val)>6:
s = str(val[:6]) + '...'
else:
s = str(val)
s = s.replace('\n', ' ')
if len(s)>50:
s = s[:50] + '...'
name = self.aliased_name(name[4:])
lines.append(' %s = %s' %(name, s))
return lines
def findobj(self, match=None):
"""
Recursively find all :class:`matplotlib.artist.Artist`
instances contained in *self*.
If *match* is not None, it can be
- function with signature ``boolean = match(artist)``
- class instance: eg :class:`~matplotlib.lines.Line2D`
used to filter matches.
"""
if match is None: # always return True
def matchfunc(x): return True
elif cbook.issubclass_safe(match, Artist):
def matchfunc(x):
return isinstance(x, match)
elif callable(match):
matchfunc = match
else:
raise ValueError('match must be None, a matplotlib.artist.Artist subclass, or a callable')
artists = []
for c in self.get_children():
if matchfunc(c):
artists.append(c)
artists.extend([thisc for thisc in c.findobj(matchfunc) if matchfunc(thisc)])
if matchfunc(self):
artists.append(self)
return artists
def getp(o, property=None):
"""
Return the value of handle property. property is an optional string
for the property you want to return
Example usage::
getp(o) # get all the object properties
getp(o, 'linestyle') # get the linestyle property
*o* is a :class:`Artist` instance, eg
:class:`~matplotlib.lines.Line2D` or an instance of a
:class:`~matplotlib.axes.Axes` or :class:`matplotlib.text.Text`.
If the *property* is 'somename', this function returns
o.get_somename()
:func:`getp` can be used to query all the gettable properties with
``getp(o)``. Many properties have aliases for shorter typing, e.g.
'lw' is an alias for 'linewidth'. In the output, aliases and full
property names will be listed as:
property or alias = value
e.g.:
linewidth or lw = 2
"""
insp = ArtistInspector(o)
if property is None:
ret = insp.pprint_getters()
print '\n'.join(ret)
return
func = getattr(o, 'get_' + property)
return func()
# alias
get = getp
def setp(h, *args, **kwargs):
"""
matplotlib supports the use of :func:`setp` ("set property") and
:func:`getp` to set and get object properties, as well as to do
introspection on the object. For example, to set the linestyle of a
line to be dashed, you can do::
>>> line, = plot([1,2,3])
>>> setp(line, linestyle='--')
If you want to know the valid types of arguments, you can provide the
name of the property you want to set without a value::
>>> setp(line, 'linestyle')
linestyle: [ '-' | '--' | '-.' | ':' | 'steps' | 'None' ]
If you want to see all the properties that can be set, and their
possible values, you can do::
>>> setp(line)
... long output listing omitted
:func:`setp` operates on a single instance or a list of instances.
If you are in query mode introspecting the possible values, only
the first instance in the sequence is used. When actually setting
values, all the instances will be set. E.g., suppose you have a
list of two lines, the following will make both lines thicker and
red::
>>> x = arange(0,1.0,0.01)
>>> y1 = sin(2*pi*x)
>>> y2 = sin(4*pi*x)
>>> lines = plot(x, y1, x, y2)
>>> setp(lines, linewidth=2, color='r')
:func:`setp` works with the matlab(TM) style string/value pairs or
with python kwargs. For example, the following are equivalent::
>>> setp(lines, 'linewidth', 2, 'color', 'r') # matlab style
>>> setp(lines, linewidth=2, color='r') # python style
"""
insp = ArtistInspector(h)
if len(kwargs)==0 and len(args)==0:
print '\n'.join(insp.pprint_setters())
return
if len(kwargs)==0 and len(args)==1:
print insp.pprint_setters(prop=args[0])
return
if not cbook.iterable(h): h = [h]
else: h = cbook.flatten(h)
if len(args)%2:
raise ValueError('The set args must be string, value pairs')
funcvals = []
for i in range(0, len(args)-1, 2):
funcvals.append((args[i], args[i+1]))
funcvals.extend(kwargs.items())
ret = []
for o in h:
for s, val in funcvals:
s = s.lower()
funcName = "set_%s"%s
func = getattr(o,funcName)
ret.extend( [func(val)] )
return [x for x in cbook.flatten(ret)]
def kwdoc(a):
hardcopy = matplotlib.rcParams['docstring.hardcopy']
if hardcopy:
return '\n'.join(ArtistInspector(a).pprint_setters_rest(leadingspace=2))
else:
return '\n'.join(ArtistInspector(a).pprint_setters(leadingspace=2))
kwdocd = dict()
kwdocd['Artist'] = kwdoc(Artist)
| gpl-3.0 |
ishank08/scikit-learn | sklearn/cluster/tests/test_affinity_propagation.py | 341 | 2620 | """
Testing for Clustering methods
"""
import numpy as np
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.cluster.affinity_propagation_ import AffinityPropagation
from sklearn.cluster.affinity_propagation_ import affinity_propagation
from sklearn.datasets.samples_generator import make_blobs
from sklearn.metrics import euclidean_distances
n_clusters = 3
centers = np.array([[1, 1], [-1, -1], [1, -1]]) + 10
X, _ = make_blobs(n_samples=60, n_features=2, centers=centers,
cluster_std=0.4, shuffle=True, random_state=0)
def test_affinity_propagation():
# Affinity Propagation algorithm
# Compute similarities
S = -euclidean_distances(X, squared=True)
preference = np.median(S) * 10
# Compute Affinity Propagation
cluster_centers_indices, labels = affinity_propagation(
S, preference=preference)
n_clusters_ = len(cluster_centers_indices)
assert_equal(n_clusters, n_clusters_)
af = AffinityPropagation(preference=preference, affinity="precomputed")
labels_precomputed = af.fit(S).labels_
af = AffinityPropagation(preference=preference, verbose=True)
labels = af.fit(X).labels_
assert_array_equal(labels, labels_precomputed)
cluster_centers_indices = af.cluster_centers_indices_
n_clusters_ = len(cluster_centers_indices)
assert_equal(np.unique(labels).size, n_clusters_)
assert_equal(n_clusters, n_clusters_)
# Test also with no copy
_, labels_no_copy = affinity_propagation(S, preference=preference,
copy=False)
assert_array_equal(labels, labels_no_copy)
# Test input validation
assert_raises(ValueError, affinity_propagation, S[:, :-1])
assert_raises(ValueError, affinity_propagation, S, damping=0)
af = AffinityPropagation(affinity="unknown")
assert_raises(ValueError, af.fit, X)
def test_affinity_propagation_predict():
# Test AffinityPropagation.predict
af = AffinityPropagation(affinity="euclidean")
labels = af.fit_predict(X)
labels2 = af.predict(X)
assert_array_equal(labels, labels2)
def test_affinity_propagation_predict_error():
# Test exception in AffinityPropagation.predict
# Not fitted.
af = AffinityPropagation(affinity="euclidean")
assert_raises(ValueError, af.predict, X)
# Predict not supported when affinity="precomputed".
S = np.dot(X, X.T)
af = AffinityPropagation(affinity="precomputed")
af.fit(S)
assert_raises(ValueError, af.predict, X)
| bsd-3-clause |
edhuckle/statsmodels | statsmodels/datasets/star98/data.py | 25 | 3880 | """Star98 Educational Testing dataset."""
__docformat__ = 'restructuredtext'
COPYRIGHT = """Used with express permission from the original author,
who retains all rights."""
TITLE = "Star98 Educational Dataset"
SOURCE = """
Jeff Gill's `Generalized Linear Models: A Unified Approach`
http://jgill.wustl.edu/research/books.html
"""
DESCRSHORT = """Math scores for 303 student with 10 explanatory factors"""
DESCRLONG = """
This data is on the California education policy and outcomes (STAR program
results for 1998). The data measured standardized testing by the California
Department of Education that required evaluation of 2nd - 11th grade students
by the Stanford 9 test on a variety of subjects. This dataset is at
the level of the unified school district and consists of 303 cases. The
binary response variable represents the number of 9th graders scoring
over the national median value on the mathematics exam.
The data used in this example is only a subset of the original source.
"""
NOTE = """::
Number of Observations - 303 (counties in California).
Number of Variables - 13 and 8 interaction terms.
Definition of variables names::
NABOVE - Total number of students above the national median for the
math section.
NBELOW - Total number of students below the national median for the
math section.
LOWINC - Percentage of low income students
PERASIAN - Percentage of Asian student
PERBLACK - Percentage of black students
PERHISP - Percentage of Hispanic students
PERMINTE - Percentage of minority teachers
AVYRSEXP - Sum of teachers' years in educational service divided by the
number of teachers.
AVSALK - Total salary budget including benefits divided by the number
of full-time teachers (in thousands)
PERSPENK - Per-pupil spending (in thousands)
PTRATIO - Pupil-teacher ratio.
PCTAF - Percentage of students taking UC/CSU prep courses
PCTCHRT - Percentage of charter schools
PCTYRRND - Percentage of year-round schools
The below variables are interaction terms of the variables defined
above.
PERMINTE_AVYRSEXP
PERMINTE_AVSAL
AVYRSEXP_AVSAL
PERSPEN_PTRATIO
PERSPEN_PCTAF
PTRATIO_PCTAF
PERMINTE_AVYRSEXP_AVSAL
PERSPEN_PTRATIO_PCTAF
"""
from numpy import recfromtxt, column_stack, array
from statsmodels.datasets import utils as du
from os.path import dirname, abspath
def load():
"""
Load the star98 data and return a Dataset class instance.
Returns
-------
Load instance:
a class of the data with array attributes 'endog' and 'exog'
"""
data = _get_data()
return du.process_recarray(data, endog_idx=[0, 1], dtype=float)
def load_pandas():
data = _get_data()
return du.process_recarray_pandas(data, endog_idx=['NABOVE', 'NBELOW'],
dtype=float)
def _get_data():
filepath = dirname(abspath(__file__))
##### EDIT THE FOLLOWING TO POINT TO DatasetName.csv #####
names = ["NABOVE","NBELOW","LOWINC","PERASIAN","PERBLACK","PERHISP",
"PERMINTE","AVYRSEXP","AVSALK","PERSPENK","PTRATIO","PCTAF",
"PCTCHRT","PCTYRRND","PERMINTE_AVYRSEXP","PERMINTE_AVSAL",
"AVYRSEXP_AVSAL","PERSPEN_PTRATIO","PERSPEN_PCTAF","PTRATIO_PCTAF",
"PERMINTE_AVYRSEXP_AVSAL","PERSPEN_PTRATIO_PCTAF"]
data = recfromtxt(open(filepath + '/star98.csv',"rb"), delimiter=",",
names=names, skip_header=1, dtype=float)
# careful now
nabove = data['NABOVE'].copy()
nbelow = data['NBELOW'].copy()
data['NABOVE'] = nbelow # successes
data['NBELOW'] = nabove - nbelow # now failures
return data
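# A usage sketch (relies on statsmodels' public API rather than anything
# defined in this module): the canonical binomial GLM fit on this dataset
# looks roughly like
#
#     import statsmodels.api as sm
#     data = sm.datasets.star98.load()
#     exog = sm.add_constant(data.exog, prepend=False)
#     res = sm.GLM(data.endog, exog, family=sm.families.Binomial()).fit()
#     print(res.summary())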
| bsd-3-clause |
Tong-Chen/scikit-learn | sklearn/linear_model/tests/test_sparse_coordinate_descent.py | 3 | 8902 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import ignore_warnings
from sklearn.linear_model.coordinate_descent import (Lasso, ElasticNet,
ElasticNetCV)
def test_sparse_coef():
""" Check that the sparse_coef propery works """
clf = ElasticNet()
clf.coef_ = [1, 2, 3]
assert_true(sp.isspmatrix(clf.sparse_coef_))
assert_equal(clf.sparse_coef_.todense().tolist()[0], clf.coef_)
def test_normalize_option():
""" Check that the normalize option in enet works """
X = sp.csc_matrix([[-1], [0], [1]])
y = [-1, 0, 1]
clf_dense = ElasticNet(fit_intercept=True, normalize=True)
clf_sparse = ElasticNet(fit_intercept=True, normalize=True)
clf_dense.fit(X, y)
X = sp.csc_matrix(X)
clf_sparse.fit(X, y)
assert_almost_equal(clf_dense.dual_gap_, 0)
assert_array_almost_equal(clf_dense.coef_, clf_sparse.coef_)
def test_lasso_zero():
"""Check that the sparse lasso can handle zero data without crashing"""
X = sp.csc_matrix((3, 1))
y = [0, 0, 0]
T = np.array([[1], [2], [3]])
clf = Lasso().fit(X, y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0])
assert_array_almost_equal(pred, [0, 0, 0])
assert_almost_equal(clf.dual_gap_, 0)
def test_enet_toy_list_input():
"""Test ElasticNet for various values of alpha and l1_ratio with list X"""
X = np.array([[-1], [0], [1]])
X = sp.csc_matrix(X)
Y = [-1, 0, 1] # just a straight line
T = np.array([[2], [3], [4]]) # test sample
# this should be the same as unregularized least squares
clf = ElasticNet(alpha=0, l1_ratio=1.0)
# catch warning about alpha=0.
# this is discouraged but should work.
ignore_warnings(clf.fit)(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=1000)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.45454], 3)
assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
assert_almost_equal(clf.dual_gap_, 0)
def test_enet_toy_explicit_sparse_input():
"""Test ElasticNet for various values of alpha and l1_ratio with sparse
X"""
f = ignore_warnings
# training samples
X = sp.lil_matrix((3, 1))
X[0, 0] = -1
# X[1, 0] = 0
X[2, 0] = 1
Y = [-1, 0, 1] # just a straight line (the identity function)
# test samples
T = sp.lil_matrix((3, 1))
T[0, 0] = 2
T[1, 0] = 3
T[2, 0] = 4
# this should be the same as lasso
clf = ElasticNet(alpha=0, l1_ratio=1.0)
f(clf.fit)(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=1000)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.45454], 3)
assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
assert_almost_equal(clf.dual_gap_, 0)
def make_sparse_data(n_samples=100, n_features=100, n_informative=10, seed=42,
positive=False, n_targets=1):
random_state = np.random.RandomState(seed)
# build an ill-posed linear regression problem with many noisy features and
# comparatively few samples
# generate a ground truth model
w = random_state.randn(n_features, n_targets)
w[n_informative:] = 0.0 # only the top features are impacting the model
if positive:
w = np.abs(w)
X = random_state.randn(n_samples, n_features)
rnd = random_state.uniform(size=(n_samples, n_features))
X[rnd > 0.5] = 0.0 # 50% of zeros in input signal
# generate training ground truth labels
y = np.dot(X, w)
X = sp.csc_matrix(X)
if n_targets == 1:
y = np.ravel(y)
return X, y
def _test_sparse_enet_not_as_toy_dataset(alpha, fit_intercept, positive):
n_samples, n_features, max_iter = 100, 100, 1000
n_informative = 10
X, y = make_sparse_data(n_samples, n_features, n_informative,
positive=positive)
X_train, X_test = X[n_samples / 2:], X[:n_samples / 2]
y_train, y_test = y[n_samples / 2:], y[:n_samples / 2]
s_clf = ElasticNet(alpha=alpha, l1_ratio=0.8, fit_intercept=fit_intercept,
max_iter=max_iter, tol=1e-7, positive=positive,
warm_start=True)
s_clf.fit(X_train, y_train)
assert_almost_equal(s_clf.dual_gap_, 0, 4)
assert_greater(s_clf.score(X_test, y_test), 0.85)
# check the convergence is the same as the dense version
d_clf = ElasticNet(alpha=alpha, l1_ratio=0.8, fit_intercept=fit_intercept,
max_iter=max_iter, tol=1e-7, positive=positive,
warm_start=True)
d_clf.fit(X_train.todense(), y_train)
assert_almost_equal(d_clf.dual_gap_, 0, 4)
assert_greater(d_clf.score(X_test, y_test), 0.85)
assert_almost_equal(s_clf.coef_, d_clf.coef_, 5)
assert_almost_equal(s_clf.intercept_, d_clf.intercept_, 5)
# check that the coefs are sparse
assert_less(np.sum(s_clf.coef_ != 0.0), 2 * n_informative)
def test_sparse_enet_not_as_toy_dataset():
_test_sparse_enet_not_as_toy_dataset(alpha=0.1, fit_intercept=False,
positive=False)
_test_sparse_enet_not_as_toy_dataset(alpha=0.1, fit_intercept=True,
positive=False)
_test_sparse_enet_not_as_toy_dataset(alpha=1e-3, fit_intercept=False,
positive=True)
_test_sparse_enet_not_as_toy_dataset(alpha=1e-3, fit_intercept=True,
positive=True)
def test_sparse_lasso_not_as_toy_dataset():
n_samples = 100
max_iter = 1000
n_informative = 10
X, y = make_sparse_data(n_samples=n_samples, n_informative=n_informative)
X_train, X_test = X[n_samples / 2:], X[:n_samples / 2]
y_train, y_test = y[n_samples / 2:], y[:n_samples / 2]
s_clf = Lasso(alpha=0.1, fit_intercept=False, max_iter=max_iter, tol=1e-7)
s_clf.fit(X_train, y_train)
assert_almost_equal(s_clf.dual_gap_, 0, 4)
assert_greater(s_clf.score(X_test, y_test), 0.85)
# check the convergence is the same as the dense version
d_clf = Lasso(alpha=0.1, fit_intercept=False, max_iter=max_iter, tol=1e-7)
d_clf.fit(X_train.todense(), y_train)
assert_almost_equal(d_clf.dual_gap_, 0, 4)
assert_greater(d_clf.score(X_test, y_test), 0.85)
# check that the coefs are sparse
assert_equal(np.sum(s_clf.coef_ != 0.0), n_informative)
def test_enet_multitarget():
n_targets = 3
X, y = make_sparse_data(n_targets=n_targets)
estimator = ElasticNet(alpha=0.01, fit_intercept=True, precompute=None)
# XXX: There is a bug when precompute is not None!
estimator.fit(X, y)
coef, intercept, dual_gap = (estimator.coef_,
estimator.intercept_,
estimator.dual_gap_)
for k in range(n_targets):
estimator.fit(X, y[:, k])
assert_array_almost_equal(coef[k, :], estimator.coef_)
assert_array_almost_equal(intercept[k], estimator.intercept_)
assert_array_almost_equal(dual_gap[k], estimator.dual_gap_)
def test_path_parameters():
X, y = make_sparse_data()
max_iter = 50
n_alphas = 10
clf = ElasticNetCV(n_alphas=n_alphas, eps=1e-3, max_iter=max_iter,
l1_ratio=0.5, fit_intercept=False)
ignore_warnings(clf.fit)(X, y) # new params
assert_almost_equal(0.5, clf.l1_ratio)
assert_equal(n_alphas, clf.n_alphas)
assert_equal(n_alphas, len(clf.alphas_))
sparse_mse_path = clf.mse_path_
ignore_warnings(clf.fit)(X.toarray(), y) # compare with dense data
assert_almost_equal(clf.mse_path_, sparse_mse_path)
| bsd-3-clause |
UDST/urbansim | urbansim/utils/tests/test_testing.py | 3 | 2073 |
import pandas as pd
import pytest
from .. import testing
def test_frames_equal_not_frames():
frame = pd.DataFrame({'a': [1]})
with pytest.raises(AssertionError) as info:
testing.assert_frames_equal(frame, 1)
assert str(info.value) == 'Inputs must both be pandas DataFrames.'
def test_frames_equal_mismatched_columns():
expected = pd.DataFrame({'a': [1]})
actual = pd.DataFrame({'b': [2]})
try:
testing.assert_frames_equal(actual, expected)
except AssertionError:
pass
else:
raise AssertionError
def test_frames_equal_mismatched_rows():
expected = pd.DataFrame({'a': [1]}, index=[0])
actual = pd.DataFrame({'a': [1]}, index=[1])
try:
testing.assert_frames_equal(actual, expected)
except AssertionError:
pass
else:
raise AssertionError
def test_frames_equal_mismatched_items():
expected = pd.DataFrame({'a': [1]})
actual = pd.DataFrame({'a': [2]})
try:
testing.assert_frames_equal(actual, expected)
except AssertionError:
pass
else:
raise AssertionError
def test_frames_equal():
frame = pd.DataFrame({'a': [1]})
testing.assert_frames_equal(frame, frame)
def test_frames_equal_close():
frame1 = pd.DataFrame({'a': [1]})
frame2 = pd.DataFrame({'a': [1.00000000000002]})
with pytest.raises(AssertionError):
testing.assert_frames_equal(frame1, frame2)
testing.assert_frames_equal(frame1, frame2, use_close=True)
def test_index_equal_order_agnostic():
left = pd.Index([1, 2, 3])
right = pd.Index([3, 2, 1])
testing.assert_index_equal(left, right)
def test_index_equal_order_agnostic_raises_left():
left = pd.Index([1, 2, 3, 4])
right = pd.Index([3, 2, 1])
with pytest.raises(AssertionError):
testing.assert_index_equal(left, right)
def test_index_equal_order_agnostic_raises_right():
left = pd.Index([1, 2, 3])
right = pd.Index([3, 2, 1, 4])
with pytest.raises(AssertionError):
testing.assert_index_equal(left, right)
| bsd-3-clause |
hrantzsch/signature-verification | tools/tsne/tsne.py | 1 | 5950 | #
# tsne.py
#
# Implementation of t-SNE in Python. The implementation was tested on Python 2.7.10, and it requires a working
# installation of NumPy. The implementation comes with an example on the MNIST dataset. In order to plot the
# results of this example, a working installation of matplotlib is required.
#
# The example can be run by executing: `ipython tsne.py`
#
#
# Created by Laurens van der Maaten on 20-12-08.
# Copyright (c) 2008 Tilburg University. All rights reserved.
import numpy as Math
import matplotlib.pyplot as Plot
def Hbeta(D = Math.array([]), beta = 1.0):
"""Compute the perplexity and the P-row for a specific value of the precision of a Gaussian distribution."""
# Compute P-row and corresponding perplexity
P = Math.exp(-D.copy() * beta);
sumP = sum(P);
H = Math.log(sumP) + beta * Math.sum(D * P) / sumP;
P = P / sumP;
return H, P;
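# Note (illustrative): H above is the Shannon entropy of the row in nats, so
# its effective perplexity is exp(H); x2p() below binary-searches beta until H
# matches log(perplexity), i.e. until exp(H) hits the target perplexity.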
def x2p(X = Math.array([]), tol = 1e-5, perplexity = 30.0):
"""Performs a binary search to get P-values in such a way that each conditional Gaussian has the same perplexity."""
# Initialize some variables
print("Computing pairwise distances...")
(n, d) = X.shape;
sum_X = Math.sum(Math.square(X), 1);
D = Math.add(Math.add(-2 * Math.dot(X, X.T), sum_X).T, sum_X);
P = Math.zeros((n, n));
beta = Math.ones((n, 1));
logU = Math.log(perplexity);
# Loop over all datapoints
for i in range(n):
# Print progress
if i % 500 == 0:
print("Computing P-values for point {} of {} ...".format(i, n))
pass
# Compute the Gaussian kernel and entropy for the current precision
betamin = -Math.inf;
betamax = Math.inf;
Di = D[i, Math.concatenate((Math.r_[0:i], Math.r_[i+1:n]))];
(H, thisP) = Hbeta(Di, beta[i]);
# Evaluate whether the perplexity is within tolerance
Hdiff = H - logU;
tries = 0;
while Math.abs(Hdiff) > tol and tries < 50:
# If not, increase or decrease precision
if Hdiff > 0:
betamin = beta[i].copy();
if betamax == Math.inf or betamax == -Math.inf:
beta[i] = beta[i] * 2;
else:
beta[i] = (beta[i] + betamax) / 2;
else:
betamax = beta[i].copy();
if betamin == Math.inf or betamin == -Math.inf:
beta[i] = beta[i] / 2;
else:
beta[i] = (beta[i] + betamin) / 2;
# Recompute the values
(H, thisP) = Hbeta(Di, beta[i]);
Hdiff = H - logU;
tries = tries + 1;
# Set the final row of P
P[i, Math.concatenate((Math.r_[0:i], Math.r_[i+1:n]))] = thisP;
# Return final P-matrix
print("Mean value of sigma: {}".format(Math.mean(Math.sqrt(1 / beta))));
return P;
def pca(X = Math.array([]), no_dims = 50):
"""Runs PCA on the NxD array X in order to reduce its dimensionality to no_dims dimensions."""
print("Preprocessing the data using PCA...")
(n, d) = X.shape;
X = X - Math.tile(Math.mean(X, 0), (n, 1));
(l, M) = Math.linalg.eig(Math.dot(X.T, X));
Y = Math.dot(X, M[:,0:no_dims]);
return Y;
def tsne(X = Math.array([]), no_dims = 2, initial_dims = 50, perplexity = 30.0):
"""Runs t-SNE on the dataset in the NxD array X to reduce its dimensionality to no_dims dimensions.
The syntax of the function is Y = tsne.tsne(X, no_dims, perplexity), where X is an NxD NumPy array."""
# Check inputs
if isinstance(no_dims, float):
print("Error: array X should have type float.");
return -1;
if round(no_dims) != no_dims:
print("Error: number of dimensions should be an integer.");
return -1;
# Initialize variables
X = pca(X, initial_dims).real;
(n, d) = X.shape;
max_iter = 1000;
initial_momentum = 0.5;
final_momentum = 0.8;
eta = 500;
min_gain = 0.01;
Y = Math.random.randn(n, no_dims);
dY = Math.zeros((n, no_dims));
iY = Math.zeros((n, no_dims));
gains = Math.ones((n, no_dims));
# Compute P-values
P = x2p(X, 1e-5, perplexity);
P = P + Math.transpose(P);
P = P / Math.sum(P);
P = P * 4; # early exaggeration
P = Math.maximum(P, 1e-12);
# Run iterations
for iter in range(max_iter):
# Compute pairwise affinities
sum_Y = Math.sum(Math.square(Y), 1);
num = 1 / (1 + Math.add(Math.add(-2 * Math.dot(Y, Y.T), sum_Y).T, sum_Y));
num[range(n), range(n)] = 0;
Q = num / Math.sum(num);
Q = Math.maximum(Q, 1e-12);
# Compute gradient
PQ = P - Q;
for i in range(n):
dY[i,:] = Math.sum(Math.tile(PQ[:,i] * num[:,i], (no_dims, 1)).T * (Y[i,:] - Y), 0);
# Perform the update
if iter < 20:
momentum = initial_momentum
else:
momentum = final_momentum
gains = (gains + 0.2) * ((dY > 0) != (iY > 0)) + (gains * 0.8) * ((dY > 0) == (iY > 0));
gains[gains < min_gain] = min_gain;
iY = momentum * iY - eta * (gains * dY);
Y = Y + iY;
Y = Y - Math.tile(Math.mean(Y, 0), (n, 1));
# Compute current value of cost function
if (iter + 1) % 10 == 0:
C = Math.sum(P * Math.log(P / Q));
print("Iteration {}: error is {}".format(iter + 1, C))
# Stop lying about P-values
if iter == 100:
P = P / 4;
# Return solution
return Y;
if __name__ == "__main__":
print("Run Y = tsne.tsne(X, no_dims, perplexity) to perform t-SNE on your dataset.")
print("Running example on 2,500 MNIST digits...")
X = Math.loadtxt("mnist2500_X.txt");
labels = Math.loadtxt("mnist2500_labels.txt");
Y = tsne(X, 2, 50, 20.0);
Plot.scatter(Y[:,0], Y[:,1], 20, labels);
Plot.show();
| gpl-3.0 |
ukoethe/iiboost | python/tests/python_test_raw.py | 2 | 1992 | ###################################################################################
# Simple test script, doesn't make use of the python wrapper class, just raw ctypes
# Only use this to test basic functionality, otherwise use python_test_class.py
###################################################################################
import numpy as np
from sklearn.externals import joblib
import ctypes
# gets 'prop' from every element in L
# and puts it in an array of element type cArrayElType
def propToCArray( L, prop, cArrayElType ):
N = len(L)
arr = (cArrayElType * N)()
for idx,e in enumerate(L):
arr[idx] = cArrayElType( eval( "e." + prop ) )
return arr
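# Illustrative example: with the image list built below,
# propToCArray(imgs, "shape[2]", ctypes.c_int) yields a (c_int * len(imgs))
# array of image widths, while propToCArray(imgs, "ctypes.data", ctypes.c_void_p)
# yields the raw data pointers handed to the C library.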
# load data
print "--- Loading data ---"
gts = [joblib.load("../../testData/gt.jlb")]
imgs = [joblib.load("../../testData/img.jlb")]
print "--- Loading lib ---"
boostLib = ctypes.CDLL("../../build/python/libiiboost_python.so")
# this returns a python string
boostLib.serializeModel.restype = ctypes.py_object
print "--- Calling train() ---"
# we need to pass an array to the C call
widthList = propToCArray( imgs, "shape[2]", ctypes.c_int)
heightList = propToCArray( imgs, "shape[1]", ctypes.c_int)
depthList = propToCArray( imgs, "shape[0]", ctypes.c_int)
# list of img and gt (pointers)
imgList = propToCArray( imgs, "ctypes.data", ctypes.c_void_p )
gtList = propToCArray( gts, "ctypes.data", ctypes.c_void_p )
debugOutput = 1
numStumps = 10
model = ctypes.c_void_p(
boostLib.train( imgList, gtList,
widthList, heightList, depthList,
ctypes.c_int(1),
ctypes.c_int(numStumps),
ctypes.c_int(debugOutput) ) )
print "--- Serializing model ---"
serStr = ctypes.py_object( boostLib.serializeModel( model ) )
# pre-alloc prediction
pred = np.empty_like( imgs[0], dtype=np.dtype("float32") )
print "--- Predicting ---"
boostLib.predict( model,
ctypes.c_void_p(imgs[0].ctypes.data),
widthList[0], heightList[0], depthList[0],
ctypes.c_void_p(pred.ctypes.data) )
| gpl-3.0 |
sonnyhu/scikit-learn | sklearn/semi_supervised/label_propagation.py | 17 | 15941 | # coding=utf8
"""
Label propagation in the context of this module refers to a set of
semisupervised classification algorithms. In the high level, these algorithms
work by forming a fully-connected graph between all points given and solving
for the steady-state distribution of labels at each point.
These algorithms perform very well in practice, but they can be expensive to
run, at approximately O(N^3) where N is the number of (labeled and
unlabeled) points. The theory (why they perform so well) is motivated by
intuitions from random walk algorithms and geometric relationships in the data.
For more information see the references below.
Model Features
--------------
Label clamping:
The algorithm tries to learn distributions of labels over the dataset. In the
"Hard Clamp" mode, the true ground labels are never allowed to change. They
are clamped into position. In the "Soft Clamp" mode, they are allowed some
wiggle room, but some alpha of their original value will always be retained.
Hard clamp is the same as soft clamping with alpha set to 1.
Kernel:
A function which projects a vector into some higher dimensional space. This
implementation supports RBF and KNN kernels. Using the RBF kernel generates
a dense matrix of size O(N^2). The KNN kernel will generate a sparse matrix of
size O(k*N), which will run much faster. See the documentation for SVMs for
more info on kernels.
Examples
--------
>>> import numpy as np
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelPropagation
>>> label_prop_model = LabelPropagation()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.randint(0, 2,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelPropagation(...)
Notes
-----
References:
[1] Yoshua Bengio, Olivier Delalleau, Nicolas Le Roux. In Semi-Supervised
Learning (2006), pp. 193-216
[2] Olivier Delalleau, Yoshua Bengio, Nicolas Le Roux. Efficient
Non-Parametric Function Induction in Semi-Supervised Learning. AISTAT 2005
"""
# Authors: Clay Woolam <[email protected]>
# License: BSD
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy import sparse
from ..base import BaseEstimator, ClassifierMixin
from ..externals import six
from ..metrics.pairwise import rbf_kernel
from ..neighbors.unsupervised import NearestNeighbors
from ..utils.extmath import safe_sparse_dot
from ..utils.graph import graph_laplacian
from ..utils.multiclass import check_classification_targets
from ..utils.validation import check_X_y, check_is_fitted, check_array
# Helper functions
def _not_converged(y_truth, y_prediction, tol=1e-3):
"""basic convergence check"""
return np.abs(y_truth - y_prediction).sum() > tol
class BaseLabelPropagation(six.with_metaclass(ABCMeta, BaseEstimator,
ClassifierMixin)):
"""Base class for label propagation module.
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
Parameter for rbf kernel
alpha : float
Clamping factor
max_iter : float
        Maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
n_neighbors : integer > 0
Parameter for knn kernel
n_jobs : int, optional (default = 1)
The number of parallel jobs to run.
If ``-1``, then the number of jobs is set to the number of CPU cores.
"""
def __init__(self, kernel='rbf', gamma=20, n_neighbors=7,
alpha=1, max_iter=30, tol=1e-3, n_jobs=1):
self.max_iter = max_iter
self.tol = tol
# kernel parameters
self.kernel = kernel
self.gamma = gamma
self.n_neighbors = n_neighbors
# clamping factor
self.alpha = alpha
self.n_jobs = n_jobs
def _get_kernel(self, X, y=None):
if self.kernel == "rbf":
if y is None:
return rbf_kernel(X, X, gamma=self.gamma)
else:
return rbf_kernel(X, y, gamma=self.gamma)
elif self.kernel == "knn":
if self.nn_fit is None:
self.nn_fit = NearestNeighbors(self.n_neighbors,
n_jobs=self.n_jobs).fit(X)
if y is None:
return self.nn_fit.kneighbors_graph(self.nn_fit._fit_X,
self.n_neighbors,
mode='connectivity')
else:
return self.nn_fit.kneighbors(y, return_distance=False)
else:
raise ValueError("%s is not a valid kernel. Only rbf and knn"
" are supported at this time" % self.kernel)
@abstractmethod
def _build_graph(self):
raise NotImplementedError("Graph construction must be implemented"
" to fit a label propagation model.")
def predict(self, X):
"""Performs inductive inference across the model.
Parameters
----------
X : array_like, shape = [n_samples, n_features]
Returns
-------
y : array_like, shape = [n_samples]
Predictions for input data
"""
probas = self.predict_proba(X)
return self.classes_[np.argmax(probas, axis=1)].ravel()
def predict_proba(self, X):
"""Predict probability for each possible outcome.
Compute the probability estimates for each single sample in X
and each possible outcome seen during training (categorical
distribution).
Parameters
----------
X : array_like, shape = [n_samples, n_features]
Returns
-------
probabilities : array, shape = [n_samples, n_classes]
Normalized probability distributions across
class labels
"""
check_is_fitted(self, 'X_')
X_2d = check_array(X, accept_sparse=['csc', 'csr', 'coo', 'dok',
'bsr', 'lil', 'dia'])
weight_matrices = self._get_kernel(self.X_, X_2d)
if self.kernel == 'knn':
probabilities = []
for weight_matrix in weight_matrices:
ine = np.sum(self.label_distributions_[weight_matrix], axis=0)
probabilities.append(ine)
probabilities = np.array(probabilities)
else:
weight_matrices = weight_matrices.T
probabilities = np.dot(weight_matrices, self.label_distributions_)
normalizer = np.atleast_2d(np.sum(probabilities, axis=1)).T
probabilities /= normalizer
return probabilities
def fit(self, X, y):
"""Fit a semi-supervised label propagation model based
All the input data is provided matrix X (labeled and unlabeled)
and corresponding label matrix y with a dedicated marker value for
unlabeled samples.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
A {n_samples by n_samples} size matrix will be created from this
y : array_like, shape = [n_samples]
n_labeled_samples (unlabeled points are marked as -1)
All unlabeled samples will be transductively assigned labels
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y)
self.X_ = X
check_classification_targets(y)
# actual graph construction (implementations should override this)
graph_matrix = self._build_graph()
# label construction
# construct a categorical distribution for classification only
classes = np.unique(y)
classes = (classes[classes != -1])
self.classes_ = classes
n_samples, n_classes = len(y), len(classes)
y = np.asarray(y)
unlabeled = y == -1
clamp_weights = np.ones((n_samples, 1))
clamp_weights[unlabeled, 0] = self.alpha
# initialize distributions
self.label_distributions_ = np.zeros((n_samples, n_classes))
for label in classes:
self.label_distributions_[y == label, classes == label] = 1
y_static = np.copy(self.label_distributions_)
if self.alpha > 0.:
y_static *= 1 - self.alpha
y_static[unlabeled] = 0
l_previous = np.zeros((self.X_.shape[0], n_classes))
remaining_iter = self.max_iter
if sparse.isspmatrix(graph_matrix):
graph_matrix = graph_matrix.tocsr()
while (_not_converged(self.label_distributions_, l_previous, self.tol)
and remaining_iter > 1):
l_previous = self.label_distributions_
self.label_distributions_ = safe_sparse_dot(
graph_matrix, self.label_distributions_)
# clamp
self.label_distributions_ = np.multiply(
clamp_weights, self.label_distributions_) + y_static
remaining_iter -= 1
normalizer = np.sum(self.label_distributions_, axis=1)[:, np.newaxis]
self.label_distributions_ /= normalizer
# set the transduction item
transduction = self.classes_[np.argmax(self.label_distributions_,
axis=1)]
self.transduction_ = transduction.ravel()
self.n_iter_ = self.max_iter - remaining_iter
return self
class LabelPropagation(BaseLabelPropagation):
"""Label Propagation classifier
Read more in the :ref:`User Guide <label_propagation>`.
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
        Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
Parameter for rbf kernel
n_neighbors : integer > 0
Parameter for knn kernel
alpha : float
Clamping factor
max_iter : float
        Maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
Attributes
----------
X_ : array, shape = [n_samples, n_features]
Input array.
classes_ : array, shape = [n_classes]
The distinct labels used in classifying instances.
label_distributions_ : array, shape = [n_samples, n_classes]
Categorical distribution for each item.
transduction_ : array, shape = [n_samples]
Label assigned to each item via the transduction.
n_iter_ : int
Number of iterations run.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelPropagation
>>> label_prop_model = LabelPropagation()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.randint(0, 2,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelPropagation(...)
References
----------
Xiaojin Zhu and Zoubin Ghahramani. Learning from labeled and unlabeled data
with label propagation. Technical Report CMU-CALD-02-107, Carnegie Mellon
University, 2002 http://pages.cs.wisc.edu/~jerryzhu/pub/CMU-CALD-02-107.pdf
See Also
--------
LabelSpreading : Alternate label propagation strategy more robust to noise
"""
def _build_graph(self):
"""Matrix representing a fully connected graph between each sample
This basic implementation creates a non-stochastic affinity matrix, so
class distributions will exceed 1 (normalization may be desired).
"""
if self.kernel == 'knn':
self.nn_fit = None
affinity_matrix = self._get_kernel(self.X_)
normalizer = affinity_matrix.sum(axis=0)
if sparse.isspmatrix(affinity_matrix):
affinity_matrix.data /= np.diag(np.array(normalizer))
else:
affinity_matrix /= normalizer[:, np.newaxis]
return affinity_matrix
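# --- Illustrative usage sketch (added for exposition, not part of the original module) ---
# A hedged example comparing the two supported kernels on iris with roughly half
# of the labels hidden (-1). The random seed and kernel settings are arbitrary
# assumptions; call the helper explicitly to inspect transduction_ and predict_proba.
def _example_compare_kernels():
    import numpy as np
    from sklearn import datasets
    iris = datasets.load_iris()
    rng = np.random.RandomState(0)
    y = np.copy(iris.target)
    y[rng.rand(len(y)) < 0.5] = -1  # hide about half of the labels
    results = {}
    for kernel in ("rbf", "knn"):
        model = LabelPropagation(kernel=kernel, gamma=20, n_neighbors=7)
        model.fit(iris.data, y)
        results[kernel] = (model.transduction_[:10],
                           model.predict_proba(iris.data[:3]))
    return results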
class LabelSpreading(BaseLabelPropagation):
"""LabelSpreading model for semi-supervised learning
    This model is similar to the basic Label Propagation algorithm,
    but uses an affinity matrix based on the normalized graph Laplacian
and soft clamping across the labels.
Read more in the :ref:`User Guide <label_propagation>`.
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
parameter for rbf kernel
n_neighbors : integer > 0
parameter for knn kernel
alpha : float
clamping factor
max_iter : float
maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
n_jobs : int, optional (default = 1)
The number of parallel jobs to run.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Attributes
----------
X_ : array, shape = [n_samples, n_features]
Input array.
classes_ : array, shape = [n_classes]
The distinct labels used in classifying instances.
label_distributions_ : array, shape = [n_samples, n_classes]
Categorical distribution for each item.
transduction_ : array, shape = [n_samples]
Label assigned to each item via the transduction.
n_iter_ : int
Number of iterations run.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelSpreading
>>> label_prop_model = LabelSpreading()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.randint(0, 2,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelSpreading(...)
References
----------
Dengyong Zhou, Olivier Bousquet, Thomas Navin Lal, Jason Weston,
Bernhard Schoelkopf. Learning with local and global consistency (2004)
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.115.3219
See Also
--------
LabelPropagation : Unregularized graph based semi-supervised learning
"""
def __init__(self, kernel='rbf', gamma=20, n_neighbors=7, alpha=0.2,
max_iter=30, tol=1e-3, n_jobs=1):
# this one has different base parameters
super(LabelSpreading, self).__init__(kernel=kernel, gamma=gamma,
n_neighbors=n_neighbors,
alpha=alpha, max_iter=max_iter,
tol=tol,
n_jobs=n_jobs)
def _build_graph(self):
"""Graph matrix for Label Spreading computes the graph laplacian"""
# compute affinity matrix (or gram matrix)
if self.kernel == 'knn':
self.nn_fit = None
n_samples = self.X_.shape[0]
affinity_matrix = self._get_kernel(self.X_)
laplacian = graph_laplacian(affinity_matrix, normed=True)
laplacian = -laplacian
if sparse.isspmatrix(laplacian):
diag_mask = (laplacian.row == laplacian.col)
laplacian.data[diag_mask] = 0.0
else:
laplacian.flat[::n_samples + 1] = 0.0 # set diag to 0.0
return laplacian
| bsd-3-clause |
cmorgan/pysystemtrade | syscore/accounting.py | 1 | 56500 | """
Suite of things to work out p&l, and statistics thereof
"""
from copy import copy, deepcopy
import pandas as pd
from pandas.tseries.offsets import BDay
import numpy as np
from scipy.stats import skew, ttest_rel, ttest_1samp
import scipy.stats as stats
import random
from syscore.algos import robust_vol_calc
from syscore.pdutils import drawdown
from syscore.dateutils import BUSINESS_DAYS_IN_YEAR, ROOT_BDAYS_INYEAR, WEEKS_IN_YEAR, ROOT_WEEKS_IN_YEAR
from syscore.dateutils import MONTHS_IN_YEAR, ROOT_MONTHS_IN_YEAR
"""
some defaults
"""
DEFAULT_CAPITAL = 10000000.0
DEFAULT_ANN_RISK_TARGET = 0.16
DEFAULT_DAILY_CAPITAL = (
DEFAULT_CAPITAL * DEFAULT_ANN_RISK_TARGET / ROOT_BDAYS_INYEAR)
def account_test(ac1, ac2):
"""
Given two Account like objects performs a two sided t test of normalised returns
:param ac1: first set of returns
:type ac1: accountCurve or pd.DataFrame of returns
:param ac2: second set of returns
:type ac2: accountCurve or pd.DataFrame of returns
:returns: 2 tuple: difference in means, t-test results
"""
common_ts = sorted(set(list(ac1.index)) & set(list(ac2.index)))
ac1_common = ac1.cumsum().reindex(common_ts, method="ffill").diff().values
ac2_common = ac2.cumsum().reindex(common_ts, method="ffill").diff().values
missing_values = [
idx for idx in range(len(common_ts))
if (np.isnan(ac1_common[idx]) or np.isnan(ac2_common[idx]))
]
ac1_common = [
ac1_common[idx] for idx in range(len(common_ts))
if idx not in missing_values
]
ac2_common = [
ac2_common[idx] for idx in range(len(common_ts))
if idx not in missing_values
]
ac1_common = ac1_common / np.nanstd(ac1_common)
ac2_common = ac2_common / np.nanstd(ac2_common)
diff = np.mean(ac1_common) - np.mean(ac2_common)
return (diff, ttest_rel(ac1_common, ac2_common))
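# --- Illustrative usage sketch (added for exposition, not part of the original module) ---
# Hedged example of account_test(): build two synthetic daily return series and
# check whether their normalised means differ. Every number below is an
# assumption chosen purely for illustration.
def _example_account_test():
    import numpy as np
    import pandas as pd
    idx = pd.date_range("2015-01-01", periods=500, freq="B")
    rng = np.random.RandomState(1)
    ac1 = pd.Series(rng.normal(0.0005, 0.01, len(idx)), index=idx)
    ac2 = pd.Series(rng.normal(0.0001, 0.01, len(idx)), index=idx)
    diff, ttest = account_test(ac1, ac2)
    print("difference in normalised means %.4f, p value %.3f" % (diff, ttest[1]))
    return diff, ttest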
def pandl_with_data(price,
trades=None,
marktomarket=True,
positions=None,
delayfill=True,
roundpositions=False,
get_daily_returns_volatility=None,
forecast=None,
fx=None,
daily_risk_capital=None,
value_of_price_point=1.0):
"""
Calculate pandl for an individual position
If marktomarket=True, and trades is provided, calculate pandl both at
open/close and mark to market in between
If trades is not provided, work out using positions. If delayfill is True,
assume we get filled at the next price after the trade
If roundpositions is True when working out trades from positions,
then round; otherwise assume we trade fractional lots
If positions are not provided, work out position using forecast and
volatility (this will be for an arbitrary daily risk target)
If volatility is not provided, work out from price
If fx is not provided, assume fx rate is 1.0 and work out p&l in currency
of instrument
If value_of_price_point is not provided, assume is 1.0 (block size is value
of 1 price point, eg 100 if you're buying 100 shares for one instrument
block)
:param price: price series
:type price: Tx1 pd.Series
:param trades: set of trades done NOT always aligned to price
:type trades: Tx2 pd.DataFrame columns ['trades', 'fill_price'] or None
:param marktomarket: If trades provided: Should we mark to market, or just
use traded prices?
:type marktomarket: bool
:param positions: series of positions NOT ALWAYS aligned to price
:type positions: Tx1 pd.Series or None
:param delayfill: If no trades provided: should we delay fills?
:type delayfill: bool
:param roundpositions: If no trades provided, should we round positions
when calculating trades?
:type roundpositions: bool
:param get_daily_returns_volatility: series of volatility estimates, used
for calculation of positions aligned to price
:type get_daily_returns_volatility: Tx1 pd.Series or None
:param forecast: series of forecasts, needed to work out positions if missing
:type forecast: Tx1 pd.Series or None
    :param daily_risk_capital: needed to work out positions from forecasts. If a time series
must be aligned to price
:type daily_risk_capital: Tx1 pd.Series or None or float
:param fx: series of fx rates from instrument currency to base currency, to
work out p&l in base currency aligned to price
:type fx: Tx1 pd.Series or None
:param value_of_price_point: value of one unit movement in price
:type value_of_price_point: float
:returns: 5- Tuple (positions, trades, instr_ccy_returns, base_ccy_returns,
fx) all Tx1 pd.DataFrames
"""
if price is None:
raise Exception("Can't work p&l without price")
if fx is None:
# assume it's 1.0
use_fx = pd.Series([1.0] * len(price.index), index=price.index)
else:
use_fx = fx.reindex(price.index, method="ffill")
if trades is None:
prices_to_use = copy(price)
if positions is None:
positions = get_positions_from_forecasts(
price, get_daily_returns_volatility, forecast, use_fx,
value_of_price_point, daily_risk_capital)
if roundpositions:
use_positions = positions.round()
else:
use_positions = copy(positions)
if delayfill:
use_positions = use_positions.shift(1)
cum_trades = use_positions.ffill()
trades_to_use = cum_trades.diff()
else:
# have some trades data
if marktomarket:
# want to have both kinds of price
prices_to_use = pd.concat(
[price, trades.fill_price], axis=1, join='outer')
# Where no fill price available, use price
prices_to_use = prices_to_use.fillna(axis=1, method="ffill")
prices_to_use = prices_to_use.fill_price
            # align trades to the price index
trades_to_use = trades.reindex(
prices_to_use.index, fill_value=0.0).trades
else:
# only calculate p&l on trades, using fills
trades_to_use = trades.trades
prices_to_use = trades.fill_price.ffill()
cum_trades = trades_to_use.cumsum().ffill()
price_returns = prices_to_use.ffill().diff()
instr_ccy_returns = cum_trades.shift(
1) * price_returns * value_of_price_point
instr_ccy_returns = instr_ccy_returns.cumsum().ffill().reindex(
price.index).diff()
base_ccy_returns = instr_ccy_returns * use_fx
return (cum_trades, trades_to_use, instr_ccy_returns, base_ccy_returns,
use_fx, value_of_price_point)
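# --- Illustrative sketch (added for exposition, not part of the original module) ---
# The heart of pandl_with_data() is: yesterday's cumulative position times
# today's price change, scaled by the value of a price point and the fx rate.
# This hedged, standalone mini-example reproduces just that step with made-up
# numbers (every value below is an assumption).
def _example_core_pandl():
    import pandas as pd
    idx = pd.date_range("2016-01-01", periods=6, freq="B")
    price = pd.Series([100.0, 101.0, 100.5, 102.0, 101.5, 103.0], index=idx)
    cum_position = pd.Series([0.0, 2.0, 2.0, -1.0, -1.0, 0.0], index=idx)
    value_of_price_point = 10.0
    fx = pd.Series(1.25, index=idx)
    instr_ccy_returns = cum_position.shift(1) * price.diff() * value_of_price_point
    base_ccy_returns = instr_ccy_returns * fx
    return base_ccy_returns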
def get_positions_from_forecasts(price, get_daily_returns_volatility, forecast,
use_fx, value_of_price_point,
daily_risk_capital, **kwargs):
"""
Work out position using forecast, volatility, use_fx, value_of_price_point
(this will be for an arbitrary daily risk target)
If volatility is not provided, work out from price (uses a standard method
so may differ from precise system p&l)
:param price: price series
:type price: Tx1 pd.Series
:param get_daily_returns_volatility: series of volatility estimates. NOT %
volatility, price difference vol ALWAYS matched to price
:type get_daily_returns_volatility: Tx1 pd.Series or None
:param forecast: series of forecasts, needed to work out positions, MATCHED to price
:type forecast: Tx1 pd.Series
:param use_fx: series of fx rates from instrument currency to base currency, to
work out p&l in base currency, MATCHED to price
:type use_fx: Tx1 pd.Series
:param value_of_price_point: value of one unit movement in price
:type value_of_price_point: float
:param daily_risk_capital: Capital at risk
:type capital: float or None or pd.Series aligned to forecast
**kwargs: passed to vol calculation
:returns: Tx1 pd dataframe of positions
"""
if forecast is None:
raise Exception(
"If you don't provide a series of trades or positions, I need a "
"forecast")
if get_daily_returns_volatility is None:
get_daily_returns_volatility = robust_vol_calc(price.diff(), **kwargs)
"""
Herein the proof why this position calculation is correct (see chapters
5-11 of 'systematic trading' book)
Position = forecast x instrument weight x instrument_div_mult x vol_scalar / 10.0
= forecast x instrument weight x instrument_div_mult x daily cash vol target / (10.0 x instr value volatility)
= forecast x instrument weight x instrument_div_mult x daily cash vol target / (10.0 x instr ccy volatility x fx rate)
= forecast x instrument weight x instrument_div_mult x daily cash vol target / (10.0 x block value x % price volatility x fx rate)
= forecast x instrument weight x instrument_div_mult x daily cash vol target / (10.0 x underlying price x 0.01 x value of price move x 100 x price change volatility/(underlying price) x fx rate)
= forecast x instrument weight x instrument_div_mult x daily cash vol target / (10.0 x value of price move x price change volatility x fx rate)
Making some arbitrary assumptions (one instrument, 100% of capital, daily target DAILY_CAPITAL):
= forecast x 1.0 x 1.0 x DAILY_CAPITAL / (10.0 x value of price move x price diff volatility x fx rate)
= forecast x multiplier / (value of price move x price change volatility x fx rate)
"""
if daily_risk_capital is None:
daily_risk_capital = DEFAULT_DAILY_CAPITAL
multiplier = daily_risk_capital * 1.0 * 1.0 / 10.0
denominator = (
value_of_price_point * get_daily_returns_volatility * use_fx)
numerator = forecast * multiplier
positions = numerator.ffill() / denominator.ffill()
return positions
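# --- Illustrative sketch (added for exposition, not part of the original module) ---
# The position formula derived in the docstring above, evaluated at a single
# point in time. All inputs are assumptions chosen for illustration; only
# DEFAULT_DAILY_CAPITAL comes from this module.
def _example_position_sizing():
    forecast = 10.0                     # an 'average' forecast strength
    value_of_price_point = 10.0         # cash value of a one point price move
    price_vol = 1.5                     # daily price-difference volatility
    fx = 1.25                           # instrument ccy -> base ccy
    multiplier = DEFAULT_DAILY_CAPITAL * 1.0 * 1.0 / 10.0
    position = forecast * multiplier / (value_of_price_point * price_vol * fx)
    return position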
def percent(accurve):
"""
Takes any account curve object
Returns accountCurveSingleElementOneFreq - anything else is lost
"""
pass
class accountCurveSingleElementOneFreq(pd.Series):
"""
A single account curve for one asset (instrument / trading rule variation, ...)
and one part of it (gross, net, costs)
and for one frequency (daily, weekly, monthly...)
Inherits from series
We never init these directly but only as part of accountCurveSingleElement
"""
def __init__(self, returns_df, capital, weighted_flag=False,
frequency="D"):
"""
:param returns_df: series of returns
:type returns_df: Tx1 pd.Series
:param weighted_flag: Does this curve have weighted returns?
:type weighted: bool
:param frequency: Frequency D days, W weeks, M months, Y years
:type frequency: str
:param capital: used to calculate extrapolated and % curves
:type capital: float or pd.Series
"""
super().__init__(returns_df)
try:
returns_scalar = dict(
D=BUSINESS_DAYS_IN_YEAR,
W=WEEKS_IN_YEAR,
M=MONTHS_IN_YEAR,
Y=1)[frequency]
vol_scalar = dict(
D=ROOT_BDAYS_INYEAR,
W=ROOT_WEEKS_IN_YEAR,
M=ROOT_MONTHS_IN_YEAR,
Y=1)[frequency]
except KeyError:
raise Exception("Not a frequency %s" % frequency)
setattr(self, "frequency", frequency)
setattr(self, "_returns_scalar", returns_scalar)
setattr(self, "_vol_scalar", vol_scalar)
setattr(self, "_returns_df", returns_df)
setattr(self, "weighted_flag", weighted_flag)
setattr(self, "capital", capital)
def as_df(self):
print("Deprecated accountCurve.as_df use .as_ts() please")
# backward compatibility
return self.as_ts()
def as_ts(self):
return pd.Series(self._returns_df)
def percent(self):
perc_returns = self.as_percent()
new_curve = accountCurveSingleElementOneFreq(
perc_returns, 100.0, self.weighted_flag, self.frequency)
return new_curve
def cumulative(self):
cum_returns = self.as_cumulative()
new_curve = accountCurveSingleElementOneFreq(
cum_returns, self.capital, self.weighted_flag, self.frequency)
return new_curve
def as_percent(self):
return 100.0 * self.as_ts() / self.capital
def as_cumulative(self):
if isinstance(self.capital, pd.core.series.Series):
print(
"You shouldn't cumulate returns when capital is varying. Using the first value of capital only"
)
use_capital = self.capital[0]
else:
use_capital = self.capital
perc_ac_returns = self.as_percent() / 100.0
cum_returns = (1.0 + perc_ac_returns).cumprod()
cum_returns = cum_returns * use_capital
return cum_returns.diff()
def curve(self):
# we cache this since it's used so much
if hasattr(self, "_curve"):
return self._curve
else:
curve = self.cumsum().ffill()
setattr(self, "_curve", curve)
return curve
def mean(self):
return float(self.as_ts().mean())
def std(self):
return float(self.as_ts().std())
def ann_mean(self):
avg = self.mean()
return avg * self._returns_scalar
def ann_std(self):
period_std = self.std()
return period_std * self._vol_scalar
def sharpe(self):
mean_return = self.ann_mean()
vol = self.ann_std()
try:
sharpe = mean_return / vol
except ZeroDivisionError:
sharpe = np.nan
return sharpe
def drawdown(self):
x = self.curve()
return drawdown(x)
def avg_drawdown(self):
dd = self.drawdown()
return np.nanmean(dd.values)
def worst_drawdown(self):
dd = self.drawdown()
return np.nanmin(dd.values)
def time_in_drawdown(self):
dd = self.drawdown()
dd = [z for z in dd.values if not np.isnan(z)]
in_dd = float(len([z for z in dd if z < 0]))
return in_dd / float(len(dd))
def calmar(self):
return self.ann_mean() / -self.worst_drawdown()
def avg_return_to_drawdown(self):
return self.ann_mean() / -self.avg_drawdown()
def sortino(self):
period_stddev = np.std(self.losses())
ann_stdev = period_stddev * self._vol_scalar
ann_mean = self.ann_mean()
try:
sortino = ann_mean / ann_stdev
except ZeroDivisionError:
sortino = np.nan
return sortino
def vals(self):
x = [z for z in self.values if not np.isnan(z)]
return x
def min(self):
return np.nanmin(self.vals())
def max(self):
return np.max(self.vals())
def median(self):
return np.median(self.vals())
def skew(self):
return skew(self.vals())
def losses(self):
x = self.vals()
return [z for z in x if z < 0]
def gains(self):
x = self.vals()
return [z for z in x if z > 0]
def avg_loss(self):
return np.mean(self.losses())
def avg_gain(self):
return np.mean(self.gains())
def gaintolossratio(self):
return self.avg_gain() / -self.avg_loss()
def profitfactor(self):
return sum(self.gains()) / -sum(self.losses())
def hitrate(self):
no_gains = float(len(self.gains()))
no_losses = float(len(self.losses()))
return no_gains / (no_losses + no_gains)
def rolling_ann_std(self, window=40):
y = self.as_ts().rolling(
window, min_periods=4, center=True).std().to_frame()
return y * self._vol_scalar
def t_test(self):
return ttest_1samp(self.vals(), 0.0)
def t_stat(self):
return float(self.t_test()[0])
def p_value(self):
return float(self.t_test()[1])
def stats(self):
stats_list = [
"min", "max", "median", "mean", "std", "skew", "ann_mean",
"ann_std", "sharpe", "sortino", "avg_drawdown", "time_in_drawdown",
"calmar", "avg_return_to_drawdown", "avg_loss", "avg_gain",
"gaintolossratio", "profitfactor", "hitrate", "t_stat", "p_value"
]
build_stats = []
for stat_name in stats_list:
stat_method = getattr(self, stat_name)
ans = stat_method()
build_stats.append((stat_name, "{0:.4g}".format(ans)))
comment1 = ("You can also plot / print:", [
"rolling_ann_std", "drawdown", "curve", "percent", "cumulative"
])
return [build_stats, comment1]
def __repr__(self):
if self.weighted_flag:
weight_comment = "Weighted"
else:
weight_comment = "Unweighted"
return super().__repr__() + \
"\n %s account curve; use object.stats() to see methods" % weight_comment
class accountCurveSingleElement(accountCurveSingleElementOneFreq):
"""
A single account curve for one asset (instrument / trading rule variation,
...) and one part of it (gross, net, costs)
Inherits from data frame
We never init these directly but only as part of accountCurveSingle
"""
def __init__(self, returns_df, capital, weighted_flag=False):
"""
:param returns_df: series of returns
:type returns_df: Tx1 pd.Series
:param weighted_flag: Is this account curve of weighted returns?
:type weighted_flag: bool
"""
# We often want to use
daily_returns = returns_df.resample("1B").sum()
weekly_returns = returns_df.resample("W").sum()
monthly_returns = returns_df.resample("MS").sum()
annual_returns = returns_df.resample("A").sum()
super().__init__(
daily_returns, capital, frequency="D", weighted_flag=weighted_flag)
setattr(self, "daily",
accountCurveSingleElementOneFreq(
daily_returns,
capital,
frequency="D",
weighted_flag=weighted_flag))
setattr(self, "weekly",
accountCurveSingleElementOneFreq(
weekly_returns,
capital,
frequency="W",
weighted_flag=weighted_flag))
setattr(self, "monthly",
accountCurveSingleElementOneFreq(
monthly_returns,
capital,
frequency="M",
weighted_flag=weighted_flag))
setattr(self, "annual",
accountCurveSingleElementOneFreq(
annual_returns,
capital,
frequency="Y",
weighted_flag=weighted_flag))
def __repr__(self):
return super().__repr__() + \
"\n Use object.freq.method() to access periods (freq=daily, weekly, monthly, annual) default: daily"
class accountCurveSingle(accountCurveSingleElement):
"""
A single account curve for one asset (instrument / trading rule variation, ...)
Inherits from data frame
On the surface we see the 'net' but there's also a gross and cost part included
"""
def __init__(self,
gross_returns,
net_returns,
costs,
capital,
weighted_flag=False):
"""
:param gross_returns: series of returns, no costs applied
:type gross_returns: Tx1 pd.Series
:param costs: series of costs (minus is a cost)
:type costs: Tx1 pd.Series
        :param net_returns: series of returns, net of costs (minus is a cost)
:type net_returns: Tx1 pd.Series
:param weighted_flag: Is this account curve of weighted returns?
:type weighted_flag: bool
:param capital: capital
:type capital: Tx1 pd.Series of float
"""
super().__init__(net_returns, capital, weighted_flag=weighted_flag)
setattr(self, "net",
accountCurveSingleElement(
net_returns, capital, weighted_flag=weighted_flag))
setattr(self, "gross",
accountCurveSingleElement(
gross_returns, capital, weighted_flag=weighted_flag))
setattr(self, "costs",
accountCurveSingleElement(
costs, capital, weighted_flag=weighted_flag))
def __repr__(self):
return super().__repr__() + \
"\n Use object.curve_type.freq.method() (freq=net, gross, costs) default: net"
def to_ncg_frame(self):
"""
View net gross and costs together
:returns: Tx3 pd.DataFrame
"""
ans = pd.concat(
[self.net.as_ts(), self.gross.as_ts(), self.costs.as_ts()], axis=1)
ans.columns = ["net", "gross", "costs"]
return ans
class accountCurve(accountCurveSingle):
def __init__(self,
price=None,
cash_costs=None,
SR_cost=None,
capital=None,
ann_risk_target=None,
pre_calc_data=None,
weighted_flag=False,
weighting=None,
apply_weight_to_costs_only=False,
**kwargs):
"""
Create an account curve; from which many lovely statistics can be gathered
We create by passing **kwargs which will be used by the pandl function
        :param cash_costs: Cost in local currency units per instrument block
        :type cash_costs: 3 tuple of floats; value_total_per_block, value_of_pertrade_commission, percentage_cost
:param SR_cost: Cost in annualised Sharpe Ratio units (0.01 = 0.01 SR)
:type SR_cost: float
        Note if both are included then cash_costs will be disregarded
:param capital: Capital at risk. Used for % returns, and calculating
daily risk for SR costs
:type capital: None, float, int, or Tx1
:param ann_risk_target: Annual risk target, as % of capital. Used to
calculate daily risk for SR costs
:type ann_risk_target: None or float
:param pre_calc_data: Used by the weighting function, to speed things
up and inherit pre-calculated stuff from an existing account curve
:type pre_calc_data: None or a big tuple
**kwargs passed to profit and loss calculation
(price, trades, marktomarket, positions,
delayfill, roundpositions,
get_daily_returns_volatility, forecast, use_fx,
value_of_price_point)
"""
if pre_calc_data:
(returns_data, base_capital, costs_base_ccy,
unweighted_instr_ccy_pandl) = pre_calc_data
(cum_trades, trades_to_use, instr_ccy_returns, base_ccy_returns,
use_fx, value_of_price_point) = returns_data
else:
"""
Capital is used for:
- going from forecast to position in profit and loss calculation
(fixed or a time series): daily_risk_capital
- calculating costs from SR costs (always a time series): ann_risk
- calculating percentage returns (maybe fixed or variable time
series): base_capital
"""
(base_capital, ann_risk, daily_risk_capital) = resolve_capital(
price, capital, ann_risk_target)
returns_data = pandl_with_data(
price, daily_risk_capital=daily_risk_capital, **kwargs)
(cum_trades, trades_to_use, instr_ccy_returns, base_ccy_returns,
use_fx, value_of_price_point) = returns_data
# always returns a time series
(costs_base_ccy, costs_instr_ccy) = calc_costs(
returns_data, cash_costs, SR_cost, ann_risk)
# keep track of this
unweighted_instr_ccy_pandl = dict(
gross=instr_ccy_returns,
costs=costs_instr_ccy,
net=instr_ccy_returns + costs_instr_ccy)
# Initially we have an unweighted version
self._calc_and_set_returns(
base_ccy_returns,
costs_base_ccy,
base_capital,
weighted_flag=weighted_flag,
weighting=weighting,
apply_weight_to_costs_only=apply_weight_to_costs_only)
# Save all kinds of useful statistics
setattr(self, "unweighted_instr_ccy_pandl", unweighted_instr_ccy_pandl)
setattr(self, "cum_trades", cum_trades)
setattr(self, "trades_to_use", trades_to_use)
setattr(self, "capital", base_capital)
setattr(self, "fx", use_fx)
setattr(self, "value_of_price_point", value_of_price_point)
def _calc_and_set_returns(self,
base_ccy_returns,
costs_base_ccy,
base_capital,
weighted_flag=False,
weighting=None,
apply_weight_to_costs_only=False):
"""
This hidden method is called when we setup the account curve, to
Also called again if we get a weighted version of this account curve
:param base_ccy_returns: Pre-cost returns in base currency terms (unweighted)
:type base_ccy_returns: Tx1 pd.Series
:param costs_base_ccy: Costs in base currency terms, aligned to base_ccy_returns (unweighted)
:type costs_base_ccy: Tx1 pd.Series
:param base_capital: base_capital in base currency terms
:type base_capital: Tx1 pd.Series (aligned to base_ccy_returns) or float
:param weighted_flag: Apply a weighting scheme, or not
:type weighted_flag: bool
:param weighting: Weighting scheme to apply to returns, MAY NOT BE aligned to base_ccy_returns
:type weighting: Tx1 pd.Series
:param apply_weight_to_costs_only: Apply weights only to costs, not gross returns
:type apply_weight_to_costs_only: bool
"""
if weighted_flag:
use_weighting = weighting.reindex(base_ccy_returns.index).ffill()
if not apply_weight_to_costs_only:
# only apply to gross returns if they aren't already weighted
base_ccy_returns = base_ccy_returns * use_weighting
# Always apply to costs
costs_base_ccy = costs_base_ccy * use_weighting
else:
use_weighting = None
net_base_returns = base_ccy_returns + \
costs_base_ccy # costs are negative returns
super().__init__(
base_ccy_returns,
net_base_returns,
costs_base_ccy,
base_capital,
weighted_flag=weighted_flag)
# save useful stats
# have to do this after super() call
setattr(self, "weighted_flag", weighted_flag)
setattr(self, "weighting", use_weighting)
def __repr__(self):
return super().__repr__(
) + "\n Use object.calc_data() to see calculation details"
def calc_data(self):
"""
Returns detailed calculation information
:returns: dictionary of float
"""
calc_items = [
"cum_trades", "trades_to_use", "unweighted_instr_ccy_pandl",
"capital", "weighting", "fx", "value_of_price_point"
]
calc_dict = dict([(calc_name, getattr(self, calc_name))
for calc_name in calc_items])
return calc_dict
def weighted(account_curve,
weighting,
apply_weight_to_costs_only=False,
allow_reweighting=False):
"""
Creates a copy of account curve with weights applied
:param account_curve: Curve to copy from
:type account_curve: accountCurve
:param weighting: Weighting scheme to apply to returns
:type weighting: Tx1 pd.Series
:param apply_weight_to_costs_only: Apply weights only to costs, not gross returns
:type apply_weight_to_costs_only: bool
    :param allow_reweighting: Allow an already weighted account curve to be reweighted
:type allow_reweighting: bool
:returns: accountCurve
"""
if account_curve.weighted_flag:
if allow_reweighting:
pass
else:
raise Exception("You can't reweight weighted returns!")
# very clunky but I can't make copy, deepcopy or inheritance work for this
# use case...
base_capital = copy(account_curve.capital)
gross_returns = copy(account_curve.gross.as_ts())
costs_base_ccy = copy(account_curve.costs.as_ts())
unweighted_instr_ccy_pandl = copy(account_curve.unweighted_instr_ccy_pandl)
returns_data = (account_curve.cum_trades, account_curve.trades_to_use,
unweighted_instr_ccy_pandl["gross"], gross_returns,
account_curve.fx, account_curve.value_of_price_point)
pre_calc_data = (returns_data, base_capital, costs_base_ccy,
unweighted_instr_ccy_pandl)
# Create a cloned account curve with the pre calculated data
# recalculate the returns with weighting applied
new_account_curve = accountCurve(
pre_calc_data=pre_calc_data,
weighted_flag=True,
weighting=weighting,
apply_weight_to_costs_only=apply_weight_to_costs_only)
return new_account_curve
def calc_costs(returns_data, cash_costs, SR_cost, ann_risk):
"""
Calculate costs
:param returns_data: returns data
:type returns_data: 5 tuple returned by pandl data function
:param cash_costs: Cost in local currency units per instrument block
:type cash_costs: 3 tuple of floats; value_total_per_block, value_of_pertrade_commission, percentage_cost
:param SR_cost: Cost in annualised Sharpe Ratio units (0.01 = 0.01 SR)
:type SR_cost: float
Set to None if not using. If both included use SR_cost
    :param ann_risk: Capital (capital * ann vol) at risk on an annualised basis. Used for SR calculations
:type ann_risk: Tx1 pd.Series
:returns : Tx1 pd.Series of costs. Minus numbers are losses
"""
(cum_trades, trades_to_use, instr_ccy_returns, base_ccy_returns, use_fx,
value_of_price_point) = returns_data
if SR_cost is not None:
# use SR_cost
ann_cost = -SR_cost * ann_risk
costs_instr_ccy = ann_cost / BUSINESS_DAYS_IN_YEAR
elif cash_costs is not None:
# use cost per blocks
(value_total_per_block, value_of_pertrade_commission,
percentage_cost) = cash_costs
trades_in_blocks = trades_to_use.abs()
costs_blocks = -trades_in_blocks * value_total_per_block
value_of_trades = trades_in_blocks * value_of_price_point
costs_percentage = percentage_cost * value_of_trades
traded = trades_to_use[trades_to_use > 0]
if len(traded) == 0:
costs_pertrade = pd.Series([0.0] * len(cum_trades.index),
cum_trades.index)
else:
costs_pertrade = pd.Series(
[value_of_pertrade_commission] * len(traded.index),
traded.index)
costs_pertrade = costs_pertrade.reindex(trades_to_use.index)
        # everything is on the trades index, so we can do this:
costs_instr_ccy = costs_blocks + costs_percentage + costs_pertrade
else:
# set costs to zero
costs_instr_ccy = pd.Series([0.0] * len(use_fx), index=use_fx.index)
# fx is on master (price timestamp)
# costs_instr_ccy needs downsampling
costs_instr_ccy = costs_instr_ccy.cumsum().ffill().reindex(
use_fx.index).diff()
costs_base_ccy = costs_instr_ccy * use_fx.ffill()
costs_base_ccy[np.isnan(costs_base_ccy)] = 0.0
return (costs_base_ccy, costs_instr_ccy)
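# --- Illustrative sketch (added for exposition, not part of the original module) ---
# Hedged example of the SR-cost branch of calc_costs(): an annualised Sharpe
# ratio cost is turned into a daily cash cost via the annualised cash risk.
# The SR_cost figure and dates are assumptions for illustration.
def _example_sr_costs():
    import pandas as pd
    idx = pd.date_range("2016-01-01", periods=4, freq="B")
    ann_risk = pd.Series(DEFAULT_CAPITAL * DEFAULT_ANN_RISK_TARGET, index=idx)
    SR_cost = 0.01                      # one hundredth of a Sharpe unit per year
    ann_cost = -SR_cost * ann_risk
    daily_cost = ann_cost / BUSINESS_DAYS_IN_YEAR
    return daily_cost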
def resolve_capital(ts_to_scale_to, capital=None, ann_risk_target=None):
"""
Resolve and setup capital
We need capital for % returns and possibly for SR stuff
Capital is used for:
- going from forecast to position in profit and loss calculation (fixed
or a time series): daily_risk_capital
- calculating costs from SR costs (always a time series): ann_risk
- calculating percentage returns (maybe fixed or variable time series):
capital
:param ts_to_scale_to: If capital is fixed, what time series to scale it to
:type capital: Tx1 pd.DataFrame
:param capital: Capital at risk. Used for % returns, and calculating daily
risk for SR costs
:type capital: None, int, float or Tx1 pd.DataFrame
:param ann_risk_target: Annual risk target, as % of capital 0.10 is 10%.
Used to calculate daily risk for SR costs
:type ann_risk_target: None or float
:returns tuple: 3 tuple of Tx1 pd.Series / float, pd.Series, pd.Series or float
(capital, ann_risk, daily_risk_capital)
"""
if capital is None:
base_capital = copy(DEFAULT_CAPITAL)
else:
base_capital = copy(capital)
if ann_risk_target is None:
ann_risk_target = DEFAULT_ANN_RISK_TARGET
# might be a float or a Series, depending on capital
daily_risk_capital = base_capital * ann_risk_target / ROOT_BDAYS_INYEAR
if isinstance(base_capital, float) or isinstance(base_capital, int):
ts_capital = pd.Series(
[base_capital] * len(ts_to_scale_to), index=ts_to_scale_to.index)
base_capital = float(base_capital)
else:
ts_capital = copy(base_capital)
# always a time series
ann_risk = ts_capital * ann_risk_target
return (base_capital, ann_risk, daily_risk_capital)
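# --- Illustrative usage sketch (added for exposition, not part of the original module) ---
# Hedged example of resolve_capital() with a fixed capital figure; the price
# series below is a made-up assumption used only to shape the time series outputs.
def _example_resolve_capital():
    import pandas as pd
    price = pd.Series(
        100.0, index=pd.date_range("2016-01-01", periods=5, freq="B"))
    base_capital, ann_risk, daily_risk_capital = resolve_capital(
        price, capital=1000000.0, ann_risk_target=0.20)
    return base_capital, ann_risk, daily_risk_capital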
def acc_list_to_pd_frame(list_of_ac_curves, asset_columns):
"""
Returns a pandas data frame
:param list_of_ac_curves: Elements to include
:type list_of_ac_curves: list of any accountcurve like object
:param asset_columns: Names of each asset
:type asset_columns: list of str
:returns: TxN pd.DataFrame
"""
list_of_df = [acc.as_ts() for acc in list_of_ac_curves]
ans = pd.concat(list_of_df, axis=1, join="outer")
ans.columns = asset_columns
ans = ans.cumsum().ffill().diff()
return ans
def total_from_list(list_of_ac_curves, asset_columns, capital):
"""
Return a single accountCurveSingleElement whose returns are the total across the portfolio
:param acc_curve_for_type_list: Elements to include in group
:type acc_curve_for_type_list: list of accountCurveSingleElement
:param asset_columns: Names of each asset
:type asset_columns: list of str
:param capital: Capital, if None will discover from list elements
:type capital: None, float, or pd.Series
:returns: 2 tuple of pd.Series
"""
pdframe = acc_list_to_pd_frame(list_of_ac_curves, asset_columns)
def _resolve_capital_for_total(capital, pdframe):
if isinstance(capital, float):
return pd.Series([capital] * len(pdframe), pdframe.index)
else:
return capital
def _all_float(list_of_ac_curves):
curve_type_float = [isinstance(x, float) for x in list_of_ac_curves]
return all(curve_type_float)
def _resolve_capital_list(pdframe, list_of_ac_curves, capital):
if capital is not None:
return capital
if _all_float(list_of_ac_curves):
capital = np.mean([x.capital for x in list_of_ac_curves])
            return capital
# at least some time series
capital = pd.concat(
[
_resolve_capital_for_total(x.capital, pdframe)
for x in list_of_ac_curves
],
axis=1)
# should all be the same, but just in case ...
capital = np.mean(capital, axis=1)
capital = capital.reindex(pdframe.index).ffill()
return capital
# all on daily freq so just add up
totalac = pdframe.sum(axis=1)
capital = _resolve_capital_list(pdframe, list_of_ac_curves, capital)
return (totalac, capital)
class accountCurveGroupForType(accountCurveSingleElement):
"""
an accountCurveGroup for one cost type (gross, net, costs)
"""
def __init__(self,
acc_curve_for_type_list,
asset_columns,
capital=None,
weighted_flag=False,
curve_type="net"):
"""
Create a group of account curves from a list and some column names
looks like accountCurveSingleElement; outward facing is the total p&L
so acc=accountCurveGroupForType()
acc.mean() ## for the total
    Also you can access an instrument (gives an accountCurveSingleElement for an instrument):
    acc[instrument_code].mean(), acc[instrument_code].daily.mean()
acc.instrument_code.gross.daily.stats()
acc.to_frame() ## returns a data frame
If you want the original list back:
acc.to_list
Also: eg acc.get_stats("mean", freq="daily")
... Returns a dict of stats
:param acc_curve_for_type_list: Elements to include in group
:type acc_curve_for_type_list: list of accountCurveSingleElement
:param asset_columns: Names of each asset
:type asset_columns: list of str
:param curve_type: Net, gross or costs?
:type curve_type: str
:param weighted_flag: Is this account curve of weighted returns?
:type weighted_flag: bool
:param capital: Capital, if None will discover from list elements
:type capital: None, float, or pd.Series
"""
(acc_total, capital) = total_from_list(acc_curve_for_type_list,
asset_columns, capital)
super().__init__(
acc_total, weighted_flag=weighted_flag, capital=capital)
setattr(self, "to_list", acc_curve_for_type_list)
setattr(self, "asset_columns", asset_columns)
setattr(self, "curve_type", curve_type)
def __getitem__(self, colname):
"""
Overriding this method to access individual curves
:returns: accountCurveSingleElement
"""
try:
ans = self.to_list[self.asset_columns.index(colname)]
except ValueError:
raise Exception("%s not found in account curve" % colname)
return ans
def to_frame(self):
"""
Returns as a data frame, one column is an assets returns
:returns: TxN pd.DataFrame
"""
return acc_list_to_pd_frame(self.to_list, self.asset_columns)
def get_stats(self, stat_method, freq="daily", percent=True):
"""
Create a dictionary summarising statistics across a group of account curves
:param stat_method: Any method of accountCurveSingleElementOneFreq
:type stat_method: str
:param freq: frequency; daily, weekly, monthly or annual
:type freq: str
:param percent: get % returns
:type percent: bool
:returns: statsDict
"""
return statsDict(self, stat_method, freq, percent)
def time_weights(self):
"""
Returns a dict, values are weights according to how much data we have
:returns: dict of floats
"""
def _len_nonzero(ac_curve):
return_df = ac_curve.as_ts()
ans = len([x for x in return_df.values if not np.isnan(x)])
return ans
time_weights_dict = dict(
[(asset_name, _len_nonzero(ac_curve))
for (asset_name,
ac_curve) in zip(self.asset_columns, self.to_list)])
total_weight = sum(time_weights_dict.values())
time_weights_dict = dict([(asset_name, weight / total_weight)
for (asset_name,
weight) in time_weights_dict.items()])
return time_weights_dict
class statsDict(dict):
def __init__(self,
acgroup_for_type,
stat_method,
freq="daily",
percent=True):
"""
Create a dictionary summarising statistics across a group of account curves
:param acgroup_for_type: Account curve group to analyse
:type acgroup_for_type: accountCurveGroupForType
:param stat_method: Any method of accountCurveSingleElementOneFreq
:type stat_method: str
:param freq: frequency; daily, weekly, monthly or annual
:type freq: str
"""
column_names = acgroup_for_type.asset_columns
def _get_stat_from_acobject(acobject, stat_method, freq, percent):
freq_obj = getattr(acobject, freq)
if percent:
freq_obj = freq_obj.percent()
stat_method_function = getattr(freq_obj, stat_method)
return stat_method_function()
dict_values = [(col_name, _get_stat_from_acobject(
acgroup_for_type[col_name], stat_method, freq, percent))
for col_name in column_names]
super().__init__(dict_values)
# We need to augment this with time weightings, in case they are needed
setattr(self, "time_weightings", acgroup_for_type.time_weights())
def weightings(self, timeweighted=False):
"""
Returns a dict of weightings
Either equal weighting, or returns time_weightings
:param timeweighted: Time weight statistics or use simple average
:type: timeweighted: bool
:returns: dict of floats
"""
if timeweighted:
return self.time_weightings
else:
return dict([(asset_name, 1.0 / len(self.values()))
for asset_name in self.keys()])
def mean(self, timeweighted=False):
"""
Return cross sectional mean of statistics
:param timeweighted: Time weight statistics or use simple average
:type: timeweighted: bool
:returns: float
"""
wts = self.weightings(timeweighted)
ans = sum([
asset_value * wts[asset_name]
for (asset_name, asset_value) in self.items()
])
return ans
def std(self, timeweighted=False):
"""
Return cross sectional standard deviation of statistics
:param timeweighted: Time weight statistics or use simple average
:type: timeweighted: bool
:returns: float
"""
wts = self.weightings(timeweighted)
avg = self.mean(timeweighted)
ans = sum([
wts[asset_name] * (asset_value - avg)**2
for (asset_name, asset_value) in self.items()
])**.5
return ans
def tstat(self, timeweighted=False):
"""
Determine if cross section of statistics is significantly different from zero
High t statistic = yes
:param timeweighted: Time weight statistics or use simple average
:type: timeweighted: bool
:returns: float
"""
t_mean = self.mean(timeweighted)
t_std = self.std(timeweighted)
if t_std == 0.0:
return np.nan
return t_mean / t_std
def pvalue(self, timeweighted=False):
"""
Determine if cross section of statistics is significantly different from zero
Low p value = yes
:param timeweighted: Time weight statistics or use simple average
:type: timeweighted: bool
:returns: float
"""
tstat = self.tstat(timeweighted)
n = len(self.values())
if np.isnan(tstat) or n < 2:
return np.nan
pvalue = stats.t.sf(np.abs(tstat), n - 1) # one sided t statistic
return pvalue
class accountCurveGroup(accountCurveSingleElement):
def __init__(self,
acc_curve_list,
asset_columns,
capital=None,
weighted_flag=None):
"""
Create a group of account curves from a list and some column names
looks like accountCurve, so outward facing is the total p&L
No weighting is done, so returns of the total will be simple addition
so acc=accountCurveGroup()
acc.mean()
acc.net.mean()
acc.net.daily.mean()
    Also you can access an instrument:
acc[instrument_code].mean(), acc[instrument_code].net.mean()
acc.instrument_code.gross.daily.stats()
acc.to_frame() ## returns a data frame
acc.to_frame("gross") ## returns a data frame
acc.costs.to_frame() ## returns a data frame
If you want the original list back:
acc.to_list
Also: eg acc.get_stats("mean", curve_type="net", freq="daily")
acc.net.get_stats("sharpe", freq="weekly")
... Returns a list of stats
:param acc_curve_list: Curves to group together
:type acc_curve_list: list of accountCurve() objects
:param asset_columns: Names of each asset (same order as acc_curve_list)
:type asset_columns: list of str
:param capital: Capital, if None will discover from list elements
:type capital: None, float, or pd.Series
    :param weighted_flag: Is this a weighted account curve? If None then inherits from the list elements.
:type weighted_flag: None or bool
"""
if weighted_flag is None:
weighted_flag = [x.weighted_flag for x in acc_curve_list]
if any(weighted_flag):
if not (all(weighted_flag)):
raise Exception(
"Can't mix weighted_flag and unweighted account curves")
else:
weighted_flag = True
else:
weighted_flag = False
net_list = [getattr(x, "net") for x in acc_curve_list]
gross_list = [getattr(x, "gross") for x in acc_curve_list]
costs_list = [getattr(x, "costs") for x in acc_curve_list]
acc_list_net = accountCurveGroupForType(
net_list,
asset_columns=asset_columns,
capital=capital,
weighted_flag=weighted_flag,
curve_type="net")
acc_list_gross = accountCurveGroupForType(
gross_list,
asset_columns=asset_columns,
capital=capital,
weighted_flag=weighted_flag,
curve_type="gross")
acc_list_costs = accountCurveGroupForType(
costs_list,
asset_columns=asset_columns,
capital=capital,
weighted_flag=weighted_flag,
curve_type="costs")
(acc_total, capital) = total_from_list(net_list, asset_columns,
capital)
super().__init__(
acc_total, weighted_flag=weighted_flag, capital=capital)
setattr(self, "net", acc_list_net)
setattr(self, "gross", acc_list_gross)
setattr(self, "costs", acc_list_costs)
setattr(self, "to_list", acc_curve_list)
setattr(self, "asset_columns", asset_columns)
def __repr__(self):
return super().__repr__() + "\n Multiple curves. Use object.curve_type (curve_type= net, gross, costs)" + \
"\n Useful methods: to_list, asset_columns(), get_stats(), to_frame()"
def __getitem__(self, colname):
"""
Overriding this method to access individual curves
Returns an object of type accountCurve
"""
try:
ans = self.to_list[self.asset_columns.index(colname)]
except ValueError:
raise Exception("%s not found in account curve" % colname)
return ans
def get_stats(self, stat_method, curve_type="net", freq="daily"):
"""
Returns a dict of stats, one per asset
:param stat_method: Any method of accountCurveSingleElementOneFreq
:type stat_method: str
:param curve_type: gross, net or costs
:type curve_type: str
:param freq: frequency; daily, weekly, monthly or annual
:type freq: str
:returns: statsDict, dict like object
"""
subobject = getattr(self, curve_type)
return subobject.get_stats(stat_method, freq=freq)
def to_frame(self, curve_type="net"):
"""
Returns individual return curves as a data frame
:param curve_type: gross, net or costs
:type curve_type: str
:returns: pd.Dataframe TxN
"""
actype = getattr(self, curve_type)
return actype.to_frame()
def stack(self):
"""
        Collapse instrument level data into a single stack of returns in a returnsStack object
We can bootstrap this or perform other statistics
:returns: returnStack
"""
        return returnsStack(self.to_list)
def to_ncg_frame(self):
"""
Returns total account curves for net, gross and costs in a dataframe
:returns: Tx3 pd.Dataframe
"""
ans = pd.concat(
[self.net.as_ts(), self.gross.as_ts(), self.costs.as_ts()], axis=1)
ans.columns = ["net", "gross", "costs"]
return ans
class returnsStack(accountCurveSingle):
"""
Create a stack of returns which we can bootstrap
"""
def __init__(self, returns_list):
"""
Create a stack of returns which we can bootstrap
:param returns_list: returns to be bootstrapped
:type returns_list: List of accountCurve() objects
"""
# Collapse indices to a single one
bs_index_to_use = [list(returns.index) for returns in returns_list]
bs_index_to_use = sum(bs_index_to_use, [])
bs_index_to_use = sorted(set(bs_index_to_use))
# Collapse return lists
curve_type_list = ["gross", "net", "costs"]
def _collapse_one_curve_type(returns_list, curve_type):
collapsed_values = sum([
list(getattr(returns, curve_type).iloc[:, 0].values)
for returns in returns_list
], [])
return collapsed_values
collapsed_curves_values = dict([(curve_type, _collapse_one_curve_type(
returns_list, curve_type)) for curve_type in curve_type_list])
# We set this to an arbitrary index so we can make an account curve
gross_returns_df = pd.Series(
collapsed_curves_values["gross"],
pd.date_range(
start=bs_index_to_use[0],
periods=len(collapsed_curves_values["gross"]),
freq="B"))
net_returns_df = pd.Series(
collapsed_curves_values["net"],
pd.date_range(
start=bs_index_to_use[0],
periods=len(collapsed_curves_values["net"]),
freq="B"))
costs_returns_df = pd.Series(
collapsed_curves_values["costs"],
pd.date_range(
start=bs_index_to_use[0],
periods=len(collapsed_curves_values["costs"]),
freq="B"))
super().__init__(gross_returns_df, net_returns_df, costs_returns_df)
# We need to store this for bootstrapping purposes
setattr(self, "_bs_index_to_use", bs_index_to_use)
def bootstrap(self, no_runs=50, length=None):
"""
Create an accountCurveGroup object containing no_runs, each same length as the
original portfolio (unless length is set)
:param no_runs: Number of runs to do
:type no_runs: int
:param length: Length of each run
:type length: int
:returns: accountCurveGroup, one element for each of no_runs
"""
values_to_sample_from = dict(
gross=list(getattr(self, "gross").iloc[:, 0].values),
net=list(getattr(self, "net").iloc[:, 0].values),
costs=list(getattr(self, "costs").iloc[:, 0].values))
size_of_bucket = len(self.index)
if length is None:
index_to_use = self._bs_index_to_use
length = len(index_to_use)
else:
index_to_use = pd.date_range(
start=self._bs_index_to_use[0], periods=length, freq="B")
bs_list = []
for notUsed in range(no_runs):
sample = [
int(round(random.uniform(0, size_of_bucket - 1)))
for notUsed2 in range(length)
]
# each element of accountCurveGroup is an accountCurveSingle
bs_list.append(
accountCurveSingle(
pd.Series(
[
values_to_sample_from["gross"][xidx]
for xidx in sample
],
index=index_to_use),
pd.Series(
[
values_to_sample_from["net"][xidx]
for xidx in sample
],
index=index_to_use),
pd.Series(
[
values_to_sample_from["costs"][xidx]
for xidx in sample
],
index=index_to_use)))
asset_columns = ["b%d" % idx for idx in range(no_runs)]
return accountCurveGroup(bs_list, asset_columns)
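# --- Illustrative sketch (added for exposition, not part of the original module) ---
# The resampling idea behind returnsStack.bootstrap(), shown standalone and
# hedged: draw daily returns with replacement and look at the spread of
# annualised Sharpe ratios. The return distribution and run count are assumptions.
def _example_bootstrap_sharpe(no_runs=50):
    import numpy as np
    rng = np.random.RandomState(0)
    daily_returns = rng.normal(0.0004, 0.01, 1000)  # stand-in return stack
    sharpes = []
    for _ in range(no_runs):
        sample = rng.choice(daily_returns, size=len(daily_returns), replace=True)
        sharpes.append(ROOT_BDAYS_INYEAR * sample.mean() / sample.std())
    return np.percentile(sharpes, [5, 50, 95])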
def _DEPRECATED_get_trades_from_positions(
price, positions, delayfill, roundpositions,
get_daily_returns_volatility, forecast, fx, value_of_price_point,
daily_capital):
"""
Work out trades implied by a series of positions
If delayfill is True, assume we get filled at the next price after the
trade
If roundpositions is True when working out trades from positions, then
round; otherwise assume we trade fractional lots
If positions are not provided, work out position using forecast and
volatility (this will be for an arbitrary daily risk target)
If volatility is not provided, work out from price
Args:
price (Tx1 pd.DataFrame): price series
positions (Tx1 pd.DataFrame or None): (series of positions)
        delayfill (bool): Should we assume fills happen one day after the
            trade?
roundpositions (bool): If calculating trades, should we round positions
first?
get_daily_returns_volatility (Tx1 pd.DataFrame or None): series of
volatility estimates, used for calculation positions
forecast (Tx1 pd.DataFrame or None): series of forecasts, needed to
work out positions
fx (Tx1 pd.DataFrame or None): series of fx rates from instrument
currency to base currency, to work out p&l in base currency
        value_of_price_point (float): value of one point movement in price
Returns:
Tx1 pd dataframe of trades
"""
if roundpositions:
# round to whole positions
round_positions = positions.round()
else:
round_positions = copy(positions)
# deal with edge cases where we don't have a zero position initially, or
# leading nans
first_row = pd.Series([0.0], index=[round_positions.index[0] - BDay(1)])
round_positions = pd.concat([first_row, round_positions], axis=0)
round_positions = round_positions.ffill()
trades = round_positions.diff()
if delayfill:
# fill will happen one day after we generated trade (being
# conservative)
trades.index = trades.index + pd.DateOffset(1)
# put prices on to correct timestamp
(trades, align_price) = trades.align(price, join="outer")
ans = pd.concat([trades, align_price], axis=1)
ans.columns = ['trades', 'fill_price']
# fill will happen at next valid price if it happens to be missing
ans.fill_price = ans.fill_price.fillna(method="bfill")
# remove zeros (turns into nans)
ans = ans[ans.trades != 0.0]
ans = ans[np.isfinite(ans.trades)]
return ans
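# --- Illustrative sketch (added for exposition, not part of the original module) ---
# Hedged mini-example of the trades-from-positions logic above: difference the
# rounded, forward-filled positions and optionally delay fills by one day.
# The position series is an assumption for illustration.
def _example_trades_from_positions(delayfill=True):
    import pandas as pd
    idx = pd.date_range("2016-01-04", periods=5, freq="B")
    positions = pd.Series([0.0, 2.0, 2.0, -1.0, 0.0], index=idx)
    trades = positions.round().ffill().diff().dropna()
    if delayfill:
        # be conservative: assume the fill happens one day after the trade
        trades.index = trades.index + pd.DateOffset(1)
    return trades[trades != 0.0]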
if __name__ == '__main__':
import doctest
doctest.testmod()
| gpl-3.0 |
andrewnc/scikit-learn | sklearn/feature_extraction/tests/test_dict_vectorizer.py | 276 | 3790 | # Authors: Lars Buitinck <[email protected]>
# Dan Blanchard <[email protected]>
# License: BSD 3 clause
from random import Random
import numpy as np
import scipy.sparse as sp
from numpy.testing import assert_array_equal
from sklearn.utils.testing import (assert_equal, assert_in,
assert_false, assert_true)
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_selection import SelectKBest, chi2
def test_dictvectorizer():
D = [{"foo": 1, "bar": 3},
{"bar": 4, "baz": 2},
{"bar": 1, "quux": 1, "quuux": 2}]
for sparse in (True, False):
for dtype in (int, np.float32, np.int16):
for sort in (True, False):
for iterable in (True, False):
v = DictVectorizer(sparse=sparse, dtype=dtype, sort=sort)
X = v.fit_transform(iter(D) if iterable else D)
assert_equal(sp.issparse(X), sparse)
assert_equal(X.shape, (3, 5))
assert_equal(X.sum(), 14)
assert_equal(v.inverse_transform(X), D)
if sparse:
# CSR matrices can't be compared for equality
assert_array_equal(X.A, v.transform(iter(D) if iterable
else D).A)
else:
assert_array_equal(X, v.transform(iter(D) if iterable
else D))
if sort:
assert_equal(v.feature_names_,
sorted(v.feature_names_))
def test_feature_selection():
# make two feature dicts with two useful features and a bunch of useless
# ones, in terms of chi2
d1 = dict([("useless%d" % i, 10) for i in range(20)],
useful1=1, useful2=20)
d2 = dict([("useless%d" % i, 10) for i in range(20)],
useful1=20, useful2=1)
for indices in (True, False):
v = DictVectorizer().fit([d1, d2])
X = v.transform([d1, d2])
sel = SelectKBest(chi2, k=2).fit(X, [0, 1])
v.restrict(sel.get_support(indices=indices), indices=indices)
assert_equal(v.get_feature_names(), ["useful1", "useful2"])
def test_one_of_k():
D_in = [{"version": "1", "ham": 2},
{"version": "2", "spam": .3},
{"version=3": True, "spam": -1}]
v = DictVectorizer()
X = v.fit_transform(D_in)
assert_equal(X.shape, (3, 5))
D_out = v.inverse_transform(X)
assert_equal(D_out[0], {"version=1": 1, "ham": 2})
names = v.get_feature_names()
assert_true("version=2" in names)
assert_false("version" in names)
def test_unseen_or_no_features():
D = [{"camelot": 0, "spamalot": 1}]
for sparse in [True, False]:
v = DictVectorizer(sparse=sparse).fit(D)
X = v.transform({"push the pram a lot": 2})
if sparse:
X = X.toarray()
assert_array_equal(X, np.zeros((1, 2)))
X = v.transform({})
if sparse:
X = X.toarray()
assert_array_equal(X, np.zeros((1, 2)))
try:
v.transform([])
except ValueError as e:
assert_in("empty", str(e))
def test_deterministic_vocabulary():
# Generate equal dictionaries with different memory layouts
items = [("%03d" % i, i) for i in range(1000)]
rng = Random(42)
d_sorted = dict(items)
rng.shuffle(items)
d_shuffled = dict(items)
# check that the memory layout does not impact the resulting vocabulary
v_1 = DictVectorizer().fit([d_sorted])
v_2 = DictVectorizer().fit([d_shuffled])
assert_equal(v_1.vocabulary_, v_2.vocabulary_)
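# Illustrative sketch (not part of the original test-suite): a minimal
# DictVectorizer round-trip, kept as a comment so it does not affect test
# collection; the feature names are made up.
#
#   v = DictVectorizer(sparse=False)
#   X = v.fit_transform([{"cat": 1, "dog": 2}, {"dog": 3, "fish": 1}])
#   # X.shape == (2, 3) and v.get_feature_names() == ['cat', 'dog', 'fish']
#   # v.inverse_transform(X) recovers the dicts (zero-valued entries dropped)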
| bsd-3-clause |
njpayne/euclid | python/main.py | 1 | 18033 | import scipy as sp
import numpy as np
import math
import os
import csv
import data_work
#from sklearn import cross_validation
#from sklearn.preprocessing import OneHotEncoder, LabelEncoder, StandardScaler #used to convert categories to one of k
from sklearn.cross_validation import ShuffleSplit
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import zero_one_loss
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.preprocessing import PolynomialFeatures
import matplotlib.pyplot as plt
import pylab
import classifiers, clustering, regressors
data_location = "../Data" # read data from os.path.join(data_location, <filename>)
results_location = "Results" # save results text/graph to os.path.join(results_location, <filename>)
def run_classifiers(X_train, y_train, X_test, y_test, header):
print("------------------")
print("Running Classifiers")
print("------------------")
print("\n\n--------------------------")
print("Decision Trees")
print("--------------------------")
#create decision tree range
decision_tree_param = {'max_depth': range(1, 200, 10), 'criterion' : ["entropy", "gini"]}
#run the decision tree
prediction, decision_tree_accuracy = classifiers.run_decision_tree(X_train, y_train.flatten(), X_test, y_test.flatten(), passed_parameters = decision_tree_param, headings = header)
print("Decision tree accuracy = %f" % decision_tree_accuracy)
print("\n\n--------------------------")
print("Boosting")
print("--------------------------")
#create boosting range
boosting_param = {'base_estimator__max_depth': range(1, 3), 'n_estimators' : [10, 20], 'learning_rate' : [.75, 1.0] }
#run the boosting
prediction, boosting_accuracy = classifiers.run_boosting(X_train, y_train.flatten(), X_test, y_test.flatten(), passed_parameters = boosting_param)
print("Boosting accuracy = %f" % boosting_accuracy)
print("\n\n--------------------------")
print("k - Nearest Neighbors")
print("--------------------------")
#create knn range
knn_param = {'n_neighbors': range(1, 20), 'weights': ['uniform', 'distance'], 'p': [1, 2], 'algorithm' : ['auto'], 'metric': ['euclidean']} #, 'manhattan', 'chebyshev', 'minkowski', 'wminkowski', 'seuclidean', 'mahalanobis']}
#run the knn
prediction, knn_accuracy = classifiers.run_k_nearest_neighbors(X_train, y_train.flatten(), X_test, y_test.flatten(), passed_parameters = knn_param)
print("k-NN accuracy = %f" % knn_accuracy)
print("\n\n--------------------------")
print("SVM")
print("--------------------------")
#create svm range
svm_param = {'kernel': ['rbf', 'linear', 'poly', 'sigmoid'], 'C': [1e0, 5e0, 1e1, 5e1, 1e2, 5e2], 'degree': [1, 2, 3, 4], 'gamma': [0.0, 0.0001, 0.0005, 0.001]}
#run the svm
prediction, svm_accuracy = classifiers.run_support_vector_machines(X_train, y_train.flatten(), X_test, y_test.flatten(), passed_parameters = svm_param)
print("SVM accuracy = %f" % svm_accuracy)
#print("\n\n--------------------------")
#print("Neural Net")
#print("--------------------------")
##run the neural net
#prediction, nn_accuracy = classifiers.run_neural_net(X_train, y_train.flatten(), X_test, y_test.flatten())
#print("Neural Net accuracy = %f" % nn_accuracy)
#return max(decision_tree_accuracy, boosting_accuracy, knn_accuracy, svm_accuracy, nn_accuracy)
return max(decision_tree_accuracy, boosting_accuracy, knn_accuracy, svm_accuracy)
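# Note on the parameter grids above (a sketch, assuming the classifiers.py
# helpers wrap sklearn's GridSearchCV as imported at the top of this module):
# keys such as 'base_estimator__max_depth' use sklearn's double-underscore
# convention to reach into a nested estimator, e.g.
#
#   from sklearn.tree import DecisionTreeClassifier
#   from sklearn.ensemble import AdaBoostClassifier
#   grid = GridSearchCV(AdaBoostClassifier(DecisionTreeClassifier()),
#                       {'base_estimator__max_depth': range(1, 3),
#                        'n_estimators': [10, 20]})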
def plot_residual_vs_fit(y_act, y_pred, r_value, name):
#make a scatterplot
plt.figure()
plt.scatter(y_pred, y_act - y_pred)
plt.xlabel("Fitted Value")
plt.ylabel("Residual")
plt.text(x = 25, y = 45, s = "R^2: %.3f" % r_value)
plt.title("Residuals " + name)
plt.axhline(0, color= 'b', linestyle='-')
pylab.savefig(os.path.join(os.getcwd(),"Results","Residual Plots",name))
plt.close()
return
def run_regressors(X_train, y_train, X_test, y_test, header, cvs_writer, run_name = ""):
print("\nRun Name = %s" % run_name)
#create decision tree range
decision_tree_param = {'max_depth': range(1, 20, 1)}
#run the decision tree
prediction, decision_tree_accuracy = regressors.run_decision_tree(X_train, y_train.flatten(), X_test, y_test.flatten(), passed_parameters = decision_tree_param, headings = header, title = run_name)
print("Decision tree accuracy = %f" % decision_tree_accuracy)
plot_residual_vs_fit(y_test.flatten(), prediction, decision_tree_accuracy, "Decision Tree %s" % run_name)
#create adaboost range
adaboost_parameters = {'base_estimator__max_depth': range(1, 5), 'n_estimators' : [100], 'learning_rate' : [1] }
#run the adaboost regressor
prediction, boosting_accuracy = regressors.run_boosting(X_train, y_train.flatten(), X_test, y_test.flatten(), passed_parameters = adaboost_parameters)
print("Boosting accuracy = %f" % boosting_accuracy)
plot_residual_vs_fit(y_test.flatten(), prediction, boosting_accuracy, "Adaboost %s" % run_name)
#run the random forest regressor
prediction, rand_forest_accuracy = regressors.run_random_forest(X_train, y_train.flatten(), X_test, y_test.flatten(), passed_parameters = decision_tree_param)
print("Random forest accuracy = %f" % rand_forest_accuracy)
plot_residual_vs_fit(y_test.flatten(), prediction, rand_forest_accuracy, "Random Forest %s" % run_name)
#run the linear regressor
prediction, linear_accuracy = regressors.run_linear_regression(X_train, y_train.flatten(), X_test, y_test.flatten(), passed_parameters = decision_tree_param, headings = header)
print("Linear Regressor accuracy = %f" % linear_accuracy)
plot_residual_vs_fit(y_test.flatten(), prediction, linear_accuracy, "Linear Regression %s" % run_name)
##run svr
#svr_parameters = {'kernel' : ['rbf', 'poly', 'linear', 'sigmoid'], 'degree' : [2, 3, 4, 5, 6, 7, 8, 9, 10]}
#prediction, svr_accuracy = regressors.run_support_vector_regressor(X_train, y_train.flatten(), X_test, y_test.flatten(), passed_parameters = svr_parameters)
#print("SVM accuracy = %f" % svr_accuracy)
svr_accuracy = 0
best_regressor = max(decision_tree_accuracy, boosting_accuracy, rand_forest_accuracy, linear_accuracy, svr_accuracy)
cvs_writer.writerow([run_name] + [decision_tree_accuracy, boosting_accuracy, rand_forest_accuracy, linear_accuracy, svr_accuracy] + [""] + header.tolist())
return best_regressor
def feature_reduction(header, data, is_classification = True):
#remove low variance features
data_lvar_removed, header_lvar_removed = clustering.clean_features(data, header, min_feature_variance = .8 * (1 - .8))
#create training/test sets
X_train, X_test, y_train, y_test = divide_for_training(data_lvar_removed)
    #select the best features using univariate selection
selected_features, feature_uni_scores = clustering.univariate_selection(np.vstack((X_train, X_test)), np.vstack((y_train, y_test)), n_best = 3, is_regression = not is_classification)
#determine the order of the univariate features
best_feature_index = np.argsort(-feature_uni_scores)
best_results = []
#reselect the features
top_features = 12
X_train_uni, X_test_uni = np.take(X_train, best_feature_index[ : top_features], axis = 1), np.take(X_test, best_feature_index[ : top_features], axis = 1)
header_uni = np.take(header_lvar_removed, best_feature_index[ : top_features])
##try the PCA reduction
#data_pca = clustering.pca_reduce(np.vstack((X_train, X_test)), n_components = top_features)
#X_train_pca, X_test_pca, y_train_pca, y_test_pca = divide_for_training(np.hstack((np.vstack((y_train, y_test)), data_pca)))
##try the ICA reduction
#data_ica = clustering.ica_reduce(np.vstack((X_train, X_test)), n_components = top_features)
#X_train_ica, X_test_ica, y_train_ica, y_test_ica = divide_for_training(np.hstack((np.vstack((y_train, y_test)), data_ica)))
#try recursive reduction
data_recursive = clustering.recursive_reduce(np.vstack((X_train, X_test)), np.vstack((y_train, y_test)).flatten(), is_regression = not is_classification)
    X_train_recursive, X_test_recursive, y_train_recursive, y_test_recursive = divide_for_training(np.hstack((np.vstack((y_train, y_test)), data_recursive)))
if(is_classification):
#run the classifiers and find the best result
#best_classifier_uni = run_classifiers(X_train_uni, y_train, X_test_uni, y_test, header_uni)
        best_classifier_recursive = run_classifiers(X_train_recursive, y_train_recursive, X_test_recursive, y_test_recursive, header_lvar_removed)
else:
#best_regressor_uni = run_regressors(X_train_uni, y_train, X_test_uni, y_test, header_uni)
#best_regressor_pca = run_regressors(X_train_pca, y_train_pca, X_test_pca, y_test_pca, header_lvar_removed)
#best_regressor_ica = run_regressors(X_train_ica, y_train_ica, X_test_ica, y_test_ica, header_lvar_removed)
best_regressor_recursive = run_regressors(X_train_recursive, y_train_recursive, X_test_recursive, y_test_recursive, header_lvar_removed)
        best_results.append(best_regressor_recursive)
return
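# Sketch of the polynomial feature expansion used in main() below, with made-up
# values (assumes sklearn's PolynomialFeatures as imported above):
#
#   poly = PolynomialFeatures(degree=2)
#   X_poly = poly.fit_transform(np.array([[2.0, 3.0]]))
#   # two features [a, b] expand to [1, a, b, a^2, a*b, b^2]
#   # -> [[1., 2., 3., 4., 6., 9.]]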
def main():
#create a dictionary of feature setups
#select the appropriate columns
feature_dict = {
#"Full" : header.tolist(),
"Lecture Views" : ['overal_lecture_views', 'total_lecture_time'],
"Piazza Use" : ['mid_on_piazza', 'final_on_piazza', 'piazza_posts', 'piazza_days', 'piazza_views'],
"Lecture Pace" : ['lecture_1_pace_Late', 'lecture_1_pace_On-time', 'lecture_1_pace_Unknown', 'lecture_2_pace_Late', 'lecture_2_pace_On-time', 'lecture_2_pace_Unknown', 'lecture_3_pace_Late', 'lecture_3_pace_On-time', 'lecture_3_pace_Unknown', 'lecture_4_pace_Early', 'lecture_4_pace_Late', 'lecture_4_pace_On-time', 'lecture_4_pace_Unknown', 'lecture_5_pace_Early', 'lecture_5_pace_Late', 'lecture_5_pace_On-time', 'lecture_5_pace_Unknown', 'lecture_6_pace_Early', 'lecture_6_pace_Late', 'lecture_6_pace_On-time', 'lecture_6_pace_Unknown', 'lecture_7_pace_Early', 'lecture_7_pace_Late', 'lecture_7_pace_On-time', 'lecture_7_pace_Unknown', 'lecture_8_pace_Early', 'lecture_8_pace_Late', 'lecture_8_pace_On-time', 'lecture_8_pace_Unknown', 'lecture_9_pace_Early', 'lecture_9_pace_Late', 'lecture_9_pace_On-time', 'lecture_9_pace_Unknown', 'lecture_10_pace_Early', 'lecture_10_pace_Late', 'lecture_10_pace_On-time', 'lecture_10_pace_Unknown', 'lecture_11_pace_Early', 'lecture_11_pace_Late', 'lecture_11_pace_On-time', 'lecture_11_pace_Unknown', 'lecture_12_pace_Early', 'lecture_12_pace_Late', 'lecture_12_pace_On-time', 'lecture_12_pace_Unknown', 'lecture_13_pace_Early', 'lecture_13_pace_Late', 'lecture_13_pace_On-time', 'lecture_13_pace_Unknown', 'lecture_14_pace_Early', 'lecture_14_pace_Late', 'lecture_14_pace_On-time', 'lecture_14_pace_Unknown', 'lecture_15_pace_Early', 'lecture_15_pace_Late', 'lecture_15_pace_On-time', 'lecture_15_pace_Unknown', 'lecture_16_pace_Early', 'lecture_16_pace_Late', 'lecture_16_pace_On-time', 'lecture_16_pace_Unknown', 'lecture_17_pace_Early', 'lecture_17_pace_Late', 'lecture_17_pace_On-time', 'lecture_17_pace_Unknown', 'lecture_18_pace_Early', 'lecture_18_pace_Late', 'lecture_18_pace_On-time', 'lecture_18_pace_Unknown', 'lecture_19_pace_Early', 'lecture_19_pace_Late', 'lecture_19_pace_On-time', 'lecture_19_pace_Unknown', 'lecture_20_pace_Early', 'lecture_20_pace_Late', 'lecture_20_pace_On-time', 'lecture_20_pace_Unknown', 'lecture_21_pace_Early', 'lecture_21_pace_Late', 'lecture_21_pace_On-time', 'lecture_21_pace_Unknown', 'lecture_22_pace_Early', 'lecture_22_pace_Late', 'lecture_22_pace_On-time', 'lecture_22_pace_Unknown', 'lecture_23_pace_Early', 'lecture_23_pace_Late', 'lecture_23_pace_On-time', 'lecture_23_pace_Unknown', 'lecture_24_pace_Early', 'lecture_24_pace_Late', 'lecture_24_pace_On-time', 'lecture_24_pace_Unknown', 'lecture_25_pace_Early', 'lecture_25_pace_Late', 'lecture_25_pace_On-time', 'lecture_25_pace_Unknown', 'lecture_26_pace_Early', 'lecture_26_pace_Late', 'lecture_26_pace_On-time', 'lecture_26_pace_Unknown', 'overall_pace_Early', 'overall_pace_Late', 'overall_pace_On-time', 'overall_pace_Unknown'],
"Classmate Contact" : ['qtr_on_piazza', 'qtr_email', 'qtr_hipchat', 'qrt_gplus', 'qtr_other_chat', 'qtr_phone', 'qtr_facebook', 'qtr_in_person', 'mid_on_piazza', 'mid_email', 'mid_hipchat', 'qrt_gplus', 'mid_other_chat', 'mid_phone', 'mid_facebook', 'mid_in_person', 'final_on_piazza', 'final_email', 'final_hipchat', 'qrt_gplus', 'final_other_chat', 'final_phone', 'final_facebook', 'final_in_person'],
"Lecture Amount" : ['total_lecture_time', 'overal_lecture_views', 'lecture_1_views', 'lecture_2_views', 'lecture_3_views', 'lecture_4_views', 'lecture_5_views', 'lecture_6_views', 'lecture_7_views', 'lecture_8_views', 'lecture_9_views', 'lecture_10_views', 'lecture_11_views', 'lecture_12_views', 'lecture_13_views', 'lecture_14_views', 'lecture_15_views', 'lecture_16_views', 'lecture_17_views', 'lecture_18_views', 'lecture_19_views', 'lecture_20_views', 'lecture_21_views', 'lecture_22_views', 'lecture_23_views', 'lecture_24_views', 'lecture_25_views', 'lecture_26_views'],
"Prior Experience" : ['formal_class_prog_taken', 'C', 'C#', 'C++', 'Java', 'JavaScript', 'Lisp', 'Objective C', 'Perl', 'PHP', 'Python', 'Ruby', 'Shell', 'Swift', 'Visual Basic', 'Other (specify below)', 'years_programming', 'prior_omscs_classes_completed', 'occupation', 'highest_education', 'besides_KBAI_how_many_classes', 'moocs_completed_outside_OMSCS'],
"Self Assesment" : ['qtr_proj1_confidence_neither confident nor unconfident', 'qtr_proj1_confidence_no answer', 'qtr_proj1_confidence_somewhat confident', 'qtr_proj1_confidence_somewhat unconfident', 'qtr_proj1_confidence_very confident', 'qtr_proj1_confidence_very unconfident', 'qtr_proj2_confidence_neither confident nor unconfident', 'qtr_proj2_confidence_no answer', 'qtr_proj2_confidence_somewhat confident', 'qtr_proj2_confidence_somewhat unconfident', 'qtr_proj2_confidence_very confident', 'qtr_proj2_confidence_very unconfident', 'mid_proj2_confidence_neither confident nor unconfident', 'mid_proj2_confidence_no answer', 'mid_proj2_confidence_somewhat confident', 'mid_proj2_confidence_somewhat unconfident', 'mid_proj2_confidence_very confident', 'mid_proj2_confidence_very unconfident', 'mid_proj3_confidence_neither confident nor unconfident', 'mid_proj3_confidence_no answer', 'mid_proj3_confidence_somewhat confident', 'mid_proj3_confidence_somewhat unconfident', 'mid_proj3_confidence_very confident', 'mid_proj3_confidence_very unconfident', 'final_proj3_confidence_neither confident nor unconfident', 'final_proj3_confidence_no answer', 'final_proj3_confidence_somewhat confident', 'final_proj3_confidence_somewhat unconfident', 'final_proj3_confidence_very confident', 'final_proj3_confidence_very unconfident']
}
#list the data sources
data_sources = [
"basic_data",
"basic_data_only_finishers",
"basic_data_clean_lecture",
"basic_data_piazza",
"basic_data_piazza_only_finishers"
]
#create csv for results
with open(os.path.join(results_location, 'regression_results.csv'), 'wb') as output_file:
#establish the csv writer
writer = csv.writer(output_file, delimiter=',')
for data_source in data_sources:
print("\n\n------------------")
print("Data Set - %s" % data_source)
print("------------------")
#this section determines R^2 scores of the regressors
writer.writerow(["R^2 Scores"])
writer.writerow(["Dataset - %s" % data_source] )
#load the data from the csv
header, data = data_work.load_data(data_source + ".csv", conversion_function = data_work.convert_survey_data, max_records = None)
#create headings
writer.writerow(["Feature", "Decision Tree", "Boosting", "Random Forest", "Linear Regression", "Support Vector Machine", "", "Feature Details"])
#loop through all the feature set combos
for feature_set_name, select_columns in feature_dict.iteritems():
print("\n\n------------------")
print("Feature Set - %s" % feature_set_name)
print("------------------")
#get the data subset
header_subset, data_subset = data_work.select_data_columns(header, data, column_names = ['course_grade'] + select_columns)
#first run on the full set
#assumes first column is Y
X_train, X_test, y_train, y_test = data_work.divide_for_training(data_subset)
#remove the label header
header_subset = header_subset[1 : ]
#scale the data
X_train, X_test = data_work.scale_features(X_train, X_test)
#test all to start
run_regressors(X_train, y_train, X_test, y_test, header_subset, writer, data_source + "-" + feature_set_name + " Linear")
#for degree in [2, 3, 4]:
for degree in [2]:
#convert to polynomials
poly = PolynomialFeatures(degree=degree)
X_train_poly = poly.fit_transform(X_train)
X_test_poly = poly.fit_transform(X_test)
#test all in poly
run_regressors(X_train_poly , y_train, X_test_poly, y_test, header_subset, writer, data_source + "-" + feature_set_name + " Poly %i" % degree)
##test individually
#for i in range(0, X_train.shape[1]):
# run_regressors(X_train[:, i,np.newaxis], y_train, X_test[:, i,np.newaxis], y_test, header_subset[i + 1, np.newaxis], writer)
return
if __name__ == "__main__":
main() | gpl-2.0 |
Myasuka/scikit-learn | examples/covariance/plot_outlier_detection.py | 235 | 3891 | """
==========================================
Outlier detection with several methods.
==========================================
When the amount of contamination is known, this example illustrates two
different ways of performing :ref:`outlier_detection`:
- based on a robust estimator of covariance, which assumes that the
  data are Gaussian distributed and performs better than the One-Class SVM
  in that case;
- using the One-Class SVM and its ability to capture the shape of the
  data set, hence performing better when the data is strongly
  non-Gaussian, i.e. with two well-separated clusters.
The ground truth about inliers and outliers is given by the points colors
while the orange-filled area indicates which points are reported as inliers
by each method.
Here, we assume that we know the fraction of outliers in the datasets.
Thus rather than using the 'predict' method of the objects, we set the
threshold on the decision_function to separate out the corresponding
fraction.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager
from scipy import stats
from sklearn import svm
from sklearn.covariance import EllipticEnvelope
# Example settings
n_samples = 200
outliers_fraction = 0.25
clusters_separation = [0, 1, 2]
# define two outlier detection tools to be compared
classifiers = {
"One-Class SVM": svm.OneClassSVM(nu=0.95 * outliers_fraction + 0.05,
kernel="rbf", gamma=0.1),
"robust covariance estimator": EllipticEnvelope(contamination=.1)}
# Compare given classifiers under given settings
xx, yy = np.meshgrid(np.linspace(-7, 7, 500), np.linspace(-7, 7, 500))
n_inliers = int((1. - outliers_fraction) * n_samples)
n_outliers = int(outliers_fraction * n_samples)
ground_truth = np.ones(n_samples, dtype=int)
ground_truth[-n_outliers:] = 0
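# With the settings above: n_inliers = int(0.75 * 200) = 150 and
# n_outliers = int(0.25 * 200) = 50, so the last 50 ground-truth labels are 0.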
# Fit the problem with varying cluster separation
for i, offset in enumerate(clusters_separation):
np.random.seed(42)
# Data generation
    X1 = 0.3 * np.random.randn(int(0.5 * n_inliers), 2) - offset
    X2 = 0.3 * np.random.randn(int(0.5 * n_inliers), 2) + offset
X = np.r_[X1, X2]
# Add outliers
X = np.r_[X, np.random.uniform(low=-6, high=6, size=(n_outliers, 2))]
# Fit the model with the One-Class SVM
plt.figure(figsize=(10, 5))
for i, (clf_name, clf) in enumerate(classifiers.items()):
# fit the data and tag outliers
clf.fit(X)
y_pred = clf.decision_function(X).ravel()
threshold = stats.scoreatpercentile(y_pred,
100 * outliers_fraction)
y_pred = y_pred > threshold
n_errors = (y_pred != ground_truth).sum()
# plot the levels lines and the points
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
subplot = plt.subplot(1, 2, i + 1)
subplot.set_title("Outlier detection")
subplot.contourf(xx, yy, Z, levels=np.linspace(Z.min(), threshold, 7),
cmap=plt.cm.Blues_r)
a = subplot.contour(xx, yy, Z, levels=[threshold],
linewidths=2, colors='red')
subplot.contourf(xx, yy, Z, levels=[threshold, Z.max()],
colors='orange')
b = subplot.scatter(X[:-n_outliers, 0], X[:-n_outliers, 1], c='white')
c = subplot.scatter(X[-n_outliers:, 0], X[-n_outliers:, 1], c='black')
subplot.axis('tight')
subplot.legend(
[a.collections[0], b, c],
['learned decision function', 'true inliers', 'true outliers'],
prop=matplotlib.font_manager.FontProperties(size=11))
subplot.set_xlabel("%d. %s (errors: %d)" % (i + 1, clf_name, n_errors))
subplot.set_xlim((-7, 7))
subplot.set_ylim((-7, 7))
plt.subplots_adjust(0.04, 0.1, 0.96, 0.94, 0.1, 0.26)
plt.show()
| bsd-3-clause |
blaisb/cfdemUtilities | viscosymetri/plotViscPlate.py | 2 | 2515 | # This extract the viscous force in the X direction from the force log
# and compares it with the analytical solution
# USAGE : python ./monitorTorque.py LOGFILE
# Author : Bruno Blais
# Last modified : 5-08-2014
#Python imports
#----------------
import os
import sys
import numpy
import time
import matplotlib.pyplot as plt
import re # Ouhh regular expressions :)
#----------------
#********************************
# OPTIONS AND USER PARAMETERS
#********************************
#Physical parameter
L=0.025
H=0.025
V=0.1
mu=1.0
phiVar=17500.*20;
epsMax=0.65
nBar=2.5
factor = H / V / L / L
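# Reasoning behind `factor` (a sketch, assuming plane-Couette flow between a
# moving plate of area L*L and a wall separated by a gap H): the wall shear
# stress is tau = mu * V / H and the viscous force is F = tau * L * L, so
#   mu = F * H / (V * L * L) = F * factor
# which is why the force read from the log is multiplied by `factor` in readf().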
#=============================
# READER OF LOG FILE
#=============================
# This function reads the log file and extracts the torque
def readf(fname):
t=[]
moment=[]
infile = open(fname,'r')
if (infile!=0):
print "Log file opened"
for l in infile:
l_str = l.split(")")
if (len(l_str)>3):
l2_str = l_str[1].split("(")
if (len(l_str)>2):
l3_str=l2_str[1].split()
l3_num = float(l3_str[0])
moment.extend([l3_num])
l2_str = l_str[0].split()
l2_num = float(l2_str[0])
t.extend([l2_num])
else:
print "File %s could not be opened" %fname
for i in range(0,len(moment)):
moment[i] = abs(moment[i] * factor)
    infile.close()
    return t, moment
#======================
# MAIN
#======================
# Get name from terminal
ax = plt.figure("Viscous Force") #Create window
#Labeling
plt.ylabel('Dynamic viscosity [Pa*s]')
plt.xlabel('Time [s]')
plt.title('Dynamic evolution of the viscosity')
visc=[]
phi=[]
viscP = []
for i in range(1,len(sys.argv)):
fname = sys.argv[i]
[t,moment] = readf(fname)
phi.extend([1.-float(fname)])
visc.extend([numpy.average(moment[-2:-1])])
plt.plot(t,moment,'-')
#get the power viscosity
#fnamePower="p"+fname
#t, p = numpy.loadtxt(fnamePower, unpack=True)
# convert power to viscosity
#viscP.extend([p[-1]*factor])
plt.show()
#Second plot of evolution of viscosity vs phi
ax = plt.figure("Viscosity") #Create window
plt.ylabel('Dynamic Viscosity [Pa*s]')
plt.xlabel('Fraction of solid')
plt.title('Viscosity vs fraction of solid')
viscAnalytical=[]
for i in phi:
viscAnalytical.extend([(1-i/epsMax)**(-nBar*epsMax)])
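# The analytical curve above appears to be the Krieger-Dougherty relation,
#   mu_r = (1 - phi / phi_max) ** (-[eta] * phi_max),
# with phi_max = epsMax = 0.65 and an assumed intrinsic viscosity [eta] = nBar = 2.5.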
plt.plot(phi,visc,'-x', label='Simulation results Force')
#plt.plot(phi,viscP,'-x', label='Simulation results Power')
plt.plot(phi, viscAnalytical,'-.o',label='Analytical model')
plt.legend(loc=2)
#plt.yscale('log')
plt.show()
| lgpl-3.0 |
liuzhaoguo/FreeROI-1 | froi/algorithm/unused/kmeanmapper.py | 6 | 3469 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""Mapper for kmeans clustering.
Date: 2012.04.27
"""
__docformat__ = 'restructuredtext'
import numpy as np
import scipy.sparse as sp
from mvpa2.base import warning
from mvpa2.base.dochelpers import _str, borrowkwargs, _repr_attrs
from mvpa2.mappers.base import accepts_dataset_as_samples, Mapper
from mvpa2.datasets.base import Dataset
from mvpa2.datasets.miscfx import get_nsamples_per_attr, get_samples_by_attr
from mvpa2.support import copy
from sklearn.cluster import KMeans
class KMeanMapper(Mapper):
"""Mapper to do Kmean clustering
"""
def __init__(self, chunks_attr=None, k=8, init='k-means++', n_init=10, **kwargs):
"""
        Parameters
        ----------
        chunks_attr : str or None
            If provided, it specifies the name of a samples attribute in the
            training data, unique values of which will be used to identify
            chunks of samples, and to perform individual clustering within
            them.
        k : int or ndarray
            The number of clusters to form as well as the number of centroids
            to generate. If init is 'matrix', or if an ndarray is given
            instead, it is interpreted as the initial clusters to use.
        init : {'k-means++', 'random', 'points', 'matrix'}
            Method for initialization, defaults to 'k-means++'.
            'k-means++' selects initial cluster centers for k-means clustering
            in a smart way to speed up convergence (see the Notes section of
            k_init for more details). 'random' generates k centroids from a
            Gaussian with mean and variance estimated from the data. 'points'
            chooses k observations (rows) at random from the data for the
            initial centroids. 'matrix' interprets the k parameter as a k by M
            array (or a length-k array for one-dimensional data) of initial
            centroids.
        n_init : int
            Number of times the k-means algorithm will be run. Note that this
            differs in meaning from the iters parameter to the kmeans function.
"""
Mapper.__init__(self, **kwargs)
self.__chunks_attr = chunks_attr
self.__k = k
self.__init = init
self.__n_init = n_init
def __repr__(self, prefixes=[]):
return super(KMeanMapper, self).__repr__(
prefixes=prefixes
+ _repr_attrs(self, ['chunks_attr', 'k', 'init', 'n_init']))
def __str__(self):
return _str(self)
def _forward_dataset(self, ds):
chunks_attr = self.__chunks_attr
mds = Dataset([])
mds.a = ds.a
# mds.sa =ds.sa
# mds.fa =ds.fa
if chunks_attr is None:
# global kmeans
mds.samples = self._kmeans(ds.samples).labels_
print max(mds.samples)
else:
# per chunk kmeans
for c in ds.sa[chunks_attr].unique:
slicer = np.where(ds.sa[chunks_attr].value == c)[0]
mds.samples = ds.samples[0,:]
mds.samples[slicer] = self._kmeans(ds.samples[slicer]).labels_
return mds
def _kmeans(self, samples):
if sp.issparse(samples):
samples = samples.todense()
samples = samples.T
print np.shape(samples)
return KMeans(k=self.__k, n_init=self.__n_init, init=self.__init).fit(samples)
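# Hedged usage sketch (assumes a PyMVPA Dataset `ds` with samples shaped
# n_samples x n_features; PyMVPA mappers are normally applied through their
# forward()/__call__ interface, which dispatches to _forward_dataset above):
#
#   mapper = KMeanMapper(chunks_attr=None, k=5)
#   clustered = mapper.forward(ds)
#   # clustered.samples holds one k-means label per column of ds.samples,
#   # because _kmeans() transposes before fitting (i.e. columns are clustered)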
| bsd-3-clause |