repo_name (string, 6-112 chars) | path (string, 4-204 chars) | copies (string, 1-3 chars) | size (string, 4-6 chars) | content (string, 714-810k chars) | license (string, 15 classes)
---|---|---|---|---|---
nvoron23/scikit-learn | sklearn/linear_model/randomized_l1.py | 68 | 23405 | """
Randomized Lasso/Logistic: feature selection based on Lasso and
sparse Logistic Regression
"""
# Author: Gael Varoquaux, Alexandre Gramfort
#
# License: BSD 3 clause
import itertools
from abc import ABCMeta, abstractmethod
import warnings
import numpy as np
from scipy.sparse import issparse
from scipy import sparse
from scipy.interpolate import interp1d
from .base import center_data
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.joblib import Memory, Parallel, delayed
from ..utils import (as_float_array, check_random_state, check_X_y,
check_array, safe_mask, ConvergenceWarning)
from ..utils.validation import check_is_fitted
from .least_angle import lars_path, LassoLarsIC
from .logistic import LogisticRegression
###############################################################################
# Randomized linear model: feature selection
def _resample_model(estimator_func, X, y, scaling=.5, n_resampling=200,
n_jobs=1, verbose=False, pre_dispatch='3*n_jobs',
random_state=None, sample_fraction=.75, **params):
random_state = check_random_state(random_state)
# We are generating 1 - weights, and not weights
n_samples, n_features = X.shape
if not (0 < scaling < 1):
raise ValueError(
"'scaling' should be between 0 and 1. Got %r instead." % scaling)
scaling = 1. - scaling
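# Note: ``weights`` drawn below take values in {0, 1 - original scaling}, so in
# each per-resample estimator a feature column is multiplied by (1 - weight),
# i.e. either left untouched or shrunk by the user-supplied ``scaling`` factor.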
scores_ = 0.0
for active_set in Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)(
delayed(estimator_func)(
X, y, weights=scaling * random_state.random_integers(
0, 1, size=(n_features,)),
mask=(random_state.rand(n_samples) < sample_fraction),
verbose=max(0, verbose - 1),
**params)
for _ in range(n_resampling)):
scores_ += active_set
scores_ /= n_resampling
return scores_
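# Illustrative sketch (not part of the original module): ``scores_`` is the
# fraction of resampled fits in which each feature was active. For a
# hypothetical ``estimator_func`` returning boolean active sets, two
# resamplings [True, False, True] and [True, False, False] aggregate to
# scores of [1.0, 0.0, 0.5].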
class BaseRandomizedLinearModel(six.with_metaclass(ABCMeta, BaseEstimator,
TransformerMixin)):
"""Base class to implement randomized linear models for feature selection
This implements the strategy of Meinshausen and Buhlmann:
stability selection with randomized sampling, and random re-weighting of
the penalty.
"""
@abstractmethod
def __init__(self):
pass
_center_data = staticmethod(center_data)
def fit(self, X, y):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, sparse matrix shape = [n_samples, n_features]
Training data.
y : array-like, shape = [n_samples]
Target values.
Returns
-------
self : object
Returns an instance of self.
"""
X, y = check_X_y(X, y, ['csr', 'csc'], y_numeric=True,
ensure_min_samples=2)
X = as_float_array(X, copy=False)
n_samples, n_features = X.shape
X, y, X_mean, y_mean, X_std = self._center_data(X, y,
self.fit_intercept,
self.normalize)
estimator_func, params = self._make_estimator_and_params(X, y)
memory = self.memory
if isinstance(memory, six.string_types):
memory = Memory(cachedir=memory)
scores_ = memory.cache(
_resample_model, ignore=['verbose', 'n_jobs', 'pre_dispatch']
)(
estimator_func, X, y,
scaling=self.scaling, n_resampling=self.n_resampling,
n_jobs=self.n_jobs, verbose=self.verbose,
pre_dispatch=self.pre_dispatch, random_state=self.random_state,
sample_fraction=self.sample_fraction, **params)
if scores_.ndim == 1:
scores_ = scores_[:, np.newaxis]
self.all_scores_ = scores_
self.scores_ = np.max(self.all_scores_, axis=1)
return self
def _make_estimator_and_params(self, X, y):
"""Return the parameters passed to the estimator"""
raise NotImplementedError
def get_support(self, indices=False):
"""Return a mask, or list, of the features/indices selected."""
check_is_fitted(self, 'scores_')
mask = self.scores_ > self.selection_threshold
return mask if not indices else np.where(mask)[0]
# XXX: the two functions below are copy/pasted from feature_selection,
# Should we add an intermediate base class?
def transform(self, X):
"""Transform a new matrix using the selected features"""
mask = self.get_support()
X = check_array(X)
if len(mask) != X.shape[1]:
raise ValueError("X has a different shape than during fitting.")
return check_array(X)[:, safe_mask(X, mask)]
def inverse_transform(self, X):
"""Transform a new matrix using the selected features"""
support = self.get_support()
if X.ndim == 1:
X = X[None, :]
Xt = np.zeros((X.shape[0], support.size))
Xt[:, support] = X
return Xt
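# Hedged usage sketch (assumes a fitted subclass such as RandomizedLasso and
# hypothetical data X, y):
#
#   selector = RandomizedLasso().fit(X, y)
#   X_sel = selector.transform(X)               # keep only the selected columns
#   X_back = selector.inverse_transform(X_sel)  # zero-pad back to n_features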
###############################################################################
# Randomized lasso: regression settings
def _randomized_lasso(X, y, weights, mask, alpha=1., verbose=False,
precompute=False, eps=np.finfo(np.float).eps,
max_iter=500):
X = X[safe_mask(X, mask)]
y = y[mask]
# Center X and y to avoid fitting the intercept
X -= X.mean(axis=0)
y -= y.mean()
alpha = np.atleast_1d(np.asarray(alpha, dtype=np.float))
X = (1 - weights) * X
with warnings.catch_warnings():
warnings.simplefilter('ignore', ConvergenceWarning)
alphas_, _, coef_ = lars_path(X, y,
Gram=precompute, copy_X=False,
copy_Gram=False, alpha_min=np.min(alpha),
method='lasso', verbose=verbose,
max_iter=max_iter, eps=eps)
if len(alpha) > 1:
if len(alphas_) > 1: # np.min(alpha) < alpha_min
interpolator = interp1d(alphas_[::-1], coef_[:, ::-1],
bounds_error=False, fill_value=0.)
scores = (interpolator(alpha) != 0.0)
else:
scores = np.zeros((X.shape[1], len(alpha)), dtype=np.bool)
else:
scores = coef_[:, -1] != 0.0
return scores
class RandomizedLasso(BaseRandomizedLinearModel):
"""Randomized Lasso.
Randomized Lasso works by resampling the training data and computing
a Lasso on each resampling. In short, the features selected most
often across resamplings are considered good features. It is also
known as stability selection.
Read more in the :ref:`User Guide <randomized_l1>`.
Parameters
----------
alpha : float, 'aic', or 'bic', optional
The regularization parameter alpha in the Lasso.
Warning: this is not the alpha parameter of the stability selection
article; that parameter corresponds to ``scaling`` here.
scaling : float, optional
The alpha parameter in the stability selection article used to
randomly scale the features. Should be between 0 and 1.
sample_fraction : float, optional
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
n_resampling : int, optional
Number of randomized models.
selection_threshold : float, optional
The score above which features should be selected.
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default True
If True, the regressors X will be normalized before regression.
precompute : True | False | 'auto'
Whether to use a precomputed Gram matrix to speed up
calculations. If set to 'auto' let us decide. The Gram
matrix can also be passed as argument.
max_iter : integer, optional
Maximum number of iterations to perform in the Lars algorithm.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the 'tol' parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
n_jobs : integer, optional
Number of CPUs to use during the resampling. If '-1', use
all the CPUs
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
memory : Instance of joblib.Memory or string
Used for internal caching. By default, no caching is done.
If a string is given, it is the path to the caching directory.
Attributes
----------
scores_ : array, shape = [n_features]
Feature scores between 0 and 1.
all_scores_ : array, shape = [n_features, n_reg_parameter]
Feature scores between 0 and 1 for all values of the regularization \
parameter. The reference article suggests ``scores_`` is the max of \
``all_scores_``.
Examples
--------
>>> from sklearn.linear_model import RandomizedLasso
>>> randomized_lasso = RandomizedLasso()
Notes
-----
See examples/linear_model/plot_sparse_recovery.py for an example.
References
----------
Stability selection
Nicolai Meinshausen, Peter Buhlmann
Journal of the Royal Statistical Society: Series B
Volume 72, Issue 4, pages 417-473, September 2010
DOI: 10.1111/j.1467-9868.2010.00740.x
See also
--------
RandomizedLogisticRegression, LogisticRegression
"""
def __init__(self, alpha='aic', scaling=.5, sample_fraction=.75,
n_resampling=200, selection_threshold=.25,
fit_intercept=True, verbose=False,
normalize=True, precompute='auto',
max_iter=500,
eps=np.finfo(np.float).eps, random_state=None,
n_jobs=1, pre_dispatch='3*n_jobs',
memory=Memory(cachedir=None, verbose=0)):
self.alpha = alpha
self.scaling = scaling
self.sample_fraction = sample_fraction
self.n_resampling = n_resampling
self.fit_intercept = fit_intercept
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.precompute = precompute
self.eps = eps
self.random_state = random_state
self.n_jobs = n_jobs
self.selection_threshold = selection_threshold
self.pre_dispatch = pre_dispatch
self.memory = memory
def _make_estimator_and_params(self, X, y):
assert self.precompute in (True, False, None, 'auto')
alpha = self.alpha
if alpha in ('aic', 'bic'):
model = LassoLarsIC(precompute=self.precompute,
criterion=self.alpha,
max_iter=self.max_iter,
eps=self.eps)
model.fit(X, y)
self.alpha_ = alpha = model.alpha_
return _randomized_lasso, dict(alpha=alpha, max_iter=self.max_iter,
eps=self.eps,
precompute=self.precompute)
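# Hedged usage sketch (hypothetical X, y; thresholds are illustrative): after
# fitting, ``scores_`` holds per-feature selection frequencies in [0, 1] and
# ``get_support`` applies ``selection_threshold``:
#
#   rl = RandomizedLasso(alpha='aic', selection_threshold=0.25).fit(X, y)
#   important = rl.get_support(indices=True)   # indices with scores_ > 0.25
#   ranking = rl.scores_                       # stability score per feature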
###############################################################################
# Randomized logistic: classification settings
def _randomized_logistic(X, y, weights, mask, C=1., verbose=False,
fit_intercept=True, tol=1e-3):
X = X[safe_mask(X, mask)]
y = y[mask]
if issparse(X):
size = len(weights)
weight_dia = sparse.dia_matrix((1 - weights, 0), (size, size))
X = X * weight_dia
else:
X *= (1 - weights)
C = np.atleast_1d(np.asarray(C, dtype=np.float))
scores = np.zeros((X.shape[1], len(C)), dtype=np.bool)
for this_C, this_scores in zip(C, scores.T):
# XXX : would be great to do it with a warm_start ...
clf = LogisticRegression(C=this_C, tol=tol, penalty='l1', dual=False,
fit_intercept=fit_intercept)
clf.fit(X, y)
this_scores[:] = np.any(
np.abs(clf.coef_) > 10 * np.finfo(np.float).eps, axis=0)
return scores
class RandomizedLogisticRegression(BaseRandomizedLinearModel):
"""Randomized Logistic Regression
Randomized Logistic Regression works by resampling the training data
and computing a LogisticRegression on each resampling. In short, the
features selected most often across resamplings are considered good
features. It is also known as stability selection.
Read more in the :ref:`User Guide <randomized_l1>`.
Parameters
----------
C : float, optional, default=1
The regularization parameter C in the LogisticRegression.
scaling : float, optional, default=0.5
The alpha parameter in the stability selection article used to
randomly scale the features. Should be between 0 and 1.
sample_fraction : float, optional, default=0.75
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
n_resampling : int, optional, default=200
Number of randomized models.
selection_threshold : float, optional, default=0.25
The score above which features should be selected.
fit_intercept : boolean, optional, default=True
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default=True
If True, the regressors X will be normalized before regression.
tol : float, optional, default=1e-3
tolerance for stopping criteria of LogisticRegression
n_jobs : integer, optional
Number of CPUs to use during the resampling. If '-1', use
all the CPUs
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
memory : Instance of joblib.Memory or string
Used for internal caching. By default, no caching is done.
If a string is given, it is the path to the caching directory.
Attributes
----------
scores_ : array, shape = [n_features]
Feature scores between 0 and 1.
all_scores_ : array, shape = [n_features, n_reg_parameter]
Feature scores between 0 and 1 for all values of the regularization \
parameter. The reference article suggests ``scores_`` is the max \
of ``all_scores_``.
Examples
--------
>>> from sklearn.linear_model import RandomizedLogisticRegression
>>> randomized_logistic = RandomizedLogisticRegression()
Notes
-----
See examples/linear_model/plot_sparse_recovery.py for an example.
References
----------
Stability selection
Nicolai Meinshausen, Peter Buhlmann
Journal of the Royal Statistical Society: Series B
Volume 72, Issue 4, pages 417-473, September 2010
DOI: 10.1111/j.1467-9868.2010.00740.x
See also
--------
RandomizedLasso, Lasso, ElasticNet
"""
def __init__(self, C=1, scaling=.5, sample_fraction=.75,
n_resampling=200,
selection_threshold=.25, tol=1e-3,
fit_intercept=True, verbose=False,
normalize=True,
random_state=None,
n_jobs=1, pre_dispatch='3*n_jobs',
memory=Memory(cachedir=None, verbose=0)):
self.C = C
self.scaling = scaling
self.sample_fraction = sample_fraction
self.n_resampling = n_resampling
self.fit_intercept = fit_intercept
self.verbose = verbose
self.normalize = normalize
self.tol = tol
self.random_state = random_state
self.n_jobs = n_jobs
self.selection_threshold = selection_threshold
self.pre_dispatch = pre_dispatch
self.memory = memory
def _make_estimator_and_params(self, X, y):
params = dict(C=self.C, tol=self.tol,
fit_intercept=self.fit_intercept)
return _randomized_logistic, params
def _center_data(self, X, y, fit_intercept, normalize=False):
"""Center the data in X but not in y"""
X, _, Xmean, _, X_std = center_data(X, y, fit_intercept,
normalize=normalize)
return X, y, Xmean, y, X_std
###############################################################################
# Stability paths
def _lasso_stability_path(X, y, mask, weights, eps):
"Inner loop of lasso_stability_path"
X = X * weights[np.newaxis, :]
X = X[safe_mask(X, mask), :]
y = y[mask]
alpha_max = np.max(np.abs(np.dot(X.T, y))) / X.shape[0]
alpha_min = eps * alpha_max # set for early stopping in path
with warnings.catch_warnings():
warnings.simplefilter('ignore', ConvergenceWarning)
alphas, _, coefs = lars_path(X, y, method='lasso', verbose=False,
alpha_min=alpha_min)
# Scale alpha by alpha_max
alphas /= alphas[0]
# Sort alphas in ascending order
alphas = alphas[::-1]
coefs = coefs[:, ::-1]
# Get rid of the alphas that are too small
mask = alphas >= eps
# We also want to keep the first one: it should be close to the OLS
# solution
mask[0] = True
alphas = alphas[mask]
coefs = coefs[:, mask]
return alphas, coefs
def lasso_stability_path(X, y, scaling=0.5, random_state=None,
n_resampling=200, n_grid=100,
sample_fraction=0.75,
eps=4 * np.finfo(np.float).eps, n_jobs=1,
verbose=False):
"""Stabiliy path based on randomized Lasso estimates
Read more in the :ref:`User Guide <randomized_l1>`.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
training data.
y : array-like, shape = [n_samples]
target values.
scaling : float, optional, default=0.5
The alpha parameter in the stability selection article used to
randomly scale the features. Should be between 0 and 1.
random_state : integer or numpy.random.RandomState, optional
The generator used to randomize the design.
n_resampling : int, optional, default=200
Number of randomized models.
n_grid : int, optional, default=100
Number of grid points. The path is linearly reinterpolated
on a grid between 0 and 1 before computing the scores.
sample_fraction : float, optional, default=0.75
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
eps : float, optional
Smallest value of alpha / alpha_max considered
n_jobs : integer, optional
Number of CPUs to use during the resampling. If '-1', use
all the CPUs
verbose : boolean or integer, optional
Sets the verbosity amount
Returns
-------
alphas_grid : array, shape ~ [n_grid]
The grid points between 0 and 1: alpha/alpha_max
scores_path : array, shape = [n_features, n_grid]
The scores for each feature along the path.
Notes
-----
See examples/linear_model/plot_sparse_recovery.py for an example.
"""
rng = check_random_state(random_state)
if not (0 < scaling < 1):
raise ValueError("Parameter 'scaling' should be between 0 and 1."
" Got %r instead." % scaling)
n_samples, n_features = X.shape
paths = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(_lasso_stability_path)(
X, y, mask=rng.rand(n_samples) < sample_fraction,
weights=1. - scaling * rng.random_integers(0, 1,
size=(n_features,)),
eps=eps)
for k in range(n_resampling))
all_alphas = sorted(list(set(itertools.chain(*[p[0] for p in paths]))))
# Take approximately n_grid values
stride = int(max(1, int(len(all_alphas) / float(n_grid))))
all_alphas = all_alphas[::stride]
if not all_alphas[-1] == 1:
all_alphas.append(1.)
all_alphas = np.array(all_alphas)
scores_path = np.zeros((n_features, len(all_alphas)))
for alphas, coefs in paths:
if alphas[0] != 0:
alphas = np.r_[0, alphas]
coefs = np.c_[np.ones((n_features, 1)), coefs]
if alphas[-1] != all_alphas[-1]:
alphas = np.r_[alphas, all_alphas[-1]]
coefs = np.c_[coefs, np.zeros((n_features, 1))]
scores_path += (interp1d(alphas, coefs,
kind='nearest', bounds_error=False,
fill_value=0, axis=-1)(all_alphas) != 0)
scores_path /= n_resampling
return all_alphas, scores_path
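# Hedged usage sketch (hypothetical X, y):
#
#   alpha_grid, scores_path = lasso_stability_path(X, y, n_resampling=200)
#   # scores_path[j, i] is the fraction of resamplings in which feature j has
#   # a nonzero coefficient at the scaled regularization value alpha_grid[i].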
| bsd-3-clause |
cshallue/models | research/delf/delf/python/examples/match_images.py | 3 | 4296 | # Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Matches two images using their DELF features.
The matching is done using feature-based nearest-neighbor search, followed by
geometric verification using RANSAC.
The DELF features can be extracted using the extract_features.py script.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import numpy as np
from scipy.spatial import cKDTree
from skimage.feature import plot_matches
from skimage.measure import ransac
from skimage.transform import AffineTransform
import tensorflow as tf
from tensorflow.python.platform import app
from delf import feature_io
cmd_args = None
_DISTANCE_THRESHOLD = 0.8
def main(unused_argv):
tf.logging.set_verbosity(tf.logging.INFO)
# Read features.
locations_1, _, descriptors_1, _, _ = feature_io.ReadFromFile(
cmd_args.features_1_path)
num_features_1 = locations_1.shape[0]
tf.logging.info("Loaded image 1's %d features" % num_features_1)
locations_2, _, descriptors_2, _, _ = feature_io.ReadFromFile(
cmd_args.features_2_path)
num_features_2 = locations_2.shape[0]
tf.logging.info("Loaded image 2's %d features" % num_features_2)
# Find nearest-neighbor matches using a KD tree.
d1_tree = cKDTree(descriptors_1)
_, indices = d1_tree.query(
descriptors_2, distance_upper_bound=_DISTANCE_THRESHOLD)
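# cKDTree.query returns index == num_features_1 (one past the last point) for
# descriptors with no neighbor within _DISTANCE_THRESHOLD; those rows are
# filtered out below when building the putative matches.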
# Select feature locations for putative matches.
locations_2_to_use = np.array([
locations_2[i,]
for i in range(num_features_2)
if indices[i] != num_features_1
])
locations_1_to_use = np.array([
locations_1[indices[i],]
for i in range(num_features_2)
if indices[i] != num_features_1
])
# Perform geometric verification using RANSAC.
_, inliers = ransac(
(locations_1_to_use, locations_2_to_use),
AffineTransform,
min_samples=3,
residual_threshold=20,
max_trials=1000)
tf.logging.info('Found %d inliers' % sum(inliers))
# Visualize correspondences, and save to file.
_, ax = plt.subplots()
img_1 = mpimg.imread(cmd_args.image_1_path)
img_2 = mpimg.imread(cmd_args.image_2_path)
inlier_idxs = np.nonzero(inliers)[0]
plot_matches(
ax,
img_1,
img_2,
locations_1_to_use,
locations_2_to_use,
np.column_stack((inlier_idxs, inlier_idxs)),
matches_color='b')
ax.axis('off')
ax.set_title('DELF correspondences')
plt.savefig(cmd_args.output_image)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.register('type', 'bool', lambda v: v.lower() == 'true')
parser.add_argument(
'--image_1_path',
type=str,
default='test_images/image_1.jpg',
help="""
Path to test image 1.
""")
parser.add_argument(
'--image_2_path',
type=str,
default='test_images/image_2.jpg',
help="""
Path to test image 2.
""")
parser.add_argument(
'--features_1_path',
type=str,
default='test_features/image_1.delf',
help="""
Path to DELF features from image 1.
""")
parser.add_argument(
'--features_2_path',
type=str,
default='test_features/image_2.delf',
help="""
Path to DELF features from image 2.
""")
parser.add_argument(
'--output_image',
type=str,
default='test_match.png',
help="""
Path where an image showing the matches will be saved.
""")
cmd_args, unparsed = parser.parse_known_args()
app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
djgagne/scikit-learn | benchmarks/bench_plot_neighbors.py | 287 | 6433 | """
Plot the scaling of the nearest neighbors algorithms with k, D, and N
"""
from time import time
import numpy as np
import pylab as pl
from matplotlib import ticker
from sklearn import neighbors, datasets
def get_data(N, D, dataset='dense'):
if dataset == 'dense':
np.random.seed(0)
return np.random.random((N, D))
elif dataset == 'digits':
X = datasets.load_digits().data
i = np.argsort(X[0])[::-1]
X = X[:, i]
return X[:N, :D]
else:
raise ValueError("invalid dataset: %s" % dataset)
def barplot_neighbors(Nrange=2 ** np.arange(1, 11),
Drange=2 ** np.arange(7),
krange=2 ** np.arange(10),
N=1000,
D=64,
k=5,
leaf_size=30,
dataset='digits'):
algorithms = ('kd_tree', 'brute', 'ball_tree')
fiducial_values = {'N': N,
'D': D,
'k': k}
#------------------------------------------------------------
# varying N
N_results_build = dict([(alg, np.zeros(len(Nrange)))
for alg in algorithms])
N_results_query = dict([(alg, np.zeros(len(Nrange)))
for alg in algorithms])
for i, NN in enumerate(Nrange):
print("N = %i (%i out of %i)" % (NN, i + 1, len(Nrange)))
X = get_data(NN, D, dataset)
for algorithm in algorithms:
nbrs = neighbors.NearestNeighbors(n_neighbors=min(NN, k),
algorithm=algorithm,
leaf_size=leaf_size)
t0 = time()
nbrs.fit(X)
t1 = time()
nbrs.kneighbors(X)
t2 = time()
N_results_build[algorithm][i] = (t1 - t0)
N_results_query[algorithm][i] = (t2 - t1)
#------------------------------------------------------------
# varying D
D_results_build = dict([(alg, np.zeros(len(Drange)))
for alg in algorithms])
D_results_query = dict([(alg, np.zeros(len(Drange)))
for alg in algorithms])
for i, DD in enumerate(Drange):
print("D = %i (%i out of %i)" % (DD, i + 1, len(Drange)))
X = get_data(N, DD, dataset)
for algorithm in algorithms:
nbrs = neighbors.NearestNeighbors(n_neighbors=k,
algorithm=algorithm,
leaf_size=leaf_size)
t0 = time()
nbrs.fit(X)
t1 = time()
nbrs.kneighbors(X)
t2 = time()
D_results_build[algorithm][i] = (t1 - t0)
D_results_query[algorithm][i] = (t2 - t1)
#------------------------------------------------------------
# varying k
k_results_build = dict([(alg, np.zeros(len(krange)))
for alg in algorithms])
k_results_query = dict([(alg, np.zeros(len(krange)))
for alg in algorithms])
X = get_data(N, D, dataset)  # use the fiducial D when varying k
for i, kk in enumerate(krange):
print("k = %i (%i out of %i)" % (kk, i + 1, len(krange)))
for algorithm in algorithms:
nbrs = neighbors.NearestNeighbors(n_neighbors=kk,
algorithm=algorithm,
leaf_size=leaf_size)
t0 = time()
nbrs.fit(X)
t1 = time()
nbrs.kneighbors(X)
t2 = time()
k_results_build[algorithm][i] = (t1 - t0)
k_results_query[algorithm][i] = (t2 - t1)
pl.figure(figsize=(8, 11))
for (sbplt, vals, quantity,
build_time, query_time) in [(311, Nrange, 'N',
N_results_build,
N_results_query),
(312, Drange, 'D',
D_results_build,
D_results_query),
(313, krange, 'k',
k_results_build,
k_results_query)]:
ax = pl.subplot(sbplt, yscale='log')
pl.grid(True)
tick_vals = []
tick_labels = []
bottom = 10 ** np.min([min(np.floor(np.log10(build_time[alg])))
for alg in algorithms])
for i, alg in enumerate(algorithms):
xvals = 0.1 + i * (1 + len(vals)) + np.arange(len(vals))
width = 0.8
c_bar = pl.bar(xvals, build_time[alg] - bottom,
width, bottom, color='r')
q_bar = pl.bar(xvals, query_time[alg],
width, build_time[alg], color='b')
tick_vals += list(xvals + 0.5 * width)
tick_labels += ['%i' % val for val in vals]
pl.text((i + 0.02) / len(algorithms), 0.98, alg,
transform=ax.transAxes,
ha='left',
va='top',
bbox=dict(facecolor='w', edgecolor='w', alpha=0.5))
pl.ylabel('Time (s)')
ax.xaxis.set_major_locator(ticker.FixedLocator(tick_vals))
ax.xaxis.set_major_formatter(ticker.FixedFormatter(tick_labels))
for label in ax.get_xticklabels():
label.set_rotation(-90)
label.set_fontsize(10)
title_string = 'Varying %s' % quantity
descr_string = ''
for s in 'NDk':
if s == quantity:
pass
else:
descr_string += '%s = %i, ' % (s, fiducial_values[s])
descr_string = descr_string[:-2]
pl.text(1.01, 0.5, title_string,
transform=ax.transAxes, rotation=-90,
ha='left', va='center', fontsize=20)
pl.text(0.99, 0.5, descr_string,
transform=ax.transAxes, rotation=-90,
ha='right', va='center')
pl.gcf().suptitle("%s data set" % dataset.capitalize(), fontsize=16)
pl.figlegend((c_bar, q_bar), ('construction', 'N-point query'),
'upper right')
if __name__ == '__main__':
barplot_neighbors(dataset='digits')
barplot_neighbors(dataset='dense')
pl.show()
| bsd-3-clause |
kjung/scikit-learn | examples/plot_johnson_lindenstrauss_bound.py | 127 | 7477 | r"""
=====================================================================
The Johnson-Lindenstrauss bound for embedding with random projections
=====================================================================
The `Johnson-Lindenstrauss lemma`_ states that any high dimensional
dataset can be randomly projected into a lower dimensional Euclidean
space while controlling the distortion in the pairwise distances.
.. _`Johnson-Lindenstrauss lemma`: http://en.wikipedia.org/wiki/Johnson%E2%80%93Lindenstrauss_lemma
Theoretical bounds
==================
The distortion introduced by a random projection `p` is controlled by
the fact that `p` defines an eps-embedding with good probability,
as defined by:
.. math::
(1 - eps) \|u - v\|^2 < \|p(u) - p(v)\|^2 < (1 + eps) \|u - v\|^2
Where u and v are any rows taken from a dataset of shape [n_samples,
n_features] and p is a projection by a random Gaussian N(0, 1) matrix
with shape [n_components, n_features] (or a sparse Achlioptas matrix).
The minimum number of components that guarantees the eps-embedding is
given by:
.. math::
n\_components >= 4 log(n\_samples) / (eps^2 / 2 - eps^3 / 3)
The first plot shows that with an increasing number of samples ``n_samples``,
the minimal number of dimensions ``n_components`` increases logarithmically
in order to guarantee an ``eps``-embedding.
The second plot shows that increasing the admissible
distortion ``eps`` drastically reduces the minimal number of
dimensions ``n_components`` for a given number of samples ``n_samples``.
Empirical validation
====================
We validate the above bounds on the digits dataset or on the 20 newsgroups
text document (TF-IDF word frequencies) dataset:
- for the digits dataset, some 8x8 gray level pixels data for 500
handwritten digits pictures are randomly projected to spaces for various
larger number of dimensions ``n_components``.
- for the 20 newsgroups dataset some 500 documents with 100k
features in total are projected using a sparse random matrix to smaller
euclidean spaces with various values for the target number of dimensions
``n_components``.
The default dataset is the digits dataset. To run the example on the twenty
newsgroups dataset, pass the --twenty-newsgroups command line argument to this
script.
For each value of ``n_components``, we plot:
- 2D distribution of sample pairs with pairwise distances in original
and projected spaces as x and y axis respectively.
- 1D histogram of the ratio of those distances (projected / original).
We can see that for low values of ``n_components`` the distribution is wide
with many distorted pairs and a skewed distribution (due to the hard
limit of zero ratio on the left, as distances are always positive)
while for larger values of n_components the distortion is controlled
and the distances are well preserved by the random projection.
Remarks
=======
According to the JL lemma, projecting 500 samples without too much distortion
will require at least several thousand dimensions, irrespective of the
number of features of the original dataset.
Hence using random projections on the digits dataset which only has 64 features
in the input space does not make sense: it does not allow for dimensionality
reduction in this case.
On the twenty newsgroups dataset, on the other hand, the dimensionality can be
decreased from 56436 down to 10000 while reasonably preserving pairwise distances.
"""
print(__doc__)
import sys
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.random_projection import johnson_lindenstrauss_min_dim
from sklearn.random_projection import SparseRandomProjection
from sklearn.datasets import fetch_20newsgroups_vectorized
from sklearn.datasets import load_digits
from sklearn.metrics.pairwise import euclidean_distances
# Part 1: plot the theoretical dependency between n_components_min and
# n_samples
# range of admissible distortions
eps_range = np.linspace(0.1, 0.99, 5)
colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(eps_range)))
# range of number of samples (observation) to embed
n_samples_range = np.logspace(1, 9, 9)
plt.figure()
for eps, color in zip(eps_range, colors):
min_n_components = johnson_lindenstrauss_min_dim(n_samples_range, eps=eps)
plt.loglog(n_samples_range, min_n_components, color=color)
plt.legend(["eps = %0.1f" % eps for eps in eps_range], loc="lower right")
plt.xlabel("Number of observations to eps-embed")
plt.ylabel("Minimum number of dimensions")
plt.title("Johnson-Lindenstrauss bounds:\nn_samples vs n_components")
# range of admissible distortions
eps_range = np.linspace(0.01, 0.99, 100)
# range of number of samples (observation) to embed
n_samples_range = np.logspace(2, 6, 5)
colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(n_samples_range)))
plt.figure()
for n_samples, color in zip(n_samples_range, colors):
min_n_components = johnson_lindenstrauss_min_dim(n_samples, eps=eps_range)
plt.semilogy(eps_range, min_n_components, color=color)
plt.legend(["n_samples = %d" % n for n in n_samples_range], loc="upper right")
plt.xlabel("Distortion eps")
plt.ylabel("Minimum number of dimensions")
plt.title("Johnson-Lindenstrauss bounds:\nn_components vs eps")
# Part 2: perform sparse random projection of some digits images which are
# quite low dimensional and dense or documents of the 20 newsgroups dataset
# which is both high dimensional and sparse
if '--twenty-newsgroups' in sys.argv:
# Need an internet connection hence not enabled by default
data = fetch_20newsgroups_vectorized().data[:500]
else:
data = load_digits().data[:500]
n_samples, n_features = data.shape
print("Embedding %d samples with dim %d using various random projections"
% (n_samples, n_features))
n_components_range = np.array([300, 1000, 10000])
dists = euclidean_distances(data, squared=True).ravel()
# select only non-identical samples pairs
nonzero = dists != 0
dists = dists[nonzero]
for n_components in n_components_range:
t0 = time()
rp = SparseRandomProjection(n_components=n_components)
projected_data = rp.fit_transform(data)
print("Projected %d samples from %d to %d in %0.3fs"
% (n_samples, n_features, n_components, time() - t0))
if hasattr(rp, 'components_'):
n_bytes = rp.components_.data.nbytes
n_bytes += rp.components_.indices.nbytes
print("Random matrix with size: %0.3fMB" % (n_bytes / 1e6))
projected_dists = euclidean_distances(
projected_data, squared=True).ravel()[nonzero]
plt.figure()
plt.hexbin(dists, projected_dists, gridsize=100, cmap=plt.cm.PuBu)
plt.xlabel("Pairwise squared distances in original space")
plt.ylabel("Pairwise squared distances in projected space")
plt.title("Pairwise distances distribution for n_components=%d" %
n_components)
cb = plt.colorbar()
cb.set_label('Sample pairs counts')
rates = projected_dists / dists
print("Mean distances rate: %0.2f (%0.2f)"
% (np.mean(rates), np.std(rates)))
plt.figure()
plt.hist(rates, bins=50, normed=True, range=(0., 2.))
plt.xlabel("Squared distances rate: projected / original")
plt.ylabel("Distribution of samples pairs")
plt.title("Histogram of pairwise distance rates for n_components=%d" %
n_components)
# TODO: compute the expected value of eps and add them to the previous plot
# as vertical lines / region
plt.show()
| bsd-3-clause |
tombstone/models | research/keypointnet/main.py | 4 | 21991 | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""KeypointNet!!
A reimplementation of 'Discovery of Latent 3D Keypoints via End-to-end
Geometric Reasoning' keypoint network. Given a single 2D image of a known class,
this network can predict a set of 3D keypoints that are consistent across
viewing angles of the same object and across object instances. These keypoints
and their detectors are discovered and learned automatically without
keypoint location supervision.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import matplotlib.pyplot as plt
import numpy as np
import os
from scipy import misc
import sys
import tensorflow as tf
import tensorflow.contrib.slim as slim
import utils
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_boolean("predict", False, "Running inference if true")
tf.app.flags.DEFINE_string(
"input",
"",
"Input folder containing images")
tf.app.flags.DEFINE_string("model_dir", None, "Estimator model_dir")
tf.app.flags.DEFINE_string(
"dset",
"",
"Path to the directory containing the dataset.")
tf.app.flags.DEFINE_integer("steps", 200000, "Training steps")
tf.app.flags.DEFINE_integer("batch_size", 8, "Size of mini-batch.")
tf.app.flags.DEFINE_string(
"hparams", "",
"A comma-separated list of `name=value` hyperparameter values. This flag "
"is used to override hyperparameter settings either when manually "
"selecting hyperparameters or when using Vizier.")
tf.app.flags.DEFINE_integer(
"sync_replicas", -1,
"If > 0, use SyncReplicasOptimizer and use this many replicas per sync.")
# Fixed input size 128 x 128.
vw = vh = 128
def create_input_fn(split, batch_size):
"""Returns input_fn for tf.estimator.Estimator.
Reads tfrecords and constructs input_fn for either training or eval. All
tfrecords not in test.txt or dev.txt will be assigned to training set.
Args:
split: A string indicating the split. Can be either 'train' or 'validation'.
batch_size: The batch size!
Returns:
input_fn for tf.estimator.Estimator.
Raises:
IOError: If test.txt or dev.txt are not found.
"""
if (not os.path.exists(os.path.join(FLAGS.dset, "test.txt")) or
not os.path.exists(os.path.join(FLAGS.dset, "dev.txt"))):
raise IOError("test.txt or dev.txt not found")
with open(os.path.join(FLAGS.dset, "test.txt"), "r") as f:
testset = [x.strip() for x in f.readlines()]
with open(os.path.join(FLAGS.dset, "dev.txt"), "r") as f:
validset = [x.strip() for x in f.readlines()]
files = os.listdir(FLAGS.dset)
filenames = []
for f in files:
sp = os.path.splitext(f)
if sp[1] != ".tfrecord" or sp[0] in testset:
continue
if ((split == "validation" and sp[0] in validset) or
(split == "train" and sp[0] not in validset)):
filenames.append(os.path.join(FLAGS.dset, f))
def input_fn():
"""input_fn for tf.estimator.Estimator."""
def parser(serialized_example):
"""Parses a single tf.Example into image and label tensors."""
fs = tf.parse_single_example(
serialized_example,
features={
"img0": tf.FixedLenFeature([], tf.string),
"img1": tf.FixedLenFeature([], tf.string),
"mv0": tf.FixedLenFeature([16], tf.float32),
"mvi0": tf.FixedLenFeature([16], tf.float32),
"mv1": tf.FixedLenFeature([16], tf.float32),
"mvi1": tf.FixedLenFeature([16], tf.float32),
})
fs["img0"] = tf.div(tf.to_float(tf.image.decode_png(fs["img0"], 4)), 255)
fs["img1"] = tf.div(tf.to_float(tf.image.decode_png(fs["img1"], 4)), 255)
fs["img0"].set_shape([vh, vw, 4])
fs["img1"].set_shape([vh, vw, 4])
# fs["lr0"] = [fs["mv0"][0]]
# fs["lr1"] = [fs["mv1"][0]]
fs["lr0"] = tf.convert_to_tensor([fs["mv0"][0]])
fs["lr1"] = tf.convert_to_tensor([fs["mv1"][0]])
return fs
np.random.shuffle(filenames)
dataset = tf.data.TFRecordDataset(filenames)
dataset = dataset.map(parser, num_parallel_calls=4)
dataset = dataset.shuffle(400).repeat().batch(batch_size)
dataset = dataset.prefetch(buffer_size=256)
return dataset.make_one_shot_iterator().get_next(), None
return input_fn
class Transformer(object):
"""A utility for projecting 3D points to 2D coordinates and vice versa.
3D points are represented in 4D-homogeneous world coordinates. The pixel
coordinates are represented in normalized device coordinates [-1, 1].
See https://learnopengl.com/Getting-started/Coordinate-Systems.
"""
def __get_matrix(self, lines):
return np.array([[float(y) for y in x.strip().split(" ")] for x in lines])
def __read_projection_matrix(self, filename):
if not os.path.exists(filename):
filename = "/cns/vz-d/home/supasorn/datasets/cars/projection.txt"
with open(filename, "r") as f:
lines = f.readlines()
return self.__get_matrix(lines)
def __init__(self, w, h, dataset_dir):
self.w = w
self.h = h
p = self.__read_projection_matrix(dataset_dir + "projection.txt")
# transposed of inversed projection matrix.
self.pinv_t = tf.constant([[1.0 / p[0, 0], 0, 0,
0], [0, 1.0 / p[1, 1], 0, 0], [0, 0, 1, 0],
[0, 0, 0, 1]])
self.f = p[0, 0]
def project(self, xyzw):
"""Projects homogeneous 3D coordinates to normalized device coordinates."""
z = xyzw[:, :, 2:3] + 1e-8
return tf.concat([-self.f * xyzw[:, :, :2] / z, z], axis=2)
def unproject(self, xyz):
"""Unprojects normalized device coordinates with depth to 3D coordinates."""
z = xyz[:, :, 2:]
xy = -xyz * z
def batch_matmul(a, b):
return tf.reshape(
tf.matmul(tf.reshape(a, [-1, a.shape[2].value]), b),
[-1, a.shape[1].value, a.shape[2].value])
return batch_matmul(
tf.concat([xy[:, :, :2], z, tf.ones_like(z)], axis=2), self.pinv_t)
def meshgrid(h):
"""Returns a meshgrid ranging from [-1, 1] in x, y axes."""
r = np.arange(0.5, h, 1) / (h / 2) - 1
ranx, rany = tf.meshgrid(r, -r)
return tf.to_float(ranx), tf.to_float(rany)
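# Worked example (h = 4): np.arange(0.5, 4, 1) / 2 - 1 gives the grid centers
# [-0.75, -0.25, 0.25, 0.75], so pixel centers are mapped into the open
# interval (-1, 1) as the docstring states.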
def estimate_rotation(xyz0, xyz1, pconf, noise):
"""Estimates the rotation between two sets of keypoints.
The rotation is estimated by first subtracting mean from each set of keypoints
and computing SVD of the covariance matrix.
Args:
xyz0: [batch, num_kp, 3] The first set of keypoints.
xyz1: [batch, num_kp, 3] The second set of keypoints.
pconf: [batch, num_kp] The weights used to compute the rotation estimate.
noise: A number indicating the noise added to the keypoints.
Returns:
[batch, 3, 3] A batch of transposed 3 x 3 rotation matrices.
"""
xyz0 += tf.random_normal(tf.shape(xyz0), mean=0, stddev=noise)
xyz1 += tf.random_normal(tf.shape(xyz1), mean=0, stddev=noise)
pconf2 = tf.expand_dims(pconf, 2)
cen0 = tf.reduce_sum(xyz0 * pconf2, 1, keepdims=True)
cen1 = tf.reduce_sum(xyz1 * pconf2, 1, keepdims=True)
x = xyz0 - cen0
y = xyz1 - cen1
cov = tf.matmul(tf.matmul(x, tf.matrix_diag(pconf), transpose_a=True), y)
_, u, v = tf.svd(cov, full_matrices=True)
d = tf.matrix_determinant(tf.matmul(v, u, transpose_b=True))
ud = tf.concat(
[u[:, :, :-1], u[:, :, -1:] * tf.expand_dims(tf.expand_dims(d, 1), 1)],
axis=2)
return tf.matmul(ud, v, transpose_b=True)
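# Note: the construction above is the weighted orthogonal-Procrustes (Kabsch)
# solution; scaling the last column of ``u`` by det(v u^T) forces the result
# to be a proper rotation (determinant +1) rather than a reflection.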
def relative_pose_loss(xyz0, xyz1, rot, pconf, noise):
"""Computes the relative pose loss (chordal, angular).
Args:
xyz0: [batch, num_kp, 3] The first set of keypoints.
xyz1: [batch, num_kp, 3] The second set of keypoints.
rot: [batch, 4, 4] The ground-truth rotation matrices.
pconf: [batch, num_kp] The weights used to compute the rotation estimate.
noise: A number indicating the noise added to the keypoints.
Returns:
A tuple (chordal loss, angular loss).
"""
r_transposed = estimate_rotation(xyz0, xyz1, pconf, noise)
rotation = rot[:, :3, :3]
frob_sqr = tf.reduce_sum(tf.square(r_transposed - rotation), axis=[1, 2])
frob = tf.sqrt(frob_sqr)
return tf.reduce_mean(frob_sqr), \
2.0 * tf.reduce_mean(tf.asin(tf.minimum(1.0, frob / (2 * math.sqrt(2)))))
def separation_loss(xyz, delta):
"""Computes the separation loss.
Args:
xyz: [batch, num_kp, 3] Input keypoints.
delta: A separation threshold. Incur 0 cost if the distance >= delta.
Returns:
The separation loss.
"""
num_kp = tf.shape(xyz)[1]
t1 = tf.tile(xyz, [1, num_kp, 1])
t2 = tf.reshape(tf.tile(xyz, [1, 1, num_kp]), tf.shape(t1))
diffsq = tf.square(t1 - t2)
# -> [batch, num_kp ^ 2]
lensqr = tf.reduce_sum(diffsq, axis=2)
return (tf.reduce_sum(tf.maximum(-lensqr + delta, 0.0)) / tf.to_float(
num_kp * FLAGS.batch_size * 2))
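# Worked sketch (hypothetical numbers): with delta = 0.05, an ordered pair of
# keypoints at squared distance 0.01 contributes max(0.05 - 0.01, 0) = 0.04 to
# the sum (counted once per ordering), while pairs farther apart than
# sqrt(delta) contribute nothing.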
def consistency_loss(uv0, uv1, pconf):
"""Computes multi-view consistency loss between two sets of keypoints.
Args:
uv0: [batch, num_kp, 2] The first set of keypoint 2D coordinates.
uv1: [batch, num_kp, 2] The second set of keypoint 2D coordinates.
pconf: [batch, num_kp] The weights used to compute the rotation estimate.
Returns:
The consistency loss.
"""
# [batch, num_kp, 2]
wd = tf.square(uv0 - uv1) * tf.expand_dims(pconf, 2)
wd = tf.reduce_sum(wd, axis=[1, 2])
return tf.reduce_mean(wd)
def variance_loss(probmap, ranx, rany, uv):
"""Computes the variance loss as part of Sillhouette consistency.
Args:
probmap: [batch, num_kp, h, w] The distribution map of keypoint locations.
ranx: X-axis meshgrid.
rany: Y-axis meshgrid.
uv: [batch, num_kp, 2] Keypoint locations (in NDC).
Returns:
The variance loss.
"""
ran = tf.stack([ranx, rany], axis=2)
sh = tf.shape(ran)
# [batch, num_kp, vh, vw, 2]
ran = tf.reshape(ran, [1, 1, sh[0], sh[1], 2])
sh = tf.shape(uv)
uv = tf.reshape(uv, [sh[0], sh[1], 1, 1, 2])
diff = tf.reduce_sum(tf.square(uv - ran), axis=4)
diff *= probmap
return tf.reduce_mean(tf.reduce_sum(diff, axis=[2, 3]))
def dilated_cnn(images, num_filters, is_training):
"""Constructs a base dilated convolutional network.
Args:
images: [batch, h, w, 3] Input RGB images.
num_filters: The number of filters for all layers.
is_training: True if this function is called during training.
Returns:
Output of this dilated CNN.
"""
net = images
with slim.arg_scope(
[slim.conv2d, slim.fully_connected],
normalizer_fn=slim.batch_norm,
activation_fn=lambda x: tf.nn.leaky_relu(x, alpha=0.1),
normalizer_params={"is_training": is_training}):
for i, r in enumerate([1, 1, 2, 4, 8, 16, 1, 2, 4, 8, 16, 1]):
net = slim.conv2d(net, num_filters, [3, 3], rate=r, scope="dconv%d" % i)
return net
def orientation_network(images, num_filters, is_training):
"""Constructs a network that infers the orientation of an object.
Args:
images: [batch, h, w, 3] Input RGB images.
num_filters: The number of filters for all layers.
is_training: True if this function is called during training.
Returns:
Output of the orientation network.
"""
with tf.variable_scope("OrientationNetwork"):
net = dilated_cnn(images, num_filters, is_training)
modules = 2
prob = slim.conv2d(net, 2, [3, 3], rate=1, activation_fn=None)
prob = tf.transpose(prob, [0, 3, 1, 2])
prob = tf.reshape(prob, [-1, modules, vh * vw])
prob = tf.nn.softmax(prob)
ranx, rany = meshgrid(vh)
prob = tf.reshape(prob, [-1, 2, vh, vw])
sx = tf.reduce_sum(prob * ranx, axis=[2, 3])
sy = tf.reduce_sum(prob * rany, axis=[2, 3]) # -> batch x modules
out_xy = tf.reshape(tf.stack([sx, sy], -1), [-1, modules, 2])
return out_xy
def keypoint_network(rgba,
num_filters,
num_kp,
is_training,
lr_gt=None,
anneal=1):
"""Constructs our main keypoint network that predicts 3D keypoints.
Args:
rgba: [batch, h, w, 4] Input RGB images with alpha channel.
num_filters: The number of filters for all layers.
num_kp: The number of keypoints.
is_training: True if this function is called during training.
lr_gt: The groundtruth orientation flag used at the beginning of training.
Then we linearly anneal in the prediction.
anneal: A number between [0, 1] where 1 means using the ground-truth
orientation and 0 means using our estimate.
Returns:
uv: [batch, num_kp, 2] 2D locations of keypoints.
z: [batch, num_kp] The depth of keypoints.
orient: [batch, 2, 2] Two 2D coordinates that correspond to [1, 0, 0] and
[-1, 0, 0] in object space.
sill: The silhouette loss.
variance: The variance loss.
prob_viz: A visualization of all predicted keypoints.
prob_vizs: A list of visualizations of each keypoint.
"""
images = rgba[:, :, :, :3]
# [batch, 1]
orient = orientation_network(images, num_filters * 0.5, is_training)
# [batch, 1]
lr_estimated = tf.maximum(0.0, tf.sign(orient[:, 0, :1] - orient[:, 1, :1]))
if lr_gt is None:
lr = lr_estimated
else:
lr_gt = tf.maximum(0.0, tf.sign(lr_gt[:, :1]))
lr = tf.round(lr_gt * anneal + lr_estimated * (1 - anneal))
lrtiled = tf.tile(
tf.expand_dims(tf.expand_dims(lr, 1), 1),
[1, images.shape[1], images.shape[2], 1])
images = tf.concat([images, lrtiled], axis=3)
mask = rgba[:, :, :, 3]
mask = tf.cast(tf.greater(mask, tf.zeros_like(mask)), dtype=tf.float32)
net = dilated_cnn(images, num_filters, is_training)
# The probability distribution map.
prob = slim.conv2d(
net, num_kp, [3, 3], rate=1, scope="conv_xy", activation_fn=None)
# We added the fixed camera distance as a bias.
z = -30 + slim.conv2d(
net, num_kp, [3, 3], rate=1, scope="conv_z", activation_fn=None)
prob = tf.transpose(prob, [0, 3, 1, 2])
z = tf.transpose(z, [0, 3, 1, 2])
prob = tf.reshape(prob, [-1, num_kp, vh * vw])
prob = tf.nn.softmax(prob, name="softmax")
ranx, rany = meshgrid(vh)
prob = tf.reshape(prob, [-1, num_kp, vh, vw])
# These are for visualizing the distribution maps.
prob_viz = tf.expand_dims(tf.reduce_sum(prob, 1), 3)
prob_vizs = [tf.expand_dims(prob[:, i, :, :], 3) for i in range(num_kp)]
sx = tf.reduce_sum(prob * ranx, axis=[2, 3])
sy = tf.reduce_sum(prob * rany, axis=[2, 3]) # -> batch x num_kp
# [batch, num_kp]
sill = tf.reduce_sum(prob * tf.expand_dims(mask, 1), axis=[2, 3])
sill = tf.reduce_mean(-tf.log(sill + 1e-12))
z = tf.reduce_sum(prob * z, axis=[2, 3])
uv = tf.reshape(tf.stack([sx, sy], -1), [-1, num_kp, 2])
variance = variance_loss(prob, ranx, rany, uv)
return uv, z, orient, sill, variance, prob_viz, prob_vizs
def model_fn(features, labels, mode, hparams):
"""Returns model_fn for tf.estimator.Estimator."""
del labels
is_training = (mode == tf.estimator.ModeKeys.TRAIN)
t = Transformer(vw, vh, FLAGS.dset)
def func1(x):
return tf.transpose(tf.reshape(features[x], [-1, 4, 4]), [0, 2, 1])
mv = [func1("mv%d" % i) for i in range(2)]
mvi = [func1("mvi%d" % i) for i in range(2)]
uvz = [None] * 2
uvz_proj = [None] * 2 # uvz coordinates projected on to the other view.
viz = [None] * 2
vizs = [None] * 2
loss_sill = 0
loss_variance = 0
loss_con = 0
loss_sep = 0
loss_lr = 0
for i in range(2):
with tf.variable_scope("KeypointNetwork", reuse=i > 0):
# anneal: 1 = using ground-truth, 0 = using our estimate orientation.
anneal = tf.to_float(hparams.lr_anneal_end - tf.train.get_global_step())
anneal = tf.clip_by_value(
anneal / (hparams.lr_anneal_end - hparams.lr_anneal_start), 0.0, 1.0)
uv, z, orient, sill, variance, viz[i], vizs[i] = keypoint_network(
features["img%d" % i],
hparams.num_filters,
hparams.num_kp,
is_training,
lr_gt=features["lr%d" % i],
anneal=anneal)
# x-positive/negative axes (dominant direction).
xp_axis = tf.tile(
tf.constant([[[1.0, 0, 0, 1], [-1.0, 0, 0, 1]]]),
[tf.shape(orient)[0], 1, 1])
# [batch, 2, 4] = [batch, 2, 4] x [batch, 4, 4]
xp = tf.matmul(xp_axis, mv[i])
# [batch, 2, 3]
xp = t.project(xp)
loss_lr += tf.losses.mean_squared_error(orient[:, :, :2], xp[:, :, :2])
loss_variance += variance
loss_sill += sill
uv = tf.reshape(uv, [-1, hparams.num_kp, 2])
z = tf.reshape(z, [-1, hparams.num_kp, 1])
# [batch, num_kp, 3]
uvz[i] = tf.concat([uv, z], axis=2)
world_coords = tf.matmul(t.unproject(uvz[i]), mvi[i])
# [batch, num_kp, 3]
uvz_proj[i] = t.project(tf.matmul(world_coords, mv[1 - i]))
pconf = tf.ones(
[tf.shape(uv)[0], tf.shape(uv)[1]], dtype=tf.float32) / hparams.num_kp
for i in range(2):
loss_con += consistency_loss(uvz_proj[i][:, :, :2], uvz[1 - i][:, :, :2],
pconf)
loss_sep += separation_loss(
t.unproject(uvz[i])[:, :, :3], hparams.sep_delta)
chordal, angular = relative_pose_loss(
t.unproject(uvz[0])[:, :, :3],
t.unproject(uvz[1])[:, :, :3], tf.matmul(mvi[0], mv[1]), pconf,
hparams.noise)
loss = (
hparams.loss_pose * angular +
hparams.loss_con * loss_con +
hparams.loss_sep * loss_sep +
hparams.loss_sill * loss_sill +
hparams.loss_lr * loss_lr +
hparams.loss_variance * loss_variance
)
def touint8(img):
return tf.cast(img * 255.0, tf.uint8)
with tf.variable_scope("output"):
tf.summary.image("0_img0", touint8(features["img0"][:, :, :, :3]))
tf.summary.image("1_combined", viz[0])
for i in range(hparams.num_kp):
tf.summary.image("2_f%02d" % i, vizs[0][i])
with tf.variable_scope("stats"):
tf.summary.scalar("anneal", anneal)
tf.summary.scalar("closs", loss_con)
tf.summary.scalar("seploss", loss_sep)
tf.summary.scalar("angular", angular)
tf.summary.scalar("chordal", chordal)
tf.summary.scalar("lrloss", loss_lr)
tf.summary.scalar("sill", loss_sill)
tf.summary.scalar("vloss", loss_variance)
return {
"loss": loss,
"predictions": {
"img0": features["img0"],
"img1": features["img1"],
"uvz0": uvz[0],
"uvz1": uvz[1]
},
"eval_metric_ops": {
"closs": tf.metrics.mean(loss_con),
"angular_loss": tf.metrics.mean(angular),
"chordal_loss": tf.metrics.mean(chordal),
}
}
def predict(input_folder, hparams):
"""Predicts keypoints on all images in input_folder."""
cols = plt.cm.get_cmap("rainbow")(
np.linspace(0, 1.0, hparams.num_kp))[:, :4]
img = tf.placeholder(tf.float32, shape=(1, 128, 128, 4))
with tf.variable_scope("KeypointNetwork"):
ret = keypoint_network(
img, hparams.num_filters, hparams.num_kp, False)
uv = tf.reshape(ret[0], [-1, hparams.num_kp, 2])
z = tf.reshape(ret[1], [-1, hparams.num_kp, 1])
uvz = tf.concat([uv, z], axis=2)
sess = tf.Session()
saver = tf.train.Saver()
ckpt = tf.train.get_checkpoint_state(FLAGS.model_dir)
print("loading model: ", ckpt.model_checkpoint_path)
saver.restore(sess, ckpt.model_checkpoint_path)
files = [x for x in os.listdir(input_folder)
if x[-3:] in ["jpg", "png"]]
output_folder = os.path.join(input_folder, "output")
if not os.path.exists(output_folder):
os.mkdir(output_folder)
for f in files:
orig = misc.imread(os.path.join(input_folder, f)).astype(float) / 255
if orig.shape[2] == 3:
orig = np.concatenate((orig, np.ones_like(orig[:, :, :1])), axis=2)
uv_ret = sess.run(uvz, feed_dict={img: np.expand_dims(orig, 0)})
utils.draw_ndc_points(orig, uv_ret.reshape(hparams.num_kp, 3), cols)
misc.imsave(os.path.join(output_folder, f), orig)
def _default_hparams():
"""Returns default or overridden user-specified hyperparameters."""
hparams = tf.contrib.training.HParams(
num_filters=64, # Number of filters.
num_kp=10, # Number of keypoints.
loss_pose=0.2, # Pose Loss.
loss_con=1.0, # Multiview consistency Loss.
loss_sep=1.0, # Separation Loss.
loss_sill=1.0, # Silhouette Loss.
loss_lr=1.0, # Orientation Loss.
loss_variance=0.5, # Variance Loss (part of Silhouette loss).
sep_delta=0.05, # Separation threshold.
noise=0.1, # Noise added during estimating rotation.
learning_rate=1.0e-3,
lr_anneal_start=30000, # When to anneal in the orientation prediction.
lr_anneal_end=60000, # When to use the prediction completely.
)
if FLAGS.hparams:
hparams = hparams.parse(FLAGS.hparams)
return hparams
def main(argv):
del argv
hparams = _default_hparams()
if FLAGS.predict:
predict(FLAGS.input, hparams)
else:
utils.train_and_eval(
model_dir=FLAGS.model_dir,
model_fn=model_fn,
input_fn=create_input_fn,
hparams=hparams,
steps=FLAGS.steps,
batch_size=FLAGS.batch_size,
save_checkpoints_secs=600,
eval_throttle_secs=1800,
eval_steps=5,
sync_replicas=FLAGS.sync_replicas,
)
if __name__ == "__main__":
sys.excepthook = utils.colored_hook(
os.path.dirname(os.path.realpath(__file__)))
tf.app.run()
| apache-2.0 |
MJuddBooth/pandas | pandas/tests/io/msgpack/test_seq.py | 3 | 1171 | # coding: utf-8
import io
import pandas.io.msgpack as msgpack
binarydata = bytes(bytearray(range(256)))
def gen_binary_data(idx):
return binarydata[:idx % 300]
def test_exceeding_unpacker_read_size():
dumpf = io.BytesIO()
packer = msgpack.Packer()
NUMBER_OF_STRINGS = 6
read_size = 16
# 5 ok for read_size=16, while 6 glibc detected *** python: double free or
# corruption (fasttop):
# 20 ok for read_size=256, while 25 segfaults / glibc detected *** python:
# double free or corruption (!prev)
# 40 ok for read_size=1024, while 50 introduces errors
# 7000 ok for read_size=1024*1024, while 8000 leads to glibc detected ***
# python: double free or corruption (!prev):
for idx in range(NUMBER_OF_STRINGS):
data = gen_binary_data(idx)
dumpf.write(packer.pack(data))
f = io.BytesIO(dumpf.getvalue())
dumpf.close()
unpacker = msgpack.Unpacker(f, read_size=read_size, use_list=1)
read_count = 0
for idx, o in enumerate(unpacker):
assert type(o) == bytes
assert o == gen_binary_data(idx)
read_count += 1
assert read_count == NUMBER_OF_STRINGS
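# Minimal round-trip sketch with the same vendored API (assumes packb/unpackb
# are exposed, as in upstream msgpack-python):
#
#   data = msgpack.packb(b'abc')
#   assert msgpack.unpackb(data) == b'abc'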
| bsd-3-clause |
voxlol/scikit-learn | examples/ensemble/plot_gradient_boosting_oob.py | 230 | 4762 | """
======================================
Gradient Boosting Out-of-Bag estimates
======================================
Out-of-bag (OOB) estimates can be a useful heuristic to estimate
the "optimal" number of boosting iterations.
OOB estimates are almost identical to cross-validation estimates but
they can be computed on-the-fly without the need for repeated model
fitting.
OOB estimates are only available for Stochastic Gradient Boosting
(i.e. ``subsample < 1.0``), the estimates are derived from the improvement
in loss based on the examples not included in the bootstrap sample
(the so-called out-of-bag examples).
The OOB estimator is a pessimistic estimator of the true
test loss, but remains a fairly good approximation for a small number of trees.
The figure shows the cumulative sum of the negative OOB improvements
as a function of the boosting iteration. As you can see, it tracks the test
loss for the first hundred iterations but then diverges in a
pessimistic way.
The figure also shows the performance of 3-fold cross validation which
usually gives a better estimate of the test loss
but is computationally more demanding.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import ensemble
from sklearn.cross_validation import KFold
from sklearn.cross_validation import train_test_split
# Generate data (adapted from G. Ridgeway's gbm example)
n_samples = 1000
random_state = np.random.RandomState(13)
x1 = random_state.uniform(size=n_samples)
x2 = random_state.uniform(size=n_samples)
x3 = random_state.randint(0, 4, size=n_samples)
p = 1 / (1.0 + np.exp(-(np.sin(3 * x1) - 4 * x2 + x3)))
y = random_state.binomial(1, p, size=n_samples)
X = np.c_[x1, x2, x3]
X = X.astype(np.float32)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5,
random_state=9)
# Fit classifier with out-of-bag estimates
params = {'n_estimators': 1200, 'max_depth': 3, 'subsample': 0.5,
'learning_rate': 0.01, 'min_samples_leaf': 1, 'random_state': 3}
clf = ensemble.GradientBoostingClassifier(**params)
clf.fit(X_train, y_train)
acc = clf.score(X_test, y_test)
print("Accuracy: {:.4f}".format(acc))
n_estimators = params['n_estimators']
x = np.arange(n_estimators) + 1
def heldout_score(clf, X_test, y_test):
"""compute deviance scores on ``X_test`` and ``y_test``. """
score = np.zeros((n_estimators,), dtype=np.float64)
for i, y_pred in enumerate(clf.staged_decision_function(X_test)):
score[i] = clf.loss_(y_test, y_pred)
return score
def cv_estimate(n_folds=3):
cv = KFold(n=X_train.shape[0], n_folds=n_folds)
cv_clf = ensemble.GradientBoostingClassifier(**params)
val_scores = np.zeros((n_estimators,), dtype=np.float64)
for train, test in cv:
cv_clf.fit(X_train[train], y_train[train])
val_scores += heldout_score(cv_clf, X_train[test], y_train[test])
val_scores /= n_folds
return val_scores
# Estimate the best n_estimators using cross-validation
cv_score = cv_estimate(3)
# Compute the best n_estimators for the test data
test_score = heldout_score(clf, X_test, y_test)
# negative cumulative sum of oob improvements
cumsum = -np.cumsum(clf.oob_improvement_)
# min loss according to OOB
oob_best_iter = x[np.argmin(cumsum)]
# min loss according to test (normalize such that first loss is 0)
test_score -= test_score[0]
test_best_iter = x[np.argmin(test_score)]
# min loss according to cv (normalize such that first loss is 0)
cv_score -= cv_score[0]
cv_best_iter = x[np.argmin(cv_score)]
# color brew for the three curves
oob_color = list(map(lambda x: x / 256.0, (190, 174, 212)))
test_color = list(map(lambda x: x / 256.0, (127, 201, 127)))
cv_color = list(map(lambda x: x / 256.0, (253, 192, 134)))
# plot curves and vertical lines for best iterations
plt.plot(x, cumsum, label='OOB loss', color=oob_color)
plt.plot(x, test_score, label='Test loss', color=test_color)
plt.plot(x, cv_score, label='CV loss', color=cv_color)
plt.axvline(x=oob_best_iter, color=oob_color)
plt.axvline(x=test_best_iter, color=test_color)
plt.axvline(x=cv_best_iter, color=cv_color)
# add three vertical lines to xticks
xticks = plt.xticks()
xticks_pos = np.array(xticks[0].tolist() +
[oob_best_iter, cv_best_iter, test_best_iter])
xticks_label = np.array(list(map(lambda t: int(t), xticks[0])) +
['OOB', 'CV', 'Test'])
ind = np.argsort(xticks_pos)
xticks_pos = xticks_pos[ind]
xticks_label = xticks_label[ind]
plt.xticks(xticks_pos, xticks_label)
plt.legend(loc='upper right')
plt.ylabel('normalized loss')
plt.xlabel('number of iterations')
plt.show()
| bsd-3-clause |
penguinscontrol/Spinal-Cord-Modeling | ClarkesNetwork/test2_dend.py | 1 | 6524 | # -*- coding: utf-8 -*-
"""
Created on Thu Jan 07 11:08:41 2016
@author: Radu
"""
import numpy
import simrun
import time
from ballandstick_clarke_new import ClarkeRelay
#from neuron import h,gui
from neuron import h
h.load_file('stdgui.hoc')
#h.load_file("stdrun.hoc")
from math import sin, cos, pi
from matplotlib import pyplot
from itertools import izip
from neuronpy.graphics import spikeplot
from neuronpy.util import spiketrain
def tweak_leak(cells, Ncells):
for a in range(Ncells):
cells[a].soma.el_clarke =\
(cells[a].soma.ina_clarke + cells[a].soma.ikrect_clarke\
+ cells[a].soma.icaN_clarke + cells[a].soma.icaL_clarke\
+ cells[a].soma.ikca_clarke + cells[a].soma.inap_clarke\
+ cells[a].soma.gl_clarke*cells[a].soma.v) / cells[a].soma.gl_clarke
cells[a].dend.e_pas =\
(cells[a].dend.g_pas * cells[a].dend.v) / cells[a].dend.g_pas
cells = []
N = 3
r = 50 # Radius of cell locations from origin (0,0,0) in microns
h.celsius = 37
h.v_init = -65
h.dt = 0.01
fih = h.FInitializeHandler(2,(tweak_leak,(cells,N)))
#Set up Draw
fig1 = pyplot.figure(figsize=(8,4))
ax1a = fig1.add_subplot(2,1,1)
ax2a = fig1.add_subplot(2,1,2, sharex = ax1a)
fig2 = pyplot.figure(figsize=(5,16))
ax1b = fig2.add_subplot(3,1,1)
ax2b = fig2.add_subplot(3,1,2, sharex = ax1b)
ax3b = fig2.add_subplot(3,1,3, sharex = ax1b)
ax1b.set_xlim([0,20])
ax1b.set_ylim([0,1])
#ax1b.set_ylim([-72,-60])
#step = 2.5e-2 #CaN
#step = 1e-5 #CaL
#step = 5e-2 #KCa
#step = 4e-5 #napbar
#step = 5 #tau_mc
#step = 1 #tau_hc
#step = 1e-3 #dap weight
#step = 5e-2 # gkrect
#step = 0.01 #Na
#step = 5 #tau_mp_bar
#step = 1 # tau_n_bar
#step = 1e-2 #stim2
step = 0.2
syn_loc = 0
num_steps = 0
for i in numpy.linspace(0, step*num_steps, num_steps+1):
#stim2.amp += step
cells[:] = []
#syn_loc = i
for a in range(N):
cell = ClarkeRelay()
# When cells are created, the soma location is at (0,0,0) and
# the dendrite extends along the X-axis.
# First, at the origin, rotate about Z.
cell.rotateZ(a*2*pi/N)
# Then reposition
x_loc = sin(a * 2 * pi / N) * r
y_loc = cos(a * 2 * pi / N) * r
cell.set_position(x_loc, y_loc, 0)
cells.append(cell)
#cells[a].soma.gcaN_clarke = cells[a].soma.gcaN_clarke + step
#cells[a].soma.gcaL_clarke = cells[a].soma.gcaL_clarke + step
#cells[a].soma.gcak_clarke = cells[a].soma.gcak_clarke + step
#cells[a].soma.gnapbar_clarke = cells[a].soma.gnapbar_clarke + step
#cells[a].soma.tau_mc_clarke = cells[a].soma.tau_mc_clarke + step
#cells[a].soma.tau_hc_clarke = cells[a].soma.tau_hc_clarke + step
#cells[a].dap_nc_.weight[0] = cells[a].dap_nc_.weight[0] +step
#cells[a].soma.gkrect_clarke = cells[a].soma.gkrect_clarke + step
#cells[a].soma.tau_mp_bar_clarke = cells[a].soma.tau_mp_bar_clarke + step
#cells[a].soma.tau_n_bar_clarke = cells[a].soma.tau_n_bar_clarke + step
#cells[a].soma.gnabar_clarke = cells[a].soma.gnabar_clarke + step
#shape_window = h.PlotShape()
#shape_window.exec_menu('Show Diam')
cellSurface = h.area(0.5, sec = cells[0].soma)
## Make a netstim
stim = h.NetStim() # Make a new stimulator
stim.interval = 150
stim.noise = 0
stim.number = 1
stim.start = 9200
## Attach it to a synapse at syn_loc
ncstim = h.NetCon(stim, cells[0].syn_I)
ncstim.delay = 0
ncstim.weight[0] = 1.75e-3 # NetCon weight is a vector.
#Stims and clamps
#stim = h.IClamp(cells[0].dend(syn_loc))
#stim.delay = 200
#stim.dur = .25
#stim.amp = 1e-2
#clamp = h.SEClamp(cells[0].soma(0.5))
#clamp.dur1 = 1e9
#clamp.amp1 = -65
#clamp.rs = 1e2
stim2 = h.IClamp(cells[0].soma(0.5))
stim2.delay = 9500
stim2.dur = 300
stim2.amp = 0
#stim.amp = stim.amp-stim2.amp
#stim.amp = 0
soma_v_vec, soma_m_vec, soma_h_vec, soma_n_vec,\
soma_inap_vec, soma_idap_vec, soma_ical_vec,\
soma_ican_vec, soma_ikca_vec, soma_ina_vec, soma_ikrect_vec,\
dend_v_vec, t_vec\
= simrun.set_recording_vectors(cells[0])
# Set recording vectors
syn_i_vec = h.Vector()
syn_i_vec.record(cells[0].syn_I._ref_i)
simrun.simulate()
vs = numpy.array(soma_v_vec.to_python())
vd = numpy.array(dend_v_vec.to_python())
t = numpy.array(t_vec.to_python())
tempVrest = vs[abs(t-9195) < 2*h.dt]
Vrest = tempVrest[0]
tempVrest = vd[abs(t-9195) < 2*h.dt]
Vrestd = tempVrest[0]
#time.sleep(1)
lWid = 1
soma_plot = ax1a.semilogy(t-9198, vs-Vrest, color='black', lw=lWid)
dend_plot = ax1a.semilogy(t-9198, vd-Vrestd, color='red', lw=lWid)
ax1a.set_ylim([0.1,2])
ax1a.set_xlim([0,20])
syn_plot = ax2a.plot(t_vec, syn_i_vec, color='blue', lw=lWid)
ax2a.legend(syn_plot,\
['injected current'])
lWid = 1
soma_plot = ax1b.plot(t-9198, vs-Vrest, color='black', lw=lWid)
m_plot = ax2b.plot(t_vec, soma_m_vec, color='blue', lw=lWid)
h_plot = ax2b.plot(t_vec, soma_h_vec, color='red', lw=lWid)
n_plot = ax2b.plot(t_vec, soma_n_vec, color='black', lw=lWid)
cal_plot = ax3b.plot(t_vec, soma_ical_vec, color='blue', lw=lWid)
can_plot = ax3b.plot(t_vec, soma_ican_vec, color='red', lw=lWid)
#ina_plot = ax3b.plot(t_vec, soma_ina_vec, color='magenta', lw=lWid)
#ikrect_plot = ax3b.plot(t_vec, soma_ikrect_vec, color='cyan', lw=lWid)
kca_plot = ax3b.plot(t_vec, soma_ikca_vec, color='black', lw=lWid)
nap_plot = ax3b.plot(t_vec, soma_inap_vec, color='green', lw=lWid)
soma_idap_mAcm2 = numpy.array(soma_idap_vec.to_python())
soma_idap_mAcm2 = soma_idap_mAcm2/(100*cellSurface)
dap_plot = ax3b.plot(numpy.array(t_vec.to_python()),\
soma_idap_mAcm2, color='orange', lw=lWid)
#dap_plot = ax3b.plot(t_vec, soma_idap_vec, color='orange')
rev_plot = ax1b.plot([t_vec[0], t_vec[-1]], [0, 0],
color='blue', linestyle=':')
ax1a.legend(soma_plot + dend_plot,
['soma', 'dend(0.5)', 'syn reversal'])
ax1a.set_ylabel('mV')
ax2a.set_ylabel(h.units('ExpSyn.i'))
ax2a.set_xlabel('time (ms)')
ax1b.set_ylabel('mV')
ax1b.legend(soma_plot + rev_plot,['V_{soma}', 'V_rest'])
ax2b.legend(m_plot + h_plot + n_plot, ['m', 'h', 'n'])
ax2b.set_xlabel('time (ms)')
ax3b.legend(cal_plot + can_plot +\
kca_plot + nap_plot + dap_plot,\
['CaL', 'CaN', 'KCa', 'NaP', 'DAP'])
ax3b.set_xlabel('time (ms)')
pyplot.show()
#h.quit() | gpl-2.0 |
nhejazi/scikit-learn | sklearn/utils/tests/test_linear_assignment.py | 421 | 1349 | # Author: Brian M. Clapper, G Varoquaux
# License: BSD
import numpy as np
# XXX we should be testing the public API here
from sklearn.utils.linear_assignment_ import _hungarian
def test_hungarian():
matrices = [
# Square
([[400, 150, 400],
[400, 450, 600],
[300, 225, 300]],
850 # expected cost
),
# Rectangular variant
([[400, 150, 400, 1],
[400, 450, 600, 2],
[300, 225, 300, 3]],
452 # expected cost
),
# Square
([[10, 10, 8],
[9, 8, 1],
[9, 7, 4]],
18
),
# Rectangular variant
([[10, 10, 8, 11],
[9, 8, 1, 1],
[9, 7, 4, 10]],
15
),
# n == 2, m == 0 matrix
([[], []],
0
),
]
for cost_matrix, expected_total in matrices:
cost_matrix = np.array(cost_matrix)
indexes = _hungarian(cost_matrix)
total_cost = 0
for r, c in indexes:
x = cost_matrix[r, c]
total_cost += x
assert expected_total == total_cost
indexes = _hungarian(cost_matrix.T)
total_cost = 0
for c, r in indexes:
x = cost_matrix[r, c]
total_cost += x
assert expected_total == total_cost
| bsd-3-clause |
buntyke/GPy | GPy/examples/regression.py | 8 | 18746 | # Copyright (c) 2012-2014, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
"""
Gaussian Processes regression examples
"""
try:
from matplotlib import pyplot as pb
except:
pass
import numpy as np
import GPy
def olympic_marathon_men(optimize=True, plot=True):
"""Run a standard Gaussian process regression on the Olympic marathon data."""
try:import pods
except ImportError:
print('pods unavailable, see https://github.com/sods/ods for example datasets')
return
data = pods.datasets.olympic_marathon_men()
# create simple GP Model
m = GPy.models.GPRegression(data['X'], data['Y'])
# set the lengthscale to be something sensible (defaults to 1)
m.kern.lengthscale = 10.
if optimize:
m.optimize('bfgs', max_iters=200)
if plot:
m.plot(plot_limits=(1850, 2050))
return m
def coregionalization_toy(optimize=True, plot=True):
"""
A simple demonstration of coregionalization on two sinusoidal functions.
"""
#build a design matrix with a column of integers indicating the output
X1 = np.random.rand(50, 1) * 8
X2 = np.random.rand(30, 1) * 5
#build a suitable set of observed variables
Y1 = np.sin(X1) + np.random.randn(*X1.shape) * 0.05
Y2 = np.sin(X2) + np.random.randn(*X2.shape) * 0.05 + 2.
m = GPy.models.GPCoregionalizedRegression(X_list=[X1,X2], Y_list=[Y1,Y2])
if optimize:
m.optimize('bfgs', max_iters=100)
if plot:
slices = GPy.util.multioutput.get_slices([X1,X2])
m.plot(fixed_inputs=[(1,0)],which_data_rows=slices[0],Y_metadata={'output_index':0})
m.plot(fixed_inputs=[(1,1)],which_data_rows=slices[1],Y_metadata={'output_index':1},ax=pb.gca())
return m
def coregionalization_sparse(optimize=True, plot=True):
"""
A simple demonstration of coregionalization on two sinusoidal functions using sparse approximations.
"""
#build a design matrix with a column of integers indicating the output
X1 = np.random.rand(50, 1) * 8
X2 = np.random.rand(30, 1) * 5
#build a suitable set of observed variables
Y1 = np.sin(X1) + np.random.randn(*X1.shape) * 0.05
Y2 = np.sin(X2) + np.random.randn(*X2.shape) * 0.05 + 2.
m = GPy.models.SparseGPCoregionalizedRegression(X_list=[X1,X2], Y_list=[Y1,Y2])
if optimize:
m.optimize('bfgs', max_iters=100)
if plot:
slices = GPy.util.multioutput.get_slices([X1,X2])
m.plot(fixed_inputs=[(1,0)],which_data_rows=slices[0],Y_metadata={'output_index':0})
m.plot(fixed_inputs=[(1,1)],which_data_rows=slices[1],Y_metadata={'output_index':1},ax=pb.gca())
pb.ylim(-3,)
return m
def epomeo_gpx(max_iters=200, optimize=True, plot=True):
"""
Perform Gaussian process regression on the latitude and longitude data
from the Mount Epomeo runs. Requires gpxpy to be installed on your system
to load in the data.
"""
try:import pods
except ImportError:
print('pods unavailable, see https://github.com/sods/ods for example datasets')
return
data = pods.datasets.epomeo_gpx()
num_data_list = []
for Xpart in data['X']:
num_data_list.append(Xpart.shape[0])
num_data_array = np.array(num_data_list)
num_data = num_data_array.sum()
Y = np.zeros((num_data, 2))
t = np.zeros((num_data, 2))
start = 0
for Xpart, index in zip(data['X'], range(len(data['X']))):
end = start+Xpart.shape[0]
t[start:end, :] = np.hstack((Xpart[:, 0:1],
index*np.ones((Xpart.shape[0], 1))))
Y[start:end, :] = Xpart[:, 1:3]
num_inducing = 200
Z = np.hstack((np.linspace(t[:,0].min(), t[:, 0].max(), num_inducing)[:, None],
np.random.randint(0, 4, num_inducing)[:, None]))
k1 = GPy.kern.RBF(1)
k2 = GPy.kern.Coregionalize(output_dim=5, rank=5)
k = k1**k2
m = GPy.models.SparseGPRegression(t, Y, kernel=k, Z=Z, normalize_Y=True)
m.constrain_fixed('.*variance', 1.)
m.inducing_inputs.constrain_fixed()
m.Gaussian_noise.variance.constrain_bounded(1e-3, 1e-1)
m.optimize(max_iters=max_iters,messages=True)
return m
def multiple_optima(gene_number=937, resolution=80, model_restarts=10, seed=10000, max_iters=300, optimize=True, plot=True):
"""
Show an example of a multimodal error surface for Gaussian process
regression. Gene 939 has bimodal behaviour where the noisy mode is
higher.
"""
# Contour over a range of length scales and signal/noise ratios.
length_scales = np.linspace(0.1, 60., resolution)
log_SNRs = np.linspace(-3., 4., resolution)
try:import pods
except ImportError:
print('pods unavailable, see https://github.com/sods/ods for example datasets')
return
data = pods.datasets.della_gatta_TRP63_gene_expression(data_set='della_gatta',gene_number=gene_number)
# data['Y'] = data['Y'][0::2, :]
# data['X'] = data['X'][0::2, :]
data['Y'] = data['Y'] - np.mean(data['Y'])
lls = GPy.examples.regression._contour_data(data, length_scales, log_SNRs, GPy.kern.RBF)
if plot:
pb.contour(length_scales, log_SNRs, np.exp(lls), 20, cmap=pb.cm.jet)
ax = pb.gca()
pb.xlabel('length scale')
pb.ylabel('log_10 SNR')
xlim = ax.get_xlim()
ylim = ax.get_ylim()
# Now run a few optimizations
models = []
optim_point_x = np.empty(2)
optim_point_y = np.empty(2)
np.random.seed(seed=seed)
for i in range(0, model_restarts):
# kern = GPy.kern.RBF(1, variance=np.random.exponential(1.), lengthscale=np.random.exponential(50.))
kern = GPy.kern.RBF(1, variance=np.random.uniform(1e-3, 1), lengthscale=np.random.uniform(5, 50))
m = GPy.models.GPRegression(data['X'], data['Y'], kernel=kern)
m.likelihood.variance = np.random.uniform(1e-3, 1)
optim_point_x[0] = m.rbf.lengthscale
optim_point_y[0] = np.log10(m.rbf.variance) - np.log10(m.likelihood.variance);
# optimize
if optimize:
m.optimize('scg', xtol=1e-6, ftol=1e-6, max_iters=max_iters)
optim_point_x[1] = m.rbf.lengthscale
optim_point_y[1] = np.log10(m.rbf.variance) - np.log10(m.likelihood.variance);
if plot:
pb.arrow(optim_point_x[0], optim_point_y[0], optim_point_x[1] - optim_point_x[0], optim_point_y[1] - optim_point_y[0], label=str(i), head_length=1, head_width=0.5, fc='k', ec='k')
models.append(m)
if plot:
ax.set_xlim(xlim)
ax.set_ylim(ylim)
return m # (models, lls)
def _contour_data(data, length_scales, log_SNRs, kernel_call=GPy.kern.RBF):
"""
Evaluate the GP objective function for a given data set for a range of
signal to noise ratios and a range of lengthscales.
    :data_set: A data set from the utils.datasets directory.
:length_scales: a list of length scales to explore for the contour plot.
:log_SNRs: a list of base 10 logarithm signal to noise ratios to explore for the contour plot.
:kernel: a kernel to use for the 'signal' portion of the data.
"""
lls = []
total_var = np.var(data['Y'])
kernel = kernel_call(1, variance=1., lengthscale=1.)
model = GPy.models.GPRegression(data['X'], data['Y'], kernel=kernel)
for log_SNR in log_SNRs:
SNR = 10.**log_SNR
noise_var = total_var / (1. + SNR)
signal_var = total_var - noise_var
model.kern['.*variance'] = signal_var
model.likelihood.variance = noise_var
length_scale_lls = []
for length_scale in length_scales:
model['.*lengthscale'] = length_scale
length_scale_lls.append(model.log_likelihood())
lls.append(length_scale_lls)
return np.array(lls)
def olympic_100m_men(optimize=True, plot=True):
"""Run a standard Gaussian process regression on the Rogers and Girolami olympics data."""
try:import pods
except ImportError:
print('pods unavailable, see https://github.com/sods/ods for example datasets')
return
data = pods.datasets.olympic_100m_men()
# create simple GP Model
m = GPy.models.GPRegression(data['X'], data['Y'])
# set the lengthscale to be something sensible (defaults to 1)
m.rbf.lengthscale = 10
if optimize:
m.optimize('bfgs', max_iters=200)
if plot:
m.plot(plot_limits=(1850, 2050))
return m
def toy_rbf_1d(optimize=True, plot=True):
"""Run a simple demonstration of a standard Gaussian process fitting it to data sampled from an RBF covariance."""
try:import pods
except ImportError:
print('pods unavailable, see https://github.com/sods/ods for example datasets')
return
data = pods.datasets.toy_rbf_1d()
# create simple GP Model
m = GPy.models.GPRegression(data['X'], data['Y'])
if optimize:
m.optimize('bfgs')
if plot:
m.plot()
return m
def toy_rbf_1d_50(optimize=True, plot=True):
"""Run a simple demonstration of a standard Gaussian process fitting it to data sampled from an RBF covariance."""
try:import pods
except ImportError:
print('pods unavailable, see https://github.com/sods/ods for example datasets')
return
data = pods.datasets.toy_rbf_1d_50()
# create simple GP Model
m = GPy.models.GPRegression(data['X'], data['Y'])
if optimize:
m.optimize('bfgs')
if plot:
m.plot()
return m
def toy_poisson_rbf_1d_laplace(optimize=True, plot=True):
"""Run a simple demonstration of a standard Gaussian process fitting it to data sampled from an RBF covariance."""
optimizer='scg'
x_len = 30
X = np.linspace(0, 10, x_len)[:, None]
f_true = np.random.multivariate_normal(np.zeros(x_len), GPy.kern.RBF(1).K(X))
Y = np.array([np.random.poisson(np.exp(f)) for f in f_true])[:,None]
kern = GPy.kern.RBF(1)
poisson_lik = GPy.likelihoods.Poisson()
laplace_inf = GPy.inference.latent_function_inference.Laplace()
# create simple GP Model
m = GPy.core.GP(X, Y, kernel=kern, likelihood=poisson_lik, inference_method=laplace_inf)
if optimize:
m.optimize(optimizer)
if plot:
m.plot()
# plot the real underlying rate function
pb.plot(X, np.exp(f_true), '--k', linewidth=2)
return m
def toy_ARD(max_iters=1000, kernel_type='linear', num_samples=300, D=4, optimize=True, plot=True):
# Create an artificial dataset where the values in the targets (Y)
    # only depend on dimensions 1 and 3 of the inputs (X). Run ARD to
# see if this dependency can be recovered
X1 = np.sin(np.sort(np.random.rand(num_samples, 1) * 10, 0))
X2 = np.cos(np.sort(np.random.rand(num_samples, 1) * 10, 0))
X3 = np.exp(np.sort(np.random.rand(num_samples, 1), 0))
X4 = np.log(np.sort(np.random.rand(num_samples, 1), 0))
X = np.hstack((X1, X2, X3, X4))
Y1 = np.asarray(2 * X[:, 0] + 3).reshape(-1, 1)
Y2 = np.asarray(4 * (X[:, 2] - 1.5 * X[:, 0])).reshape(-1, 1)
Y = np.hstack((Y1, Y2))
Y = np.dot(Y, np.random.rand(2, D));
Y = Y + 0.2 * np.random.randn(Y.shape[0], Y.shape[1])
Y -= Y.mean()
Y /= Y.std()
if kernel_type == 'linear':
kernel = GPy.kern.Linear(X.shape[1], ARD=1)
elif kernel_type == 'rbf_inv':
kernel = GPy.kern.RBF_inv(X.shape[1], ARD=1)
else:
kernel = GPy.kern.RBF(X.shape[1], ARD=1)
kernel += GPy.kern.White(X.shape[1]) + GPy.kern.Bias(X.shape[1])
m = GPy.models.GPRegression(X, Y, kernel)
# len_prior = GPy.priors.inverse_gamma(1,18) # 1, 25
# m.set_prior('.*lengthscale',len_prior)
if optimize:
m.optimize(optimizer='scg', max_iters=max_iters)
if plot:
m.kern.plot_ARD()
return m
def toy_ARD_sparse(max_iters=1000, kernel_type='linear', num_samples=300, D=4, optimize=True, plot=True):
# Create an artificial dataset where the values in the targets (Y)
    # only depend on dimensions 1 and 3 of the inputs (X). Run ARD to
# see if this dependency can be recovered
X1 = np.sin(np.sort(np.random.rand(num_samples, 1) * 10, 0))
X2 = np.cos(np.sort(np.random.rand(num_samples, 1) * 10, 0))
X3 = np.exp(np.sort(np.random.rand(num_samples, 1), 0))
X4 = np.log(np.sort(np.random.rand(num_samples, 1), 0))
X = np.hstack((X1, X2, X3, X4))
Y1 = np.asarray(2 * X[:, 0] + 3)[:, None]
Y2 = np.asarray(4 * (X[:, 2] - 1.5 * X[:, 0]))[:, None]
Y = np.hstack((Y1, Y2))
Y = np.dot(Y, np.random.rand(2, D));
Y = Y + 0.2 * np.random.randn(Y.shape[0], Y.shape[1])
Y -= Y.mean()
Y /= Y.std()
if kernel_type == 'linear':
kernel = GPy.kern.Linear(X.shape[1], ARD=1)
elif kernel_type == 'rbf_inv':
kernel = GPy.kern.RBF_inv(X.shape[1], ARD=1)
else:
kernel = GPy.kern.RBF(X.shape[1], ARD=1)
#kernel += GPy.kern.Bias(X.shape[1])
X_variance = np.ones(X.shape) * 0.5
m = GPy.models.SparseGPRegression(X, Y, kernel, X_variance=X_variance)
# len_prior = GPy.priors.inverse_gamma(1,18) # 1, 25
# m.set_prior('.*lengthscale',len_prior)
if optimize:
m.optimize(optimizer='scg', max_iters=max_iters)
if plot:
m.kern.plot_ARD()
return m
def robot_wireless(max_iters=100, kernel=None, optimize=True, plot=True):
"""Predict the location of a robot given wirelss signal strength readings."""
try:import pods
except ImportError:
print('pods unavailable, see https://github.com/sods/ods for example datasets')
return
data = pods.datasets.robot_wireless()
# create simple GP Model
m = GPy.models.GPRegression(data['Y'], data['X'], kernel=kernel)
# optimize
if optimize:
m.optimize(max_iters=max_iters)
Xpredict = m.predict(data['Ytest'])[0]
if plot:
pb.plot(data['Xtest'][:, 0], data['Xtest'][:, 1], 'r-')
pb.plot(Xpredict[:, 0], Xpredict[:, 1], 'b-')
pb.axis('equal')
pb.title('WiFi Localization with Gaussian Processes')
pb.legend(('True Location', 'Predicted Location'))
sse = ((data['Xtest'] - Xpredict)**2).sum()
print(('Sum of squares error on test data: ' + str(sse)))
return m
def silhouette(max_iters=100, optimize=True, plot=True):
"""Predict the pose of a figure given a silhouette. This is a task from Agarwal and Triggs 2004 ICML paper."""
try:import pods
except ImportError:
print('pods unavailable, see https://github.com/sods/ods for example datasets')
return
data = pods.datasets.silhouette()
# create simple GP Model
m = GPy.models.GPRegression(data['X'], data['Y'])
# optimize
if optimize:
m.optimize(messages=True, max_iters=max_iters)
print(m)
return m
def sparse_GP_regression_1D(num_samples=400, num_inducing=5, max_iters=100, optimize=True, plot=True, checkgrad=False):
"""Run a 1D example of a sparse GP regression."""
# sample inputs and outputs
X = np.random.uniform(-3., 3., (num_samples, 1))
Y = np.sin(X) + np.random.randn(num_samples, 1) * 0.05
# construct kernel
rbf = GPy.kern.RBF(1)
# create simple GP Model
m = GPy.models.SparseGPRegression(X, Y, kernel=rbf, num_inducing=num_inducing)
if checkgrad:
m.checkgrad()
if optimize:
m.optimize('tnc', max_iters=max_iters)
if plot:
m.plot()
return m
def sparse_GP_regression_2D(num_samples=400, num_inducing=50, max_iters=100, optimize=True, plot=True, nan=False):
"""Run a 2D example of a sparse GP regression."""
np.random.seed(1234)
X = np.random.uniform(-3., 3., (num_samples, 2))
Y = np.sin(X[:, 0:1]) * np.sin(X[:, 1:2]) + np.random.randn(num_samples, 1) * 0.05
if nan:
        inan = np.random.binomial(1,.2,size=Y.shape).astype(bool)
Y[inan] = np.nan
# construct kernel
rbf = GPy.kern.RBF(2)
# create simple GP Model
m = GPy.models.SparseGPRegression(X, Y, kernel=rbf, num_inducing=num_inducing)
    # constrain all parameters to be positive (but not the inducing inputs)
m['.*len'] = 2.
m.checkgrad()
# optimize
if optimize:
m.optimize('tnc', messages=1, max_iters=max_iters)
# plot
if plot:
m.plot()
print(m)
return m
def uncertain_inputs_sparse_regression(max_iters=200, optimize=True, plot=True):
"""Run a 1D example of a sparse GP regression with uncertain inputs."""
fig, axes = pb.subplots(1, 2, figsize=(12, 5), sharex=True, sharey=True)
# sample inputs and outputs
S = np.ones((20, 1))
X = np.random.uniform(-3., 3., (20, 1))
Y = np.sin(X) + np.random.randn(20, 1) * 0.05
# likelihood = GPy.likelihoods.Gaussian(Y)
Z = np.random.uniform(-3., 3., (7, 1))
k = GPy.kern.RBF(1)
# create simple GP Model - no input uncertainty on this one
m = GPy.models.SparseGPRegression(X, Y, kernel=k, Z=Z)
if optimize:
m.optimize('scg', messages=1, max_iters=max_iters)
if plot:
m.plot(ax=axes[0])
axes[0].set_title('no input uncertainty')
print(m)
# the same Model with uncertainty
m = GPy.models.SparseGPRegression(X, Y, kernel=GPy.kern.RBF(1), Z=Z, X_variance=S)
if optimize:
m.optimize('scg', messages=1, max_iters=max_iters)
if plot:
m.plot(ax=axes[1])
axes[1].set_title('with input uncertainty')
fig.canvas.draw()
print(m)
return m
def simple_mean_function(max_iters=100, optimize=True, plot=True):
"""
The simplest possible mean function. No parameters, just a simple Sinusoid.
"""
#create simple mean function
mf = GPy.core.Mapping(1,1)
mf.f = np.sin
mf.update_gradients = lambda a,b: None
X = np.linspace(0,10,50).reshape(-1,1)
Y = np.sin(X) + 0.5*np.cos(3*X) + 0.1*np.random.randn(*X.shape)
k =GPy.kern.RBF(1)
lik = GPy.likelihoods.Gaussian()
m = GPy.core.GP(X, Y, kernel=k, likelihood=lik, mean_function=mf)
if optimize:
m.optimize(max_iters=max_iters)
if plot:
m.plot(plot_limits=(-10,15))
return m
def parametric_mean_function(max_iters=100, optimize=True, plot=True):
"""
A linear mean function with parameters that we'll learn alongside the kernel
"""
#create simple mean function
mf = GPy.core.Mapping(1,1)
mf.f = np.sin
X = np.linspace(0,10,50).reshape(-1,1)
Y = np.sin(X) + 0.5*np.cos(3*X) + 0.1*np.random.randn(*X.shape) + 3*X
mf = GPy.mappings.Linear(1,1)
k =GPy.kern.RBF(1)
lik = GPy.likelihoods.Gaussian()
m = GPy.core.GP(X, Y, kernel=k, likelihood=lik, mean_function=mf)
if optimize:
m.optimize(max_iters=max_iters)
if plot:
m.plot()
return m
| mit |
hsiaoyi0504/scikit-learn | examples/ensemble/plot_bias_variance.py | 357 | 7324 | """
============================================================
Single estimator versus bagging: bias-variance decomposition
============================================================
This example illustrates and compares the bias-variance decomposition of the
expected mean squared error of a single estimator against a bagging ensemble.
In regression, the expected mean squared error of an estimator can be
decomposed in terms of bias, variance and noise. On average over datasets of
the regression problem, the bias term measures the average amount by which the
predictions of the estimator differ from the predictions of the best possible
estimator for the problem (i.e., the Bayes model). The variance term measures
the variability of the predictions of the estimator when fit over different
instances LS of the problem. Finally, the noise measures the irreducible part
of the error which is due to the variability in the data.
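In symbols (a standard identity, stated here only for reference), writing
`y(x)` for the noisy target and `y_hat_LS(x)` for the prediction of an
estimator trained on a random training set `LS`, the pointwise decomposition
reads
    E[(y(x) - y_hat_LS(x))^2] = noise(x) + bias^2(x) + variance(x),
where the expectation is taken over both `LS` and the noise in `y`. This is
the quantity the script below estimates by averaging over `n_repeat`
independently drawn training sets.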
The upper left figure illustrates the predictions (in dark red) of a single
decision tree trained over a random dataset LS (the blue dots) of a toy 1d
regression problem. It also illustrates the predictions (in light red) of other
single decision trees trained over other (and different) randomly drawn
instances LS of the problem. Intuitively, the variance term here corresponds to
the width of the beam of predictions (in light red) of the individual
estimators. The larger the variance, the more sensitive are the predictions for
`x` to small changes in the training set. The bias term corresponds to the
difference between the average prediction of the estimator (in cyan) and the
best possible model (in dark blue). On this problem, we can thus observe that
the bias is quite low (both the cyan and the blue curves are close to each
other) while the variance is large (the red beam is rather wide).
The lower left figure plots the pointwise decomposition of the expected mean
squared error of a single decision tree. It confirms that the bias term (in
blue) is low while the variance is large (in green). It also illustrates the
noise part of the error which, as expected, appears to be constant and around
`0.01`.
The right figures correspond to the same plots but using instead a bagging
ensemble of decision trees. In both figures, we can observe that the bias term
is larger than in the previous case. In the upper right figure, the difference
between the average prediction (in cyan) and the best possible model is larger
(e.g., notice the offset around `x=2`). In the lower right figure, the bias
curve is also slightly higher than in the lower left figure. In terms of
variance however, the beam of predictions is narrower, which suggests that the
variance is lower. Indeed, as the lower right figure confirms, the variance
term (in green) is lower than for single decision trees. Overall, the bias-
variance decomposition is therefore no longer the same. The tradeoff is better
for bagging: averaging several decision trees fit on bootstrap copies of the
dataset slightly increases the bias term but allows for a larger reduction of
the variance, which results in a lower overall mean squared error (compare the
red curves in the lower figures). The script output also confirms this
intuition. The total error of the bagging ensemble is lower than the total
error of a single decision tree, and this difference indeed mainly stems from a
reduced variance.
For further details on bias-variance decomposition, see section 7.3 of [1]_.
References
----------
.. [1] T. Hastie, R. Tibshirani and J. Friedman,
"Elements of Statistical Learning", Springer, 2009.
"""
print(__doc__)
# Author: Gilles Louppe <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import BaggingRegressor
from sklearn.tree import DecisionTreeRegressor
# Settings
n_repeat = 50 # Number of iterations for computing expectations
n_train = 50 # Size of the training set
n_test = 1000 # Size of the test set
noise = 0.1 # Standard deviation of the noise
np.random.seed(0)
# Change this for exploring the bias-variance decomposition of other
# estimators. This should work well for estimators with high variance (e.g.,
# decision trees or KNN), but poorly for estimators with low variance (e.g.,
# linear models).
estimators = [("Tree", DecisionTreeRegressor()),
("Bagging(Tree)", BaggingRegressor(DecisionTreeRegressor()))]
n_estimators = len(estimators)
# Generate data
def f(x):
x = x.ravel()
return np.exp(-x ** 2) + 1.5 * np.exp(-(x - 2) ** 2)
def generate(n_samples, noise, n_repeat=1):
X = np.random.rand(n_samples) * 10 - 5
X = np.sort(X)
if n_repeat == 1:
y = f(X) + np.random.normal(0.0, noise, n_samples)
else:
y = np.zeros((n_samples, n_repeat))
for i in range(n_repeat):
y[:, i] = f(X) + np.random.normal(0.0, noise, n_samples)
X = X.reshape((n_samples, 1))
return X, y
X_train = []
y_train = []
for i in range(n_repeat):
X, y = generate(n_samples=n_train, noise=noise)
X_train.append(X)
y_train.append(y)
X_test, y_test = generate(n_samples=n_test, noise=noise, n_repeat=n_repeat)
# Loop over estimators to compare
for n, (name, estimator) in enumerate(estimators):
# Compute predictions
y_predict = np.zeros((n_test, n_repeat))
for i in range(n_repeat):
estimator.fit(X_train[i], y_train[i])
y_predict[:, i] = estimator.predict(X_test)
# Bias^2 + Variance + Noise decomposition of the mean squared error
y_error = np.zeros(n_test)
for i in range(n_repeat):
for j in range(n_repeat):
y_error += (y_test[:, j] - y_predict[:, i]) ** 2
y_error /= (n_repeat * n_repeat)
y_noise = np.var(y_test, axis=1)
y_bias = (f(X_test) - np.mean(y_predict, axis=1)) ** 2
y_var = np.var(y_predict, axis=1)
print("{0}: {1:.4f} (error) = {2:.4f} (bias^2) "
" + {3:.4f} (var) + {4:.4f} (noise)".format(name,
np.mean(y_error),
np.mean(y_bias),
np.mean(y_var),
np.mean(y_noise)))
# Plot figures
plt.subplot(2, n_estimators, n + 1)
plt.plot(X_test, f(X_test), "b", label="$f(x)$")
plt.plot(X_train[0], y_train[0], ".b", label="LS ~ $y = f(x)+noise$")
for i in range(n_repeat):
if i == 0:
plt.plot(X_test, y_predict[:, i], "r", label="$\^y(x)$")
else:
plt.plot(X_test, y_predict[:, i], "r", alpha=0.05)
plt.plot(X_test, np.mean(y_predict, axis=1), "c",
label="$\mathbb{E}_{LS} \^y(x)$")
plt.xlim([-5, 5])
plt.title(name)
if n == 0:
plt.legend(loc="upper left", prop={"size": 11})
plt.subplot(2, n_estimators, n_estimators + n + 1)
plt.plot(X_test, y_error, "r", label="$error(x)$")
plt.plot(X_test, y_bias, "b", label="$bias^2(x)$"),
plt.plot(X_test, y_var, "g", label="$variance(x)$"),
plt.plot(X_test, y_noise, "c", label="$noise(x)$")
plt.xlim([-5, 5])
plt.ylim([0, 0.1])
if n == 0:
plt.legend(loc="upper left", prop={"size": 11})
plt.show()
| bsd-3-clause |
PythonProgramming/Pattern-Recognition-for-Forex-Trading | machFX7.py | 1 | 5605 | '''
To compare patterns:
use a % change calculation to measure the similarity between each %change
movement in the pattern finder. Subtract each of those numbers from 100 to
get a "how similar" score. From this point, take all 10 of the how-similar
scores and average them. Whichever pattern is most similar is the one we
will assume we have found.
'''
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.ticker as mticker
import matplotlib.dates as mdates
import numpy as np
from numpy import loadtxt
import time
date,bid,ask = np.loadtxt('GBPUSD1d.txt', unpack=True,
delimiter=',',
converters={0:mdates.strpdate2num('%Y%m%d%H%M%S')})
avgLine = ((bid+ask)/2)
####DEFINE######
#CHANGE#
patternAr = []
performanceAr = []
patForRec = []
def percentChange(startPoint,currentPoint):
try:
return ((float(currentPoint)-startPoint)/abs(startPoint))*100.00
except:
return 0
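# A minimal sketch (not in the original script) of the similarity measure
# described in the module docstring: compare two 10-element lists of %change
# values point by point, turn each comparison into a 0-100 "how similar"
# score, and average the scores. The function and argument names here are
# illustrative only.
def patternSimilarity(patForRec, eachPattern):
    howSims = []
    for cur, past in zip(patForRec, eachPattern):
        howSims.append(100.00 - abs(percentChange(past, cur)))
    return sum(howSims) / len(howSims)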
def patternStorage():
'''
    The goal of patternStorage is to begin collecting %change patterns
    from the tick data. From there, we also collect the short-term outcome
    of each pattern. Later on, the length of the pattern, how far out we
    look to compare to, and the length of the compared range can all be
    changed, and even THAT can be machine learned to find the best of all
    three by comparing success rates.
'''
#####
startTime = time.time()
    # required to use a pattern array, because an identical %change value
    # across millions of patterns is fairly likely and would
    # cause problems. If it were only a problem of identical patterns,
    # then it wouldn't matter, but the % change issue
    # would cause a lot of harm. Cannot have a list as a dictionary key.
#MOVE THE ARRAYS THEMSELVES#
x = len(avgLine)-30
y = 11
currentStance = 'none'
while y < x:
pattern = []
p1 = percentChange(avgLine[y-10], avgLine[y-9])
p2 = percentChange(avgLine[y-10], avgLine[y-8])
p3 = percentChange(avgLine[y-10], avgLine[y-7])
p4 = percentChange(avgLine[y-10], avgLine[y-6])
p5 = percentChange(avgLine[y-10], avgLine[y-5])
p6 = percentChange(avgLine[y-10], avgLine[y-4])
p7 = percentChange(avgLine[y-10], avgLine[y-3])
p8 = percentChange(avgLine[y-10], avgLine[y-2])
p9 = percentChange(avgLine[y-10], avgLine[y-1])
p10= percentChange(avgLine[y-10], avgLine[y])
outcomeRange = avgLine[y+20:y+30]
currentPoint = avgLine[y]
#Define##########################
#########change to try except for safety
try:
avgOutcome = reduce(lambda x, y: x + y, outcomeRange) / len(outcomeRange)
except Exception, e:
print str(e)
avgOutcome = 0
#Define
futureOutcome = percentChange(currentPoint, avgOutcome)
        # optional debug prints (kept commented out below)
'''
print 'where we are historically:',currentPoint
print 'soft outcome of the horizon:',avgOutcome
print 'This pattern brings a future change of:',futureOutcome
print '_______'
print p1, p2, p3, p4, p5, p6, p7, p8, p9, p10
'''
pattern.append(p1)
pattern.append(p2)
pattern.append(p3)
pattern.append(p4)
pattern.append(p5)
pattern.append(p6)
pattern.append(p7)
pattern.append(p8)
pattern.append(p9)
pattern.append(p10)
#can use .index to find the index value, then search for that value to get the matching information.
# so like, performanceAr.index(12341)
patternAr.append(pattern)
performanceAr.append(futureOutcome)
y+=1
#####
endTime = time.time()
print len(patternAr)
print len(performanceAr)
print 'Pattern storing took:', endTime-startTime
#####
####
####
def patternRecognition():
#mostRecentPoint = avgLine[-1]
patForRec = []
cp1 = percentChange(avgLine[-11],avgLine[-10])
cp2 = percentChange(avgLine[-11],avgLine[-9])
cp3 = percentChange(avgLine[-11],avgLine[-8])
cp4 = percentChange(avgLine[-11],avgLine[-7])
cp5 = percentChange(avgLine[-11],avgLine[-6])
cp6 = percentChange(avgLine[-11],avgLine[-5])
cp7 = percentChange(avgLine[-11],avgLine[-4])
cp8 = percentChange(avgLine[-11],avgLine[-3])
cp9 = percentChange(avgLine[-11],avgLine[-2])
cp10= percentChange(avgLine[-11],avgLine[-1])
patForRec.append(cp1)
patForRec.append(cp2)
patForRec.append(cp3)
patForRec.append(cp4)
patForRec.append(cp5)
patForRec.append(cp6)
patForRec.append(cp7)
patForRec.append(cp8)
patForRec.append(cp9)
patForRec.append(cp10)
print patForRec
def graphRawFX():
fig=plt.figure(figsize=(10,7))
ax1 = plt.subplot2grid((40,40), (0,0), rowspan=40, colspan=40)
ax1.plot(date,bid)
ax1.plot(date,ask)
#ax1.plot(date,((bid+ask)/2))
#ax1.plot(date,percentChange(ask[0],ask),'r')
ax1.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d %H:%M:%S'))
#####
plt.grid(True)
for label in ax1.xaxis.get_ticklabels():
label.set_rotation(45)
plt.gca().get_yaxis().get_major_formatter().set_useOffset(False)
#######
ax1_2 = ax1.twinx()
#ax1_2.plot(date, (ask-bid))
ax1_2.fill_between(date, 0, (ask-bid), facecolor='g',alpha=.3)
#ax1_2.set_ylim(0, 3*ask.max())
#######
plt.subplots_adjust(bottom=.23)
#plt.grid(True)
plt.show()
| mit |
xwolf12/scikit-learn | sklearn/neighbors/tests/test_nearest_centroid.py | 305 | 4121 | """
Testing for the nearest centroid module.
"""
import numpy as np
from scipy import sparse as sp
from numpy.testing import assert_array_equal
from numpy.testing import assert_equal
from sklearn.neighbors import NearestCentroid
from sklearn import datasets
from sklearn.metrics.pairwise import pairwise_distances
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
X_csr = sp.csr_matrix(X) # Sparse matrix
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
T_csr = sp.csr_matrix(T)
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = np.random.RandomState(1)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def test_classification_toy():
# Check classification on a toy dataset, including sparse versions.
clf = NearestCentroid()
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# Same test, but with a sparse matrix to fit and test.
clf = NearestCentroid()
clf.fit(X_csr, y)
assert_array_equal(clf.predict(T_csr), true_result)
# Fit with sparse, test with non-sparse
clf = NearestCentroid()
clf.fit(X_csr, y)
assert_array_equal(clf.predict(T), true_result)
# Fit with non-sparse, test with sparse
clf = NearestCentroid()
clf.fit(X, y)
assert_array_equal(clf.predict(T_csr), true_result)
# Fit and predict with non-CSR sparse matrices
clf = NearestCentroid()
clf.fit(X_csr.tocoo(), y)
assert_array_equal(clf.predict(T_csr.tolil()), true_result)
def test_precomputed():
clf = NearestCentroid(metric="precomputed")
clf.fit(X, y)
S = pairwise_distances(T, clf.centroids_)
assert_array_equal(clf.predict(S), true_result)
def test_iris():
# Check consistency on dataset iris.
for metric in ('euclidean', 'cosine'):
clf = NearestCentroid(metric=metric).fit(iris.data, iris.target)
score = np.mean(clf.predict(iris.data) == iris.target)
assert score > 0.9, "Failed with score = " + str(score)
def test_iris_shrinkage():
# Check consistency on dataset iris, when using shrinkage.
for metric in ('euclidean', 'cosine'):
for shrink_threshold in [None, 0.1, 0.5]:
clf = NearestCentroid(metric=metric,
shrink_threshold=shrink_threshold)
clf = clf.fit(iris.data, iris.target)
score = np.mean(clf.predict(iris.data) == iris.target)
assert score > 0.8, "Failed with score = " + str(score)
def test_pickle():
import pickle
# classification
obj = NearestCentroid()
obj.fit(iris.data, iris.target)
score = obj.score(iris.data, iris.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(iris.data, iris.target)
assert_array_equal(score, score2,
"Failed to generate same score"
" after pickling (classification).")
def test_shrinkage_threshold_decoded_y():
clf = NearestCentroid(shrink_threshold=0.01)
y_ind = np.asarray(y)
y_ind[y_ind == -1] = 0
clf.fit(X, y_ind)
centroid_encoded = clf.centroids_
clf.fit(X, y)
assert_array_equal(centroid_encoded, clf.centroids_)
def test_predict_translated_data():
# Test that NearestCentroid gives same results on translated data
rng = np.random.RandomState(0)
X = rng.rand(50, 50)
y = rng.randint(0, 3, 50)
noise = rng.rand(50)
clf = NearestCentroid(shrink_threshold=0.1)
clf.fit(X, y)
y_init = clf.predict(X)
clf = NearestCentroid(shrink_threshold=0.1)
X_noise = X + noise
clf.fit(X_noise, y)
y_translate = clf.predict(X_noise)
assert_array_equal(y_init, y_translate)
def test_manhattan_metric():
# Test the manhattan metric.
clf = NearestCentroid(metric='manhattan')
clf.fit(X, y)
dense_centroid = clf.centroids_
clf.fit(X_csr, y)
assert_array_equal(clf.centroids_, dense_centroid)
assert_array_equal(dense_centroid, [[-1, -1], [1, 1]])
| bsd-3-clause |
caisq/tensorflow | tensorflow/contrib/timeseries/examples/predict_test.py | 80 | 2487 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests that the TensorFlow parts of the prediction example run."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from os import path
from tensorflow.contrib.timeseries.examples import predict
from tensorflow.python.platform import test
_MODULE_PATH = path.dirname(__file__)
_DATA_FILE = path.join(_MODULE_PATH, "data/period_trend.csv")
class PeriodTrendExampleTest(test.TestCase):
def test_shapes_and_variance_structural(self):
(times, observed, all_times, mean, upper_limit, lower_limit
) = predict.structural_ensemble_train_and_predict(_DATA_FILE)
# Just check that plotting will probably be OK. We can't actually run the
# plotting code since we don't want to pull in matplotlib as a dependency
# for this test.
self.assertAllEqual([500], times.shape)
self.assertAllEqual([500], observed.shape)
self.assertAllEqual([700], all_times.shape)
self.assertAllEqual([700], mean.shape)
self.assertAllEqual([700], upper_limit.shape)
self.assertAllEqual([700], lower_limit.shape)
# Check that variance hasn't blown up too much. This is a relatively good
# indication that training was successful.
self.assertLess(upper_limit[-1] - lower_limit[-1],
1.5 * (upper_limit[0] - lower_limit[0]))
def test_ar(self):
(times, observed, all_times, mean,
upper_limit, lower_limit) = predict.ar_train_and_predict(_DATA_FILE)
self.assertAllEqual(times.shape, observed.shape)
self.assertAllEqual(all_times.shape, mean.shape)
self.assertAllEqual(all_times.shape, upper_limit.shape)
self.assertAllEqual(all_times.shape, lower_limit.shape)
self.assertLess((upper_limit - lower_limit).mean(), 4.)
if __name__ == "__main__":
test.main()
| apache-2.0 |
CompPhysics/ThesisProjects | doc/MSc/msc_students/former/AudunHansen/Audun/Pythonscripts/diag_draw.py | 1 | 16682 | from numpy import *
from matplotlib.pyplot import *
def nconnect(n1,n2,S,order="l0", p_h = None):
N = 60
Phx = (n1.x+n2.x)/2.0
Phy = (n1.y+n2.y)/2.0
lP = sqrt((n2.x-n1.x)**2 + (n2.y-n1.y)**2)
dPx = (n2.x-n1.x)/lP
dPy = (n2.y-n1.y)/lP
Cx = Phx - S*dPy
Cy = Phy + S*dPx
lC = sqrt((S*dPy)**2 + (S*dPx)**2)
#node(Phx,Phy, c="blue")
#node(Cx,Cy, c="red")
R = sqrt((Cx-n1.x)**2 + (Cy-n1.y)**2)
lPC0 = sqrt(((Cx+R)-n1.x)**2 + (Cy - n1.y)**2)
lPC1 = sqrt(((Cx+R)-n2.x)**2 + (Cy - n2.y)**2)
dalpha = arccos((2*R**2 - lP**2)/(2.0*R**2))
CPx = n1.x - Cx
CPy = n1.y - Cy
X,Y = 0,0
if order == "0":
#X = [n1.x, n2.x]
#Y = [n1.y, n2.y]
X = linspace(n1.x, n2.x, 6) #need to do this to get arrows working
Y = linspace(n1.y, n2.y, 6)
if order == "l0":
if S<0:
dalpha = 2*pi - dalpha
A = linspace(0,dalpha, N)
X,Y = rotate_v(CPx,CPy,A)
X+=Cx
Y+=Cy
if order == "r0":
if S>0:
dalpha = 2*pi - dalpha
A = linspace(0,-dalpha, N)
X,Y = rotate_v(CPx,CPy,A)
X+=Cx
Y+=Cy
msize = 10
if p_h == 1:
draw_arrow([X[len(X)/2],Y[len(X)/2]], [-dPx,-dPy])
#X[len(X)/2],Y[len(X)/2]
#plot(X[len(X)/2],Y[len(X)/2], "^", color = "black", markersize = msize)
if p_h == -1:
draw_arrow([X[len(X)/2],Y[len(X)/2]], [dPx,dPy])
#plot(X[len(X)/2],Y[len(X)/2], "v", color = "black", markersize = msize)
plot(X,Y, color = "black")
def ncon(n1,n2,order = 0, p_h = None):
if order == 0:
nconnect(n1,n2,1,"0", p_h)
if order > 0:
nconnect(n1,n2,(-2+order),"l0", p_h)
if order < 0:
nconnect(n1,n2,(-2-order),"r0", p_h)
def draw_arrow(pos, point, s = .2, h = .1):
#Draw an arrow at pos, pointing in the direction of point.
#normalize direction
p2 = sqrt(point[0]**2 + point[1]**2)
point[0] /= p2
point[1] /= p2
#pi/2 degree rotation
p_rotx, p_roty = point[1], -point[0]
x0, y0 = pos[0], pos[1]
x1, y1 = pos[0] - s*point[0], pos[1] - s*point[1]
#plot the arrow
plot([x0, x1+h*p_rotx],[y0, y1+h*p_roty], color = "black")
plot([x0, x1-h*p_rotx],[y0, y1-h*p_roty], color = "black")
def rotate_v(x,y,alpha):
ca = cos(alpha)
sa = sin(alpha)
return ca*x - sa*y, sa*x + ca*y
def nodedraw(p):
plot(p.x, p.y,"o", color = "black", markersize = 5)
class node():
def __init__(self, x,y):
self.x = x
self.y = y
def node_connect(n1, n2, feature, label = None):
if feature == 0:
#amplitude/solid operator
plot([n1.x, n2.x], [n1.y, n2.y], color = "Black")
if label:
text((n2.x-n1.x)/2.0 + label[0],(n2.y-n1.y)/2.0, label[1])
if feature == 1:
#interaction vertex
plot([n1.x, n2.x], [n1.y, n2.y], "--",color = "Black")
if feature == 2:
#particle
plot([n1.x, n2.x], [n1.y, n2.y], color = "Black")
draw_arrow([n1.x + (n2.x-n1.x)/2.0,n1.y + (n2.y-n1.y)/2.0], [n2.x-n1.x,n2.y-n1.y])
if label:
text(n1.x + (n2.x-n1.x)/2.0 + label[0],n1.y + (n2.y-n1.y)/2.0, label[1])
if feature == 3:
#hole
plot([n1.x, n2.x], [n1.y, n2.y], color = "Black")
draw_arrow([n1.x + (n2.x-n1.x)/2.0,n1.y + (n2.y-n1.y)/2.0], [-(n2.x-n1.x),-(n2.y-n1.y)])
if label:
text(n1.x + (n2.x-n1.x)/2.0 + label[0],n1.y +(n2.y-n1.y)/2.0, label[1])
if feature == 4:
#particle+hole
ncon(n1, n2, -1, 1)
ncon(n2, n1, -1, 1)
if label:
text(n1.x + (n2.x-n1.x)/2.0 - label[0],n1.y + (n2.y-n1.y)/2.0, label[1])
text(n1.x + (n2.x-n1.x)/2.0 + label[0],n1.y + (n2.y-n1.y)/2.0, label[2])
if feature == 5:
#connect all nodes in list n1 using solid vertex
for i in range(len(n1)-1):
node_connect(n1[i], n1[i+1], 0)
nodedraw(n1[i])
nodedraw(n1[-1])
if feature == 6:
#connect all nodes in list n1 using dotted vertex
for i in range(len(n1)-1):
node_connect(n1[i], n1[i+1], 1)
nodedraw(n1[i])
nodedraw(n1[-1])
def Cr(n):
return "\\hat{a}_%s^\\dagger" %n
def An(n):
return "\\hat{a}_%s" %n
def simple_lines():
spread = .2
diagname = "$\\vert \\Phi^{a}_{i} \\rangle$"
figure(figsize = (4,3), dpi = 80, edgecolor = 'k',facecolor='white')
plot([-.1,1.1], [-.1,1.1], color = "white")
axis('off')
title(diagname)
axes().set_aspect('equal', 'datalim')
hold("on")
#interaction = [node(0,1), node(1,1)]
above = [node(-spread, 2), node(spread, 2), node(1-spread, 2), node(1+spread, 2)]
below = [node(-spread, 0), node(spread, 0), node(1-spread, 0), node(1+spread, 0)]
node_connect(below[0], above[0], 2, [-.2, "a"])
#node_connect(below[3], above[3], 3, [.2, "j"])
node_connect(below[3], above[3], 3, [-.2, "i"])
#node_connect(below[2], above[2], 2, [.2, "b"])
show()
def contraction():
spread = .4
#diagname = "$\\sum_{abij} \\langle ai \\vert \\vert bj \\rangle \\hat{a}_i^\\dagger$"
#"\hat{F}_N = "
#diagname ="$\\sum_{ij} f_{ij} \\hat{a}_j \\hat{a}_i^{\\dagger}$"
#diagname ="$\\sum_{ab} f_{ab} \\hat{a}_a^{\\dagger} \\hat{a}_b$"
#diagname ="$\\sum_{ai} f_{ia} \\hat{a}_i^{\\dagger} \\hat{a}_a$"
#diagname ="$\\sum_{ai} f_{ai} \\hat{a}_i \\hat{a}_a^{\\dagger}$"
diagname ="$\\sum_{bc} f_{bc} \\hat{a}_b^{\\dagger} \\hat{a}_c$"
figure(figsize = (3,4), dpi = 80, edgecolor = 'k',facecolor='white')
plot([-.1,2.2], [-.1,2.2], color = "white")
axis('off')
title("Example of contraction")
axes().set_aspect('equal', 'datalim')
hold("on")
interaction = [node(.5,1), node(1,1)]
above = [node(-spread, 2), node(spread, 2), node(1-spread, 2), node(1+spread, 2)]
below = [node(-spread, 0), node(spread, 0), node(1-spread, 0), node(1+spread, 0)]
SDbl = [node(-spread, -2), node(spread, -2), node(1-spread, -2), node(1+spread, -2)]
SDab = [node(-spread, -.5), node(spread, -.5), node(1-spread, -.5), node(1+spread, -.5)]
#node_connect(interaction, above, 6)
node_connect(interaction[0], interaction[1], 1)
nodedraw(interaction[1])
#node_connect(below[0], above[0], 3)
node_connect(interaction[1], above[-1], 2, [-.2, "b"])
#node_connect(interaction[1], above[-2], 3, [-.2, "i"])
#node_connect(interaction[1], below[-1], 2, [-.2, "i"])
node_connect(interaction[1], below[-1], 3, [-.2, "c"])
#node_connect(interaction[1], below[-1], 3)
node_connect(SDbl[1], SDab[1], 3, [-.2, "i"])
node_connect(SDbl[3], SDab[3], 2, [-.2, "a"])
#node_connect(interaction[1], below[-1], 3)
text(2, 0, "=")
h = -.5
v = .2
interaction = [node(2.6 +v,.5+ h), node(3 + v,.5 +h)]
above = [node(2 + v -spread, 1.5+h), node(2 +spread + v, 1.5+h), node(3-spread + v, 1.5+h), node(3+spread +v, 1.5+h)]
below = [node(2 + v -spread, -.5+h), node(2 +spread + v, -.5+h), node(3-spread + v, -.5+h), node(3+spread +v, -.5+h)]
node_connect(interaction[0], interaction[1], 1)
nodedraw(interaction[1])
#node_connect(below[0], above[0], 3)
node_connect(interaction[1], above[-1], 2, [-.2, "b"])
#node_connect(interaction[1], above[-2], 3, [-.2, "i"])
#node_connect(interaction[1], below[-1], 2, [-.2, "i"])
node_connect(interaction[1], below[-1], 3, [-.2, "a"])
node_connect(below[1], above[1], 2, [-.2, "i"])
#node_connect(n1,n2,2, [-.2, "a"])
#ncon(n1, n2, -1, 1)
#ncon(n2, n1, -1, 1)
#nodedraw(n1)
show()
def onebody_op():
spread = .4
#diagname = "$\\sum_{abij} \\langle ai \\vert \\vert bj \\rangle \\hat{a}_i^\\dagger$"
#"\hat{F}_N = "
#diagname ="$\\sum_{ij} f_{ij} \\hat{a}_j \\hat{a}_i^{\\dagger}$"
diagname ="$\\sum_{bc} f_{bc} \\hat{a}_b^{\\dagger} \\hat{a}_c$"
#diagname ="$\\sum_{ai} f_{ia} \\hat{a}_i^{\\dagger} \\hat{a}_b$"
#diagname ="$\\sum_{ai} f_{ai} \\hat{a}_i \\hat{a}_a^{\\dagger}$"
figure(figsize = (3,4), dpi = 80, edgecolor = 'k',facecolor='white')
plot([-.1,2.2], [-.1,2.2], color = "white")
axis('off')
title(diagname)
axes().set_aspect('equal', 'datalim')
hold("on")
interaction = [node(.5,1), node(1,1)]
above = [node(-spread, 2), node(spread, 2), node(1-spread, 2), node(1+spread, 2)]
below = [node(-spread, 0), node(spread, 0), node(1-spread, 0), node(1+spread, 0)]
#node_connect(interaction, above, 6)
node_connect(interaction[0], interaction[1], 1)
nodedraw(interaction[1])
#node_connect(below[0], above[0], 3)
#node_connect(interaction[1], above[-1], 2, [-.2, "a"])
#node_connect(interaction[1], above[-2], 3, [-.2, "i"])
node_connect(interaction[1], below[-1], 2, [-.2, "i"])
node_connect(interaction[1], below[-2], 3, [-.2, "a"])
#node_connect(interaction[1], below[-1], 3)
#node_connect(n1,n2,2, [-.2, "a"])
#ncon(n1, n2, -1, 1)
#ncon(n2, n1, -1, 1)
#nodedraw(n1)
show()
def normal_hamilt():
#draw_arrow([0,0], [1,1])
spread = .3
#diagname = "$\\sum_{abij} \\langle ai \\vert \\vert bj \\rangle \\hat{a}_i^\\dagger$"
a = "a"
b = "b"
c = "c"
d = "d"
i = "i"
j = "j"
k = "k"
l = "l"
#diagname = "$\\sum_{abcd} \\langle ab\\vert \\hat{g} \\vert cd \\rangle \\{ %s %s %s %s \\}$" % (Cr(a), Cr(b), An(d), An(c))
#diagname ="$\\sum_{ijkl} \\langle ij\\vert \\hat{g} \\vert kl \\rangle \\{ %s %s %s %s \\}$" % (Cr(i), Cr(j), An(l), An(k))
#diagname ="$\\sum_{aibj} \\langle ij\\vert \\hat{g} \\vert bj \\rangle \\{ %s %s %s %s \\}$" % (Cr(a), Cr(i), An(j), An(b))
#diagname ="$\\sum_{abci} \\langle ab\\vert \\hat{g} \\vert ci \\rangle \\{ %s %s %s %s \\}$" % (Cr(a), Cr(b), An(i), An(c))
#diagname ="$\\sum_{iajk} \\langle ia\\vert \\hat{g} \\vert jk \\rangle \\{ %s %s %s %s \\}$" % (Cr(i), Cr(a), An(k), An(j))
#diagname ="$\\sum_{aibc} \\langle ai\\vert \\hat{g} \\vert bc \\rangle \\{ %s %s %s %s \\}$" % (Cr(a), Cr(i), An(b), An(c))
#diagname ="$\\sum_{ijka} \\langle ij\\vert \\hat{g} \\vert ka \\rangle \\{ %s %s %s %s \\}$" % (Cr(i), Cr(j), An(a), An(k))
#diagname ="$\\sum_{abij} \\langle ab\\vert \\hat{g} \\vert ij \\rangle \\{ %s %s %s %s \\}$" % (Cr(a), Cr(b), An(j), An(i))
diagname ="$\\sum_{ijab} \\langle ij\\vert \\hat{g} \\vert ab \\rangle \\{ %s %s %s %s \\}$" % (Cr(i), Cr(j), An(b), An(a))
figure(figsize = (1.0,3.1), dpi = 80, edgecolor = 'k',facecolor='white')
plot([-.4,1.4], [-.3,1.4], color = "white")
axis('off')
#title(diagname)
text(-.5,1.4,diagname, size = 13)
text(-.3,-.3,"+", size = 20)
text(.2,-.3,"-", size = 20)
text(.7,-.3,"+", size = 20)
text(1.2,-.3,"-", size = 20)
axes().set_aspect('equal', 'datalim')
hold("on")
interaction = [node(0,1), node(1,1)]
above = [node(-spread, 2), node(spread, 2), node(1-spread, 2), node(1+spread, 2)]
below = [node(-spread, 0), node(spread, 0), node(1-spread, 0), node(1+spread, 0)]
node_connect(interaction, above, 6)
node_connect(interaction[0], below[0], 3,[-.2, ""]) #a out
node_connect(interaction[0], below[1], 2,[-.2, ""]) #i out
node_connect(interaction[1], below[-2], 3,[-.2, ""]) #b out
node_connect(interaction[1], below[-1], 2,[-.2, ""]) #j out
#node_connect(interaction[0], below[0], 2,[-.2, "i"]) #a out
#node_connect(interaction[1], below[-2], 2,[-.2, "j"]) #i out
#node_connect(interaction[1], below[-1], 3,[-.2, "a"]) #b out
#node_connect(interaction[1], below[-1], 2,[-.2, "j"]) #j out
#node_connect(interaction[0], below[0], 2)
#node_connect(interaction[1], below[-1], 3)
#node_connect(n1,n2,2, [-.2, "a"])
#ncon(n1, n2, -1, 1)
#ncon(n2, n1, -1, 1)
#nodedraw(n1)
show()
def normal_hamilt2():
#draw_arrow([0,0], [1,1])
spread = .3
#diagname = "$\\sum_{abij} \\langle ai \\vert \\vert bj \\rangle \\hat{a}_i^\\dagger$"
a = "a"
b = "b"
c = "c"
d = "d"
i = "i"
j = "j"
k = "k"
l = "l"
#diagname = "$\\sum_{abcd} \\langle ab\\vert \\hat{g} \\vert cd \\rangle \\{ %s %s %s %s \\}$" % (Cr(a), Cr(b), An(d), An(c))
#diagname ="$\\sum_{ijkl} \\langle ij\\vert \\hat{g} \\vert kl \\rangle \\{ %s %s %s %s \\}$" % (Cr(i), Cr(j), An(l), An(k))
#diagname ="$\\sum_{aibj} \\langle ij\\vert \\hat{g} \\vert bj \\rangle \\{ %s %s %s %s \\}$" % (Cr(a), Cr(i), An(j), An(b))
#diagname ="$\\sum_{abci} \\langle ab\\vert \\hat{g} \\vert ci \\rangle \\{ %s %s %s %s \\}$" % (Cr(a), Cr(b), An(i), An(c))
#diagname ="$\\sum_{iajk} \\langle ia\\vert \\hat{g} \\vert jk \\rangle \\{ %s %s %s %s \\}$" % (Cr(i), Cr(a), An(k), An(j))
#diagname ="$\\sum_{aibc} \\langle ai\\vert \\hat{g} \\vert bc \\rangle \\{ %s %s %s %s \\}$" % (Cr(a), Cr(i), An(b), An(c))
#diagname ="$\\sum_{ijka} \\langle ij\\vert \\hat{g} \\vert ka \\rangle \\{ %s %s %s %s \\}$" % (Cr(i), Cr(j), An(a), An(k))
#diagname ="$\\sum_{abij} \\langle ab\\vert \\hat{g} \\vert ij \\rangle \\{ %s %s %s %s \\}$" % (Cr(a), Cr(b), An(j), An(i))
diagname ="$\\sum_{ijab} \\langle ij\\vert \\hat{g} \\vert ab \\rangle \\{ %s %s %s %s \\}$" % (Cr(i), Cr(j), An(b), An(a))
figure(figsize = (1.0,3.1), dpi = 80, edgecolor = 'k',facecolor='white')
#plot([-.5,1.4], [-.0,1.4], color = "white")
axis('off')
#title(diagname)
#text(-.5,1.4,diagname, size = 13)
#text(-.3,-.3,"+", size = 20)
#text(.2,-.3,"-", size = 20)
#text(.7,-.3,"+", size = 20)
#text(1.2,-.3,"-", size = 20)
axes().set_aspect('equal', 'datalim')
hold("on")
interaction = [node(0,1), node(2,1)]
above = [node(-spread, 2), node(spread, 2), node(1-spread, 2), node(1+spread, 2)]
below = [node(-spread, -1), node(spread, -1), node(2-spread, -1), node(2+spread, -1)]
node_connect(interaction, above, 6)
node_connect(interaction[0], below[0], 3,[-.2, ""]) #a out
node_connect(interaction[0], below[1], 2,[-.2, ""]) #i out
#node_connect(interaction[1], below[-2], 3,[-.2, ""]) #b out
#node_connect(interaction[1], below[-1], 2,[-.2, ""]) #j out
#node_connect(interaction[0], below[0], 2,[-.2, "i"]) #a out
#node_connect(interaction[1], below[-2], 2,[-.2, "j"]) #i out
#node_connect(interaction[1], below[-1], 3,[-.2, "a"]) #b out
#node_connect(interaction[1], below[-1], 2,[-.2, "j"]) #j out
#node_connect(interaction[0], below[0], 2)
#node_connect(interaction[1], below[-1], 3)
#node_connect(n1,n2,2, [-.2, "a"])
#ncon(n1, n2, -1, 1)
#ncon(n2, n1, -1, 1)
#nodedraw(n1)
show()
def sign(node, s, sc):
if s==0:
text(node.x-.09-.2, node.y+sc, "-", size = 17)
else:
text(node.x+.09-.2, node.y+sc, "+", size = 17)
def clusters():
spread = .4
diagname = "$\hat{T}_1$"
figure(figsize = (4,1.5), dpi = 80, edgecolor = 'k',facecolor='white')
plot([-1,1], [-1,2.4], color = "white")
axis('off')
#title(diagname)
axes().set_aspect('equal', 'datalim')
hold("on")
#interaction = [node(0,1), node(1,1)]
#above = [node(-spread, 2), node(spread, 2), node(1-spread, 2), node(1+spread, 2)]
#below = [node(0, 0), node(spread, 0), node(1-spread, 0), node(1+spread, 0)]
above = [node(-spread, 2), node(spread, 2), node(1-spread, 2), node(1+spread, 2), node(2-spread, 2), node(2+spread, 2), node(3-spread, 2), node(3+spread, 2)]
below = [node(0, 0), node(1, 0), node(2, 0), node(3, 0)]
text(1,-1, "$\hat{T}_4$")
node_connect(below[0], above[0], 2, [-.4, ""])
node_connect(below[0], above[1], 3, [.3, ""])
sign(above[0], 1, .2)
sign(above[1], 0, .2)
node_connect(below[1], above[2], 2, [-.4, ""])
node_connect(below[1], above[3], 3, [.3, ""])
sign(above[2], 1, .2)
sign(above[3], 0, .2)
node_connect(below[2], above[4], 2, [-.4, ""])
node_connect(below[2], above[5], 3, [.3, ""])
sign(above[4], 1, .2)
sign(above[5], 0, .2)
node_connect(below[3], above[6], 2, [-.4, ""])
node_connect(below[3], above[7], 3, [.3, ""])
sign(above[6], 1, .2)
sign(above[7], 0, .2)
#node_connect(below[3], above[3], 3, [.2, "j"])
plot([-.1,3.1], [0,0], color = (0,0,0), linewidth = 2)
show()
normal_hamilt2()
#simple_lines()
#contraction()
#clusters() | cc0-1.0 |
Luke092/MLDM_SFCrime | src/featureEngineering.py | 1 | 9541 | from utilities import *
from sklearn.cluster import KMeans
from sklearn import preprocessing
import pandas as pd
import time
import re
import os
def removeAtts(ds, intest, atts):
if not atts:
return ds, intest
for att in atts:
intest.remove(att)
for i in range(len(ds)):
for att in atts:
del ds[i][att]
return ds, intest
# Global variables
limit_X_min = -122.519703 # -122.511076
limit_X_max = -122.268906 # -122.365671
limit_Y_min = 37.684092 # 37.707777
limit_Y_max = 37.871601 # 37.836333
def processSDR(ds, intest):
n = len(ds)
new_ds = []
intest.insert(1, 'Season')
intest.insert(2, 'DailyRange')
seasons = {0: 'winter', 1: 'spring', 2: 'summer', 3: 'autumn'}
daily_ranges = {0: 'night', 1: 'morning', 2: 'afternoon', 3: 'evening'}
for i in range(n):
date = ds[i]['Dates']
splitted_date = date.split(' ')
day, time = splitted_date[0].strip(), splitted_date[1].strip()
month = day.split('-')
month = int(month[1])
hour = time.split(':')
hour = int(hour[0])
        season = seasons[(month - 1) // 3]
        daily_range = daily_ranges[hour // 6]
ds[i]['Season'] = season
ds[i]['DailyRange'] = daily_range
new_ds.append(ds[i])
printProgress(i, n)
return new_ds, intest
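# Illustrative example (assumes the 'Dates' field format 'YYYY-MM-DD HH:MM:SS'):
# for a record like {'Dates': '2015-05-13 23:53:00', ...}, processSDR adds
#   'Season'     -> seasons[(5 - 1) // 3] == seasons[1] == 'spring'
#   'DailyRange' -> daily_ranges[23 // 6] == daily_ranges[3] == 'evening'
# and inserts both new column names into the header list `intest`.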
def processGrid(ds, gridSide):
min_x, max_x, min_y, max_y = getMinMax()
step_x = (max_x - min_x) / gridSide
step_y = (max_y - min_y) / gridSide
for row in ds:
row['X'] = int((float(row['X']) - min_x) / step_x)
row['Y'] = int((float(row['Y']) - min_y) / step_y)
return ds
def processCross(ds, intest):
n = len(ds)
intest.insert(4, 'isCross')
new_ds = []
for i in range(n):
address = ds[i]['Address']
isCross = re.search(' / ', address) is not None
ds[i]['isCross'] = isCross
new_ds.append(ds[i])
printProgress(i, n)
return new_ds, intest
def address_to_type(ds):
    if ds[0]['Address'] is None:
return None
crosses = []
for row in ds:
cross = row['Address'].split("/")
if (len(cross) == 1):
t = row['Address'].strip()[-2:]
row['Address'] = t
elif (cross[0].strip()[-2:] == cross[1].strip()[-2:]):
row['Address'] = cross[0].strip()[-2:]
else:
t1 = cross[0].strip()[-2:]
t2 = cross[1].strip()[-2:]
if (t1 + "/" + t2 in crosses):
row['Address'] = t1 + "/" + t2
elif (t2 + "/" + t1 in crosses):
row['Address'] = t2 + "/" + t1
else:
row['Address'] = t1 + "/" + t2
crosses.append(row['Address'])
return ds
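# Illustrative mapping performed by address_to_type (the street names are made up):
#   '1500 MARKET ST'      -> 'ST'     (plain address: keep the two-letter type suffix)
#   'OAK ST / LAGUNA ST'  -> 'ST'     (cross streets sharing the same type)
#   'TURK ST / JONES AV'  -> 'ST/AV'  ('AV/ST' is reused instead if it was recorded first)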
def processStreet(ds, intest):
n = len(ds)
intest.insert(4, 'StreetType')
new_ds = []
crosses = []
for i in range(n):
address = ds[i]['Address']
isCross = re.search(' / ', address) is not None
if not isCross:
streetType = address[-2:]
else:
streetTypes = re.split(' / ', address)
streetTypes = [s[-2:] for s in streetTypes]
streetType = ' / '.join(streetTypes)
streetTypeReversed = ' / '.join(reversed(streetTypes))
if not streetType in crosses and not streetTypeReversed in crosses:
crosses.append(streetType)
if streetTypeReversed in crosses:
streetType = streetTypeReversed
ds[i]['StreetType'] = streetType
new_ds.append(ds[i])
printProgress(i, n)
return new_ds, intest
def processDay(ds, intest):
n = len(ds)
intest.insert(3, 'Weekend')
for i in range(n):
day = ds[i]['DayOfWeek']
if day == 'Saturday' or day == 'Sunday':
ds[i]['Weekend'] = True
else:
ds[i]['Weekend'] = False
printProgress(i, n)
return ds, intest
def processDate(ds, intest, toProcess='YMDHm'):
if not toProcess:
return ds, intest
n = len(ds)
processable = {'Y':'Year', 'M': 'Month', 'D': 'DayOfMonth', 'H': 'Hour', 'm': 'Minute'}
ex = []
pos = 0
for key, value in processable.iteritems():
if key in toProcess:
intest.insert(pos,value)
ex.append(value)
pos += 1
for i in range(n):
date = ds[i]['Dates']
splitted_date = date.split(' ')
day, time = splitted_date[0].strip(), splitted_date[1].strip()
day = day.split('-')
time = time.split(':')
processed = {'Y': day[0], 'M': day[1], 'D': day[2], 'H': time[0], 'm': time[1]}
for key, value in processable.iteritems():
if key in toProcess:
ds[i][value] = int(processed[key])
printProgress(i, n)
return ds, intest, ex
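# A minimal usage sketch of processDate (illustrative helper, never called by the
# pipeline; the sample record below is made up):
def _example_process_date():
    ds = [{'Dates': '2014-03-09 18:45:00'}]
    intest = ['Dates']
    ds, intest, extracted = processDate(ds, intest, toProcess='YMH')
    # ds[0] now also carries {'Year': 2014, 'Month': 3, 'Hour': 18}; 'DayOfMonth'
    # and 'Minute' are skipped because 'D' and 'm' are absent from toProcess, and
    # `extracted` lists the column names that were added.
    return ds, intest, extracted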
def getCorrectCoordinates(ds):
n = len(ds)
for i in range(n):
x = float(ds[i]['X'])
y = float(ds[i]['Y'])
x_ok = False
y_ok = False
if (limit_X_min <= x <= limit_X_max):
x_ok = True
if (limit_Y_min <= y <= limit_Y_max):
y_ok = True
if not (x_ok) or not (y_ok):
if ds[i]['Address'] == 'FLORIDA ST / ALAMEDA ST':
ds[i]['Address'] = 'TREAT ST'
if ds[i]['Address'] == 'ARGUELLO BL / NORTHRIDGE DR':
ds[i]['Address'] = 'ARGUELLO BL'
ds[i]['X'], ds[i]['Y'] = get_coordinates(ds[i]['Address'] + ', SAN FRANCISCO')
ds[i]['X'], ds[i]['Y'] = str(ds[i]['X']), str(ds[i]['Y'])
time.sleep(0.2)
printProgress(i, n)
return ds
def coordinate_normalization(save_on_file = False):
print '\nCoordinate normalization'
sets = ['train','test']
result = []
for dset in sets:
df = pd.read_csv('./Dataset/' + dset + '.csv')
min_max_scaler = preprocessing.MinMaxScaler()
X_norm = min_max_scaler.fit_transform(df.X)
Y_norm = min_max_scaler.fit_transform(df.Y)
df_norm = pd.DataFrame({'X' : X_norm, 'Y' : Y_norm})
result.append(df_norm)
if save_on_file:
print 'Saving on file'+'./Dataset/'+dset+'_XYnorm.csv'
df_norm.to_csv(path_or_buf='./Dataset/' + dset + '_XYnorm.csv', sep=',', na_rep='', float_format=None, columns=None, header=True, index=False, index_label=None, mode='w', encoding=None, compression=None, quoting=None, quotechar='"', line_terminator='\n', chunksize=None, tupleize_cols=False, date_format=None, doublequote=True, escapechar=None, decimal='.')
print 'Done'
return result
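# The scaling applied above is the standard min-max formula, per coordinate column:
#   x_norm = (x - x_min) / (x_max - x_min)
# so every normalized X and Y value falls in [0, 1], computed separately for the
# train and test sets.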
def coordinate_quantization(side):
sets = ['train','test']
normalized = coordinate_normalization()
index = 0
print '\nCoordinate quantization'
for dset in sets:
df_X, df_Y = pd.DataFrame({'X': normalized[index].X}), pd.DataFrame({'Y': normalized[index].Y})
print 'creating ','./Dataset/' + dset + '_XYquant_' + str(side) + '.csv'
X_quant = KMeans(n_clusters=side, random_state=0).fit_predict(df_X.as_matrix())
print 'ended X coordinate'
Y_quant = KMeans(n_clusters=side, random_state=0).fit_predict(df_Y.as_matrix())
print 'ended Y coordinate'
print 'Saving on file: ./Dataset/'+dset+'_XYquant_'+str(side)+'.csv'
pd.DataFrame({'X' : X_quant, 'Y' : Y_quant}).to_csv(path_or_buf='./Dataset/' + dset + '_XYquant_' + str(side) + '.csv', sep=',', na_rep='', float_format=None, columns=None, header=True, index=False, index_label=None, mode='w', encoding=None, compression=None, quoting=None, quotechar='"', line_terminator='\n', chunksize=None, tupleize_cols=False, date_format=None, doublequote=True, escapechar=None, decimal='.')
index += 1
print 'Done'
# def ProcessQuantization(ds, intest, side, train=1):
# if train == 1:
# path = './Dataset/train' + '_XYquant_' + str(side) + '.csv'
# if not os.path.exists(path):
# coordinate_quantization(side)
# XYQuant, _ = dsFromCSV(path)
# if train == 0:
# path = './Dataset/test' + '_XYquant_' + str(side) + '.csv'
# if not os.path.exists(path):
# coordinate_quantization(side)
# XYQuant, _ = dsFromCSV(path)
#
# for row in XYQuant:
#
# return ds, intest
def getMinMax():
ds_train, intest = dsFromCSV("./Dataset/train.csv")
ds_test, intest_test = dsFromCSV("./Dataset/test.csv")
min_x = float(ds_train[0]['X'])
max_x = float(ds_train[0]['X'])
min_y = float(ds_train[0]['Y'])
max_y = float(ds_train[0]['Y'])
n = len(ds_train) + len(ds_test)
for i in range(len(ds_train)):
x = float(ds_train[i]['X'])
y = float(ds_train[i]['Y'])
if limit_X_min <= x <= limit_X_max:
if x < min_x:
min_x = x
if x > max_x:
max_x = x
if limit_Y_min <= y <= limit_Y_max:
if y < min_y:
min_y = y
if y > max_y:
max_y = y
printProgress(i, n)
for i in range(len(ds_test)):
x = float(ds_test[i]['X'])
y = float(ds_test[i]['Y'])
if limit_X_min <= x <= limit_X_max:
if x < min_x:
min_x = x
if x > max_x:
max_x = x
if limit_Y_min <= y <= limit_Y_max:
if y < min_y:
min_y = y
if y > max_y:
max_y = y
printProgress(i+len(ds_train), n)
return min_x, max_x, min_y, max_y | gpl-3.0 |
mattskone/garage_alarm | models.py | 2 | 1467 | """
A module to return a trained model.
This module can be invoked manually to train a new model from existing samples.
$ python ./models.py
"""
import logging
import os
import pickle
from sklearn import svm
from sklearn.cross_validation import cross_val_score
import config
import samples
logger = logging.getLogger(__name__)
def get_trained_model(use_current=True):
"""Return a trained classifier."""
if use_current:
return pickle.load(open(os.path.join(config.INSTALL_DIR,
config.MODEL_FILE_NAME)))
else:
return _get_new_trained_model()
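# A minimal usage sketch (assumes the pickled model in config.INSTALL_DIR already
# exists; `features` here stands for an illustrative 2-D array of samples):
#
#   from models import get_trained_model
#   clf = get_trained_model()          # load the pickled classifier
#   # clf = get_trained_model(False)   # or retrain from the sample directories
#   predictions = clf.predict(features)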
def _get_new_trained_model():
logger.info('Training new model')
training_samples, training_labels = samples.get_samples(
os.path.join(config.INSTALL_DIR, config.POSITIVE_SAMPLE_DIR),
os.path.join(config.INSTALL_DIR, config.NEGATIVE_SAMPLE_DIR))
model = svm.SVC(kernel='linear')
_score_model(model, training_samples, training_labels)
logger.info('Fitting new model')
model.fit(training_samples, training_labels)
return model
def _score_model(model, samples, labels):
print 'Scoring model...'
scores = cross_val_score(model, samples, labels, cv=5)
print 'Model accuracy score: {0:0.2f}'.format(scores.mean())
if __name__ == '__main__':
model = get_trained_model(False)
with open(os.path.join(config.INSTALL_DIR, config.MODEL_FILE_NAME), 'w') as f:
pickle.dump(model, f)
| mit |
neuroidss/nupic.research | projects/sensorimotor/experiments/capacity/data_utils.py | 22 | 6537 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Utilities to process and visualize data from the sensorimotor experiment
"""
import csv
import glob
import os
import sys
import matplotlib.pyplot as plt
from pylab import rcParams
def combineCsvFiles(directoryPath, outputFileName):
"""
  Combines all csv files in the specified path.
  All files are assumed to have a header row followed by data.
  The resulting file contains only one header row, with the data from all of
  the files combined.
  Caution: the csv files are iterated over in alphabetical order, so a file
  100.csv may be appended before a file 10.csv and disorder the combined data
  for plotting.
"""
appendHeader = True
# Create csv output writer
os.chdir(directoryPath)
with open(outputFileName, "wb") as outputFile:
csvWriter = csv.writer(outputFile)
# Iterate over csv files in directory
for csvFileName in glob.glob("*.csv"):
# Ignore and write over old version of the same file name
if csvFileName != outputFileName:
# Read each file writing the pertinent file lines to output
with open(csvFileName, "rU") as inputFile:
csvReader = csv.reader(inputFile)
line = next(csvReader)
if appendHeader:
csvWriter.writerow(line)
appendHeader = False
line = next(csvReader)
csvWriter.writerow(line)
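# Usage sketch (the paths are illustrative): combineCsvFiles("results/", "combined.csv")
# changes into results/, writes the header once, then appends the first data row of
# each remaining *.csv file to combined.csv; multi-row inputs would therefore be truncated.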
def getChartData(path, xDataColumnIdx, yDataColumnIdxs, yStdDevIdxs):
"""
Gets chart-ready data from the specified csv file
"""
assert len(yDataColumnIdxs) == len(yStdDevIdxs)
with open(path, "rU") as inputFile:
csvReader = csv.reader(inputFile)
# Get DV values
isHeader = True
xData = []
for row in csvReader:
if isHeader:
isHeader = False
else:
xData.append(float(row[xDataColumnIdx]))
# Get IVs' values
allYData = []
allYStdDevs = []
plotTitles = []
for i, yColIdx in enumerate(yDataColumnIdxs):
# Reset the file position to allow iterator reuse
inputFile.seek(0)
# build the y data and y std devs
yCol = []
yColStdDev = []
isHeader = True
stdDevIdx = yStdDevIdxs[i]
for row in csvReader:
if isHeader:
plotTitles.append(row[yColIdx])
else:
yCol.append(float(row[yColIdx]))
# Std Devs
if isHeader:
isHeader = False
elif stdDevIdx == -1:
yColStdDev.append(0)
else:
yColStdDev.append(float(row[stdDevIdx]))
allYData.append(yCol)
allYStdDevs.append(yColStdDev)
return xData, allYData, allYStdDevs, plotTitles
def getErrorbarFigures(title, X, Ys, stdDevs, plotTitles, xAxisLabel,
yAxisLabels, gridFormat):
"""
  Plots the specified data and returns the resulting errorbar figure.
"""
rcParams['figure.figsize'] = 15, 15
fig = plt.figure()
fig.suptitle(title)
fig.subplots_adjust(left=None, right=None, bottom=None, top=None,
wspace=None, hspace=0.35)
plt.ion()
plt.show()
rcParams.update({'font.size': 12})
for i, y in enumerate(Ys):
ax = fig.add_subplot(gridFormat + 1 + i)
ax.set_title(plotTitles[i])
ax.set_xlabel(xAxisLabel)
ax.set_ylabel(yAxisLabels[i])
ax.axis([0, max(X) + 10, 0, 20])
ax.errorbar(X, y, stdDevs[i])
return fig
def getErrorbarFigure(title, x, y, stdDevs, xAxisLabel, yAxisLabel,
xRangeMax=None, yRangeMax=None):
fig = plt.figure()
fig.suptitle(title)
fig.subplots_adjust(left=None, right=None, bottom=None, top=None,
wspace=None, hspace=0.35)
plt.ion()
plt.show()
ax = fig.add_subplot(111)
ax.set_xlabel(xAxisLabel)
ax.set_ylabel(yAxisLabel)
if xRangeMax is None:
xRangeMax = max(x) + 10
if yRangeMax is None:
yRangeMax = max(y) + 10
ax.axis([0, xRangeMax, 0, yRangeMax])
ax.errorbar(x, y, stdDevs)
plt.draw()
return fig
def plotSensorimotorExperimentResults(filesDir, outputFileName):
"""
Plots the data produced by
sensorimotor/experiments/capacity/run.py
"""
print "Combining csv's in: {0}".format(filesDir)
print "Output file name: {0}\n".format(outputFileName)
combineCsvFiles(filesDir, outputFileName + ".csv")
# 0 when number of worlds is IV
# 1 when number of elements is IV
xColumnIdx = 0
xAxisLabel = "Worlds"
yAxisLabels = ["Cells", "Cells", "Cells", "Cells", "Cols", "Cols"]
# Following indices are columns in the excel file produced by
# sensorimotor/experiments/capacity/run.py and represent the following
# metrics:
# Mean & Max Stability, Mean & Max Distinctness, Mean & Max Bursting Cols
yColumnIdxs = [11, 9, 16, 14, 46, 44]
# The following are the column indices in the same xls file for the std
# deviations of the metrics specified by yColumnIdxs. A -1 means the script
# won't plot a std dev for the corresponding metric.
yStdDevIdxs = [12, -1, 17, -1, 47, -1]
iv, dvs, stdDevs, metricTitles = getChartData(outputFileName + ".csv",
xColumnIdx, yColumnIdxs,
yStdDevIdxs)
# 3x2 subplot grid
gridFormat = 320
getErrorbarFigures(filesDir, iv, dvs, stdDevs, metricTitles,
xAxisLabel, yAxisLabels, gridFormat)
plt.savefig(outputFileName, bbox_inches="tight")
plt.draw()
raw_input("Press enter...")
if __name__ == "__main__":
if len(sys.argv) < 3:
print "Usage: ./data_utils.py FILES_DIR COMBINED_FILE_NAME"
sys.exit()
plotSensorimotorExperimentResults(sys.argv[1], sys.argv[2])
| agpl-3.0 |
lasigeBioTM/ssm | ssmpy/metrics.py | 1 | 13717 | from ssmpy.data import *
from ssmpy.calculations import *
import multiprocessing as mp
def get_all_commom_ancestors(all_ancestors, it1, it2):
"""
Get all common ancestors for it1 and it2
:param all_ancestors: pandas DataFrame of all ancestors
:param it1: entity 1 (id)
:param it2: entity 2 (id)
    :return: list of the common ancestor ids, or 0 if there are none
"""
# get all ancestors for it1 and it2
all_ancestors_it1_it2 = all_ancestors[(all_ancestors.entry1 == it1) | (all_ancestors.entry1 == it2)]
# common_ancestors = all_ancestors_it1_it2['entry2'].value_counts().reset_index(name="count").query("count > 1")[
# 'index'].to_list()
# get the common ancestors for it1 and it2
common_ancestors = all_ancestors_it1_it2.groupby('entry2')['entry1'].apply(
lambda x: x.unique()).reset_index()
common_ancestors = common_ancestors[common_ancestors['entry1'].str.len() > 1].entry2.to_list()
if len(common_ancestors) > 0:
return common_ancestors
else:
return 0
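# Worked example with toy ids: given ancestor pairs (entry1 -> entry2)
#   10 -> 1,  10 -> 2,  11 -> 2,  11 -> 3
# get_all_commom_ancestors(all_ancestors, 10, 11) keeps only the entry2 values
# reached from both inputs, returning [2]; it returns 0 when nothing is shared.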
def fast_resnik(all_ancestors, df_entry_ancestors, df_entry_ic, it1, it2):
"""
Calculates the RESNIK MICA INTRINSIC similarity between it1 and it2
:param all_ancestors: pandas DataFrame of all ancestors (from table transitive)
:param df_entry_ancestors: pandas DataFrame of all ancestors (from table entry) with column IC
:param df_entry_ic: pandas DataFrame of all entities (from table entry) with column IC
:param it1: entity 1 (id)
:param it2: entity 2 (id)
:return: list: [e1, e2, sim_resnik]
"""
if it1 == it2:
sim_resnik = df_entry_ic[df_entry_ic.id == it1].IC.to_list()[0]
else:
common_ancestors = get_all_commom_ancestors(all_ancestors, it1, it2)
if common_ancestors == 0:
sim_resnik = 0
else:
# get max ic for the common ancestors (MICA)
ic_ancestors = df_entry_ancestors[df_entry_ancestors.id.isin(common_ancestors)].IC
sim_resnik = ic_ancestors.max()
print((df_entry_ic[df_entry_ic.id == it1].name.to_list()[0]),
(df_entry_ic[df_entry_ic.id == it2].name.to_list()[0]), " RESNIK:MICA:INTRINSIC ",
sim_resnik)
return [df_entry_ic[df_entry_ic.id == it1].name.to_list()[0], df_entry_ic[df_entry_ic.id == it2].name.to_list()[0],
sim_resnik]
def fast_lin(all_ancestors, df_entry_ancestors, df_entry_ic, it1, it2):
"""
Calculates the LIN MICA INTRINSIC similarity between it1 and it2
:param all_ancestors: pandas DataFrame of all ancestors (from table transitive)
:param df_entry_ancestors: pandas DataFrame of all ancestors (from table entry) with column IC
:param df_entry_ic: pandas DataFrame of all entities (from table entry) with column IC
:param it1: entity 1 (id)
:param it2: entity 2 (id)
:return: list: [e1, e2, sim_lin]
"""
if it1 == it2:
sim_lin = 1
else:
common_ancestors = get_all_commom_ancestors(all_ancestors, it1, it2)
if common_ancestors == 0:
sim_lin = 0
else:
# get max ic for the common ancestors (MICA)
ic_ancestors_max = df_entry_ancestors[df_entry_ancestors.id.isin(common_ancestors)].IC.max()
sim_lin = (2 * ic_ancestors_max) / (
df_entry_ic[df_entry_ic.id == it1].IC.to_list()[0] +
df_entry_ic[df_entry_ic.id == it2].IC.to_list()[0])
# print((df[df.id == it1].name.to_list()[0]), (df[df.id == it2].name.to_list()[0]), " LIN:MICA:INTRINSIC ",
# sim_lin)
return [df_entry_ic[df_entry_ic.id == it1].name.to_list()[0], df_entry_ic[df_entry_ic.id == it2].name.to_list()[0],
sim_lin]
def fast_jc(all_ancestors, df_entry_ancestors, df_entry_ic, it1, it2):
"""
Calculates the JC MICA INTRINSIC similarity between it1 and it2
:param all_ancestors: pandas DataFrame of all ancestors (from table transitive)
:param df_entry_ancestors: pandas DataFrame of all ancestors (from table entry) with column IC
:param df_entry_ic: pandas DataFrame of all entities (from table entry) with column IC
:param it1: entity 1 (id)
:param it2: entity 2 (id)
:return: list: [e1, e2, sim_jc]
"""
if it1 == it2:
sim_jc = 1
else:
common_ancestors = get_all_commom_ancestors(all_ancestors, it1, it2)
if common_ancestors == 0:
ic_ancestors_max = 0
else:
# get max ic for the common ancestors (MICA)
ic_ancestors_max = df_entry_ancestors[df_entry_ancestors.id.isin(common_ancestors)].IC.max()
distance = df_entry_ic[df_entry_ic.id == it1].IC.to_list()[0] + df_entry_ic[df_entry_ic.id == it2].IC.to_list()[
0] - (
2 * ic_ancestors_max)
if distance > 0:
sim_jc = 1 / (distance + 1)
else:
sim_jc = 1
print((df_entry_ic[df_entry_ic.id == it1].name.to_list()[0]),
(df_entry_ic[df_entry_ic.id == it2].name.to_list()[0]), " JC:MICA:INTRINSIC ",
sim_jc)
return [df_entry_ic[df_entry_ic.id == it1].name.to_list()[0], df_entry_ic[df_entry_ic.id == it2].name.to_list()[0],
sim_jc]
def fast_resn_lin_jc(all_ancestors, df_entry_ancestors, df_entry_ic, it1, it2):
"""
Calculates the RESNIK, LIN and JC MICA INTRINSIC similarity between it1 and it2
:param all_ancestors: pandas DataFrame of all ancestors (from table transitive)
:param df_entry_ancestors: pandas DataFrame of all ancestors (from table entry) with column IC
:param df_entry_ic: pandas DataFrame of all entities (from table entry) with column IC
:param it1: entity 1 (id)
:param it2: entity 2 (id)
:return: list: [e1, e2, sim_resnik, sim_lin, sim_jc]
"""
if it1 == it2:
sim_resnik = df_entry_ic[df_entry_ic.id == it1].IC.to_list()[0]
sim_lin = 1
sim_jc = 1
else:
common_ancestors = get_all_commom_ancestors(all_ancestors, it1, it2)
if common_ancestors == 0:
sim_resnik = 0
sim_lin = 0
ic_ancestors_max_jc = 0
else:
# get max ic for the common ancestors (MICA)
ic_ancestors_resn = df_entry_ancestors[df_entry_ancestors.id.isin(common_ancestors)].IC
sim_resnik = ic_ancestors_resn.max()
ic_ancestors_max_lin = df_entry_ancestors[df_entry_ancestors.id.isin(common_ancestors)].IC.max()
ic_ancestors_max_jc = df_entry_ancestors[df_entry_ancestors.id.isin(common_ancestors)].IC.max()
sim_lin = (2 * ic_ancestors_max_lin) / (
df_entry_ic[df_entry_ic.id == it1].IC.to_list()[0] +
df_entry_ic[df_entry_ic.id == it2].IC.to_list()[0])
distance = df_entry_ic[df_entry_ic.id == it1].IC.to_list()[0] + df_entry_ic[df_entry_ic.id == it2].IC.to_list()[
0] - (
2 * ic_ancestors_max_jc)
if distance > 0:
sim_jc = 1 / (distance + 1)
else:
sim_jc = 1
return [df_entry_ic[df_entry_ic.id == it1].name.to_list()[0], df_entry_ic[df_entry_ic.id == it2].name.to_list()[0],
sim_resnik, sim_lin, sim_jc]
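# The three measures computed above, written out (MICA = most informative common
# ancestor, IC = intrinsic information content):
#   sim_Resnik(c1, c2) = IC(MICA)
#   sim_Lin(c1, c2)    = 2 * IC(MICA) / (IC(c1) + IC(c2))
#   sim_JC(c1, c2)     = 1 / (IC(c1) + IC(c2) - 2 * IC(MICA) + 1)
# Toy numbers: IC(c1) = 3, IC(c2) = 4, IC(MICA) = 2 give Resnik = 2,
# Lin = 4 / 7 ~ 0.571 and JC = 1 / (3 + 1) = 0.25.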
# def light_similarity(conn, entry_ids_1, entry_ids_2, metric):
# """
# main function
# :param conn: db_connection
# :param entry_ids_1: list of entries 1
# :param entry_ids_2: list of entries 2
# :param metric: 'lin', 'resnick', 'jc' or 'all'
# :return: list with results ([e1, e2, similarity] or [e1, e2, similarity resnik, similarity lin, similarity jc])
# """
#
# results = []
#
# # concatenate both list of entries
# entry_ids = np.unique(np.array(entry_ids_1 + entry_ids_2)).tolist()
#
# df_entry = db_select_entry(conn, entry_ids)
#
# # ids for each name in both lists
# ids_list = df_entry.id.values.flatten()
#
# # ids for each list
# ids_list_1 = df_entry[df_entry.name.isin(entry_ids_1)].id.values.flatten()
# ids_list_2 = df_entry[df_entry.name.isin(entry_ids_2)].id.values.flatten()
#
# if np.array_equal(ids_list_1, ids_list_2):
#
# # get all the ancestors for test_ids in transitive table
# all_ancestors = db_select_transitive(conn, ids_list)
#
# # get all ancestors in entry table
# df_entry_ancestors = db_select_entry_by_id(conn, all_ancestors.entry2.values.flatten())
#
# # get max freq used for calculating the Information Content (IC)
# max_freq = get_max_dest(conn)
#
# # calculates Ic for original IDs
# df_entry_ic = calculate_information_content_intrinsic(df_entry, max_freq)
#
# # calculates IC for all ancestors
# df_entry_ancestors = calculate_information_content_intrinsic(df_entry_ancestors, max_freq)
#
# if metric == 'resnik':
#
# for it1 in ids_list_1:
# pool = mp.Pool(20)
# results.append(pool.starmap(fast_resnik,
# [(all_ancestors, df_entry_ancestors, df_entry_ic, it1, it2) for it2 in
# ids_list_1]))
#
# pool.close()
#
# mask = np.where(ids_list_1 == it1)
# ids_list_1 = np.delete(ids_list_1, mask)
#
# elif metric == 'lin':
# count = 0
# for it1 in ids_list_1:
# print(count, ..., len(ids_list_1))
# count += 1
#
# pool = mp.Pool(20)
# results.append(pool.starmap(fast_lin,
# [(all_ancestors, df_entry_ancestors, df_entry_ic, it1, it2) for it2 in
# ids_list_1]))
#
# pool.close()
#
# mask = np.where(ids_list_1 == it1)
# ids_list_1 = np.delete(ids_list_1, mask)
#
# elif metric == 'jc':
#
# for it1 in ids_list_1:
# pool = mp.Pool(20)
# results.append(pool.starmap(fast_jc,
# [(all_ancestors, df_entry_ancestors, df_entry_ic, it1, it2) for it2 in
# ids_list_1]))
#
# pool.close()
#
# mask = np.where(ids_list_1 == it1)
# ids_list_1 = np.delete(ids_list_1, mask)
#
#
# elif metric == 'all':
#
# for it1 in ids_list_1:
# pool = mp.Pool(20)
#
# results.append(pool.starmap(fast_resn_lin_jc,
# [(all_ancestors, df_entry_ancestors, df_entry_ic, it1, it2) for it2 in
# ids_list_1]))
#
# pool.close()
# mask = np.where(ids_list_1 == it1)
# ids_list_1 = np.delete(ids_list_1, mask)
#
# else:
#
# # get all the ancestors for test_ids in transitive table
# all_ancestors = db_select_transitive(conn, ids_list)
#
# # get all ancestors in entry table
# df_entry_ancestors = db_select_entry_by_id(conn, all_ancestors.entry2.values.flatten())
#
# # get max freq used for calculating the Information Content (IC)
# max_freq = get_max_dest(conn)
#
# # calculates Ic for original IDs
# df_entry_ic = calculate_information_content_intrinsic(df_entry, max_freq)
#
# # calculates IC for all ancestors
# df_entry_ancestors = calculate_information_content_intrinsic(df_entry_ancestors, max_freq)
#
# if metric == 'resnik':
#
# for it1 in ids_list_1:
# pool = mp.Pool(20)
# results.append(pool.starmap(fast_resnik,
# [(all_ancestors, df_entry_ancestors, df_entry_ic, it1, it2) for it2 in
# ids_list_2]))
#
# pool.close()
#
# mask = np.where(ids_list_1 == it1)
# ids_list_1 = np.delete(ids_list_1, mask)
#
# elif metric == 'lin':
# count = 0
# for it1 in ids_list_1:
# print(count, ..., len(ids_list_1))
# count += 1
#
# pool = mp.Pool(20)
# results.append(pool.starmap(fast_lin,
# [(all_ancestors, df_entry_ancestors, df_entry_ic, it1, it2) for it2 in
# ids_list_2]))
#
# pool.close()
#
# mask = np.where(ids_list_1 == it1)
# ids_list_1 = np.delete(ids_list_1, mask)
#
# elif metric == 'jc':
#
# for it1 in ids_list_1:
# pool = mp.Pool(20)
# results.append(pool.starmap(fast_jc,
# [(all_ancestors, df_entry_ancestors, df_entry_ic, it1, it2) for it2 in
# ids_list_2]))
#
# pool.close()
#
# mask = np.where(ids_list_1 == it1)
# ids_list_1 = np.delete(ids_list_1, mask)
#
#
# elif metric == 'all':
#
# for it1 in ids_list_1:
# pool = mp.Pool(20)
#
# results.append(pool.starmap(fast_resn_lin_jc,
# [(all_ancestors, df_entry_ancestors, df_entry_ic, it1, it2) for it2 in
# ids_list_2]))
#
# pool.close()
# mask = np.where(ids_list_1 == it1)
# ids_list_1 = np.delete(ids_list_1, mask)
#
# return results
| apache-2.0 |
jakobworldpeace/scikit-learn | sklearn/cluster/setup.py | 79 | 1855 | # Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import os
from os.path import join
import numpy
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
cblas_libs, blas_info = get_blas_info()
libraries = []
if os.name == 'posix':
cblas_libs.append('m')
libraries.append('m')
config = Configuration('cluster', parent_package, top_path)
config.add_extension('_dbscan_inner',
sources=['_dbscan_inner.pyx'],
include_dirs=[numpy.get_include()],
language="c++")
config.add_extension('_hierarchical',
sources=['_hierarchical.pyx'],
language="c++",
include_dirs=[numpy.get_include()],
libraries=libraries)
config.add_extension('_k_means_elkan',
sources=['_k_means_elkan.pyx'],
include_dirs=[numpy.get_include()],
libraries=libraries)
config.add_extension('_k_means',
libraries=cblas_libs,
sources=['_k_means.pyx'],
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
extra_compile_args=blas_info.pop(
'extra_compile_args', []),
**blas_info
)
config.add_subpackage('tests')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
j-lord/beefy_bash | colors.py | 1 | 1110 | from __future__ import division
import matplotlib.pyplot as plt
from matplotlib import colors as mcolors
colors = dict(mcolors.BASE_COLORS, **mcolors.CSS4_COLORS)
# Sort colors by hue, saturation, value and name.
by_hsv = sorted((tuple(mcolors.rgb_to_hsv(mcolors.to_rgba(color)[:3])), name)
for name, color in colors.items())
sorted_names = [name for hsv, name in by_hsv]
n = len(sorted_names)
ncols = 4
nrows = n // ncols + 1
fig, ax = plt.subplots(figsize=(8, 5))
# Get height and width
X, Y = fig.get_dpi() * fig.get_size_inches()
h = Y / (nrows + 1)
w = X / ncols
for i, name in enumerate(sorted_names):
col = i % ncols
row = i // ncols
y = Y - (row * h) - h
xi_line = w * (col + 0.05)
xf_line = w * (col + 0.25)
xi_text = w * (col + 0.3)
ax.text(xi_text, y, name, fontsize=(h * 0.8),
horizontalalignment='left',
verticalalignment='center')
ax.hlines(y + h * 0.1, xi_line, xf_line,
color=colors[name], linewidth=(h * 0.6))
ax.set_xlim(0, X)
ax.set_ylim(0, Y)
ax.set_axis_off()
fig.subplots_adjust(left=0, right=1,
top=1, bottom=0,
hspace=0, wspace=0)
plt.show()
| mit |
RRShieldsCutler/clusterpluck | clusterpluck/scripts/blastp_to_matrix.py | 1 | 6813 | #!/usr/bin/env python
#
# Robin Shields-Cutler
# August 2016
# takes standard blast output TSV (outfmt 6-- *.b6 or *.txt, etc) and stores entries in dictionary,
# then writes to dataframe and exports as CSV
usage = 'blastp_to_matrix.py -i BLASTOUT.b6 -s SCORE_METHOD -t THRESHOLD -o OUTFILE.csv'
import argparse
import os
import csv
import pandas as pd
import numpy as np
import re
from collections import defaultdict
def make_arg_parser():
parser = argparse.ArgumentParser(description='Convert blastp output txt table to a scores matrix in csv format')
parser.add_argument('-i', '--input', help='The blast output file to process.', required=True, type=str)
parser.add_argument('-s', '--score', help='Which score to enter into matrix: "pident", "evalue", or "bitscore", or "justnorm" if using R to make the matrix', required=False, type=str, default='bitscore')
parser.add_argument('-t', '--threshold', help='The threshold (float) for entry into matrix.', required=False, type=float, default=1)
parser.add_argument('-o', '--output', help='Where to put the output (CSV or h5)', required=False, type=str, default='blastp_matrixform.csv')
parser.add_argument('-n', '--normalize', help='Normalize bitscore to score of self-self for each cluster (as 100).', action='store_true', required=False, default=False)
parser.add_argument('-r', '--spread', help='The spread matrix from R', required=False)
parser.add_argument('--genbank', help='If the result uses genbank IDs not refseq', action='store_true', required=False, default=False)
return parser
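# Example invocation built from the flags defined above (the file names are illustrative):
#   python blastp_to_matrix.py -i all_vs_all.b6 -s bitscore -t 60 -o scores.csv -n
# keeps hits with bitscore > 60 and writes a matrix normalized so that each
# cluster's self-match scores 100.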
def ofu_tree_parsing(infile, s_method, t):
sparse_blast_id_dict = defaultdict(dict)
with open(infile) as blast_inf:
# next(blast_inf)
blast_tsv = csv.reader(blast_inf, delimiter='\t')
# line[0] query name, line[1] = reference name, line[2] = % match, line[10] = e-value, line[11] = bitscore
if s_method == 'justnorm':
self_match_dict = defaultdict(dict)
for line in blast_tsv:
m = line[0]
n = line[1]
# mref = m.group(1, 2)
# nref = n.group(1, 2)
# # print(mref)
bvalue = np.float(line[11])
if m == n:
self_match_dict[line[0]] = bvalue
elif s_method == 'bitscore':
self_match_dict = defaultdict(dict)
for line in blast_tsv:
m = line[0]
n = line[1]
# mref = m.group(1, 2)
# nref = n.group(1, 2)
# # print(mref)
bvalue = np.float(line[11])
if m == n:
self_match_dict[line[0]] = bvalue
if bvalue > t:
sparse_blast_id_dict[line[0]][line[1]] = bvalue
# TODO: use the evalue of perfect matches to normalize the data
elif s_method == 'evalue':
for line in blast_tsv:
# p = re.compile(r'(\w+_[\w+\d+]*\.\d)(_\w+\d\d\d)(_ctg\d_orf\d+)')
# m = p.search(line[0])
# n = p.search(line[1])
# mref = m.group(1)
# nref = n.group(1)
# cname = ''.join(m.group(1, 2, 3))
# rname = ''.join(n.group(1, 2, 3))
# if mref == nref:
# pass
# else:
evalue = np.float(line[10])
if evalue < t:
sparse_blast_id_dict[line[0]][line[1]] = evalue
elif s_method == 'pident':
for line in blast_tsv:
# p = re.compile(r'(\w+_[\w+\d+]*\.\d)(_\w+\d\d\d)(_ctg\d_orf\d+)')
# m = p.search(line[0])
# n = p.search(line[1])
# mref = m.group(1, 2)
# nref = n.group(1, 2)
# cname = ''.join(m.group(1, 2, 3))
# rname = ''.join(n.group(1, 2, 3))
# if mref == nref:
# pass
# else:
ivalue = np.float(line[2])
if ivalue > t:
sparse_blast_id_dict[line[0]][line[1]] = ivalue
return sparse_blast_id_dict
def main():
parser = make_arg_parser()
args = parser.parse_args()
if args.genbank:
s_method = args.score
t = args.threshold
infile = args.input
sparse_blast_id_dict = ofu_tree_parsing(infile, s_method, t)
else:
sparse_blast_id_dict = defaultdict(dict)
with open(args.input) as blast_inf:
# next(blast_inf)
blast_tsv = csv.reader(blast_inf, delimiter='\t')
# line[0] query name, line[1] = reference name, line[2] = % match, line[10] = e-value, line[11] = bitscore
if args.score == 'justnorm':
self_match_dict = defaultdict(dict)
for line in blast_tsv:
p = re.compile(r'(\w+_[\w+\d+]*\.\d)(_\w+\d\d\d)(_ctg\d+_orf\d+)')
m = p.search(line[0])
n = p.search(line[1])
# mref = m.group(1, 2)
# nref = n.group(1, 2)
cname = ''.join(m.group(1, 2, 3))
rname = ''.join(n.group(1, 2, 3))
# # print(mref)
bvalue = np.float(line[11])
if cname == rname:
self_match_dict[line[0]] = bvalue
elif args.score == 'bitscore':
self_match_dict = defaultdict(dict)
for line in blast_tsv:
p = re.compile(r'(\w+_[\w+\d+]*\.\d)(_\w+\d\d\d)(_ctg\d+_orf\d+)')
m = p.search(line[0])
n = p.search(line[1])
# mref = m.group(1, 2)
# nref = n.group(1, 2)
cname = ''.join(m.group(1, 2, 3))
rname = ''.join(n.group(1, 2, 3))
# # print(mref)
bvalue = np.float(line[11])
if cname == rname:
self_match_dict[line[0]] = bvalue
if bvalue > args.threshold:
sparse_blast_id_dict[line[0]][line[1]] = bvalue
# TODO: use the evalue of perfect matches to normalize the data
elif args.score == 'evalue':
for line in blast_tsv:
# p = re.compile(r'(\w+_[\w+\d+]*\.\d)(_\w+\d\d\d)(_ctg\d_orf\d+)')
# m = p.search(line[0])
# n = p.search(line[1])
# mref = m.group(1)
# nref = n.group(1)
# cname = ''.join(m.group(1, 2, 3))
# rname = ''.join(n.group(1, 2, 3))
# if mref == nref:
# pass
# else:
evalue = np.float(line[10])
if evalue < args.threshold:
sparse_blast_id_dict[line[0]][line[1]] = evalue
elif args.score == 'pident':
for line in blast_tsv:
# p = re.compile(r'(\w+_[\w+\d+]*\.\d)(_\w+\d\d\d)(_ctg\d_orf\d+)')
# m = p.search(line[0])
# n = p.search(line[1])
# mref = m.group(1, 2)
# nref = n.group(1, 2)
# cname = ''.join(m.group(1, 2, 3))
# rname = ''.join(n.group(1, 2, 3))
# if mref == nref:
# pass
# else:
ivalue = np.float(line[2])
if ivalue > args.threshold:
sparse_blast_id_dict[line[0]][line[1]] = ivalue
if args.score == 'justnorm':
with open(args.spread, 'r') as inf:
df = pd.read_csv(inf, header=0, index_col=0, engine='c')
else:
df = pd.DataFrame.from_dict(sparse_blast_id_dict)
df.sort_index(axis=0, inplace=True)
df.sort_index(axis=1, inplace=True)
# print(df.shape[0])
if args.normalize:
vals = []
for cluster in list(df.columns):
vals.append(self_match_dict[cluster])
df = df / vals * 100
df = df.round(decimals=1)
# print(len(vals))
# Check if a matrix is symmetric
# arr = df.values
# print((arr.transpose() == -arr).all())
if args.output.endswith('.csv'):
df.to_csv(args.output)
else:
df.to_hdf(args.output, 'table')
if __name__ == '__main__':
main()
| mit |
rseubert/scikit-learn | sklearn/metrics/tests/test_ranking.py | 11 | 37239 | from __future__ import division, print_function
import numpy as np
from itertools import product
import warnings
from sklearn import datasets
from sklearn import svm
from sklearn import ensemble
from sklearn.datasets import make_multilabel_classification
from sklearn.random_projection import sparse_random_matrix
from sklearn.utils.validation import check_array, check_consistent_length
from sklearn.utils.validation import check_random_state
from sklearn.utils.testing import assert_raises, clean_warning_registry
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.metrics import auc
from sklearn.metrics import average_precision_score
from sklearn.metrics import coverage_error
from sklearn.metrics import label_ranking_average_precision_score
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
from sklearn.metrics.base import UndefinedMetricWarning
###############################################################################
# Utilities for testing
def make_prediction(dataset=None, binary=False):
"""Make some classification predictions on a toy dataset using a SVC
If binary is True restrict to a binary classification problem instead of a
multiclass classification problem
"""
if dataset is None:
# import some data to play with
dataset = datasets.load_iris()
X = dataset.data
y = dataset.target
if binary:
# restrict to a binary classification task
X, y = X[y < 2], y[y < 2]
n_samples, n_features = X.shape
p = np.arange(n_samples)
rng = check_random_state(37)
rng.shuffle(p)
X, y = X[p], y[p]
half = int(n_samples / 2)
# add noisy features to make the problem harder and avoid perfect results
rng = np.random.RandomState(0)
X = np.c_[X, rng.randn(n_samples, 200 * n_features)]
# run classifier, get class probabilities and label predictions
clf = svm.SVC(kernel='linear', probability=True, random_state=0)
probas_pred = clf.fit(X[:half], y[:half]).predict_proba(X[half:])
if binary:
# only interested in probabilities of the positive case
# XXX: do we really want a special API for the binary case?
probas_pred = probas_pred[:, 1]
y_pred = clf.predict(X[half:])
y_true = y[half:]
return y_true, y_pred, probas_pred
###############################################################################
# Tests
def _auc(y_true, y_score):
"""Alternative implementation to check for correctness of
`roc_auc_score`."""
pos_label = np.unique(y_true)[1]
# Count the number of times positive samples are correctly ranked above
# negative samples.
pos = y_score[y_true == pos_label]
neg = y_score[y_true != pos_label]
diff_matrix = pos.reshape(1, -1) - neg.reshape(-1, 1)
n_correct = np.sum(diff_matrix > 0)
return n_correct / float(len(pos) * len(neg))
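# Quick check of the pairwise definition above: with y_true = [0, 0, 1, 1] and
# y_score = [0.1, 0.4, 0.35, 0.8], the positive/negative score pairs are
# (0.35, 0.1), (0.35, 0.4), (0.8, 0.1), (0.8, 0.4); three of the four are ranked
# correctly, so _auc returns 3 / 4 = 0.75, matching roc_auc_score.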
def _average_precision(y_true, y_score):
"""Alternative implementation to check for correctness of
`average_precision_score`."""
pos_label = np.unique(y_true)[1]
n_pos = np.sum(y_true == pos_label)
order = np.argsort(y_score)[::-1]
y_score = y_score[order]
y_true = y_true[order]
score = 0
for i in range(len(y_score)):
if y_true[i] == pos_label:
# Compute precision up to document i
# i.e, percentage of relevant documents up to document i.
prec = 0
for j in range(0, i + 1):
if y_true[j] == pos_label:
prec += 1.0
prec /= (i + 1.0)
score += prec
return score / n_pos
def test_roc_curve():
"""Test Area under Receiver Operating Characteristic (ROC) curve"""
y_true, _, probas_pred = make_prediction(binary=True)
fpr, tpr, thresholds = roc_curve(y_true, probas_pred)
roc_auc = auc(fpr, tpr)
expected_auc = _auc(y_true, probas_pred)
assert_array_almost_equal(roc_auc, expected_auc, decimal=2)
assert_almost_equal(roc_auc, roc_auc_score(y_true, probas_pred))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_end_points():
# Make sure that roc_curve returns a curve start at 0 and ending and
# 1 even in corner cases
rng = np.random.RandomState(0)
y_true = np.array([0] * 50 + [1] * 50)
y_pred = rng.randint(3, size=100)
fpr, tpr, thr = roc_curve(y_true, y_pred)
assert_equal(fpr[0], 0)
assert_equal(fpr[-1], 1)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thr.shape)
def test_roc_returns_consistency():
"""Test whether the returned threshold matches up with tpr"""
# make small toy dataset
y_true, _, probas_pred = make_prediction(binary=True)
fpr, tpr, thresholds = roc_curve(y_true, probas_pred)
# use the given thresholds to determine the tpr
tpr_correct = []
for t in thresholds:
tp = np.sum((probas_pred >= t) & y_true)
p = np.sum(y_true)
tpr_correct.append(1.0 * tp / p)
# compare tpr and tpr_correct to see if the thresholds' order was correct
assert_array_almost_equal(tpr, tpr_correct, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_nonrepeating_thresholds():
"""Test to ensure that we don't return spurious repeating thresholds.
Duplicated thresholds can arise due to machine precision issues.
"""
dataset = datasets.load_digits()
X = dataset['data']
y = dataset['target']
# This random forest classifier can only return probabilities
# significant to two decimal places
clf = ensemble.RandomForestClassifier(n_estimators=100, random_state=0)
# How well can the classifier predict whether a digit is less than 5?
# This task contributes floating point roundoff errors to the probabilities
train, test = slice(None, None, 2), slice(1, None, 2)
probas_pred = clf.fit(X[train], y[train]).predict_proba(X[test])
y_score = probas_pred[:, :5].sum(axis=1) # roundoff errors begin here
y_true = [yy < 5 for yy in y[test]]
# Check for repeating values in the thresholds
fpr, tpr, thresholds = roc_curve(y_true, y_score)
assert_equal(thresholds.size, np.unique(np.round(thresholds, 2)).size)
def test_roc_curve_multi():
"""roc_curve not applicable for multi-class problems"""
y_true, _, probas_pred = make_prediction(binary=False)
assert_raises(ValueError, roc_curve, y_true, probas_pred)
def test_roc_curve_confidence():
"""roc_curve for confidence scores"""
y_true, _, probas_pred = make_prediction(binary=True)
fpr, tpr, thresholds = roc_curve(y_true, probas_pred - 0.5)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.90, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_hard():
"""roc_curve for hard decisions"""
y_true, pred, probas_pred = make_prediction(binary=True)
# always predict one
trivial_pred = np.ones(y_true.shape)
fpr, tpr, thresholds = roc_curve(y_true, trivial_pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.50, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# always predict zero
trivial_pred = np.zeros(y_true.shape)
fpr, tpr, thresholds = roc_curve(y_true, trivial_pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.50, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# hard decisions
fpr, tpr, thresholds = roc_curve(y_true, pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.78, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_one_label():
y_true = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
y_pred = [0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
# assert there are warnings
w = UndefinedMetricWarning
fpr, tpr, thresholds = assert_warns(w, roc_curve, y_true, y_pred)
# all true labels, all fpr should be nan
assert_array_equal(fpr,
np.nan * np.ones(len(thresholds)))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# assert there are warnings
fpr, tpr, thresholds = assert_warns(w, roc_curve,
[1 - x for x in y_true],
y_pred)
# all negative labels, all tpr should be nan
assert_array_equal(tpr,
np.nan * np.ones(len(thresholds)))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_toydata():
# Binary classification
y_true = [0, 1]
y_score = [0, 1]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [1, 1])
assert_almost_equal(roc_auc, 1.)
y_true = [0, 1]
y_score = [1, 0]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1, 1])
assert_array_almost_equal(fpr, [0, 0, 1])
assert_almost_equal(roc_auc, 0.)
y_true = [1, 0]
y_score = [1, 1]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [0, 1])
assert_almost_equal(roc_auc, 0.5)
y_true = [1, 0]
y_score = [1, 0]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [1, 1])
assert_almost_equal(roc_auc, 1.)
y_true = [1, 0]
y_score = [0.5, 0.5]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [0, 1])
assert_almost_equal(roc_auc, .5)
y_true = [0, 0]
y_score = [0.25, 0.75]
tpr, fpr, _ = roc_curve(y_true, y_score)
assert_raises(ValueError, roc_auc_score, y_true, y_score)
assert_array_almost_equal(tpr, [0., 0.5, 1.])
assert_array_almost_equal(fpr, [np.nan, np.nan, np.nan])
y_true = [1, 1]
y_score = [0.25, 0.75]
tpr, fpr, _ = roc_curve(y_true, y_score)
assert_raises(ValueError, roc_auc_score, y_true, y_score)
assert_array_almost_equal(tpr, [np.nan, np.nan])
assert_array_almost_equal(fpr, [0.5, 1.])
# Multi-label classification task
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [0, 1]])
assert_raises(ValueError, roc_auc_score, y_true, y_score, average="macro")
assert_raises(ValueError, roc_auc_score, y_true, y_score,
average="weighted")
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 1.)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 1.)
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_raises(ValueError, roc_auc_score, y_true, y_score, average="macro")
assert_raises(ValueError, roc_auc_score, y_true, y_score,
average="weighted")
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 0.5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 0.5)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_almost_equal(roc_auc_score(y_true, y_score, average="macro"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="weighted"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 0)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0.5, 0.5], [0.5, 0.5]])
assert_almost_equal(roc_auc_score(y_true, y_score, average="macro"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="weighted"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), .5)
def test_auc():
"""Test Area Under Curve (AUC) computation"""
x = [0, 1]
y = [0, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [1, 0]
y = [0, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [1, 0, 0]
y = [0, 1, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [0, 1]
y = [1, 1]
assert_array_almost_equal(auc(x, y), 1)
x = [0, 0.5, 1]
y = [0, 0.5, 1]
assert_array_almost_equal(auc(x, y), 0.5)
def test_auc_duplicate_values():
# Test Area Under Curve (AUC) computation with duplicate values
# auc() was previously sorting the x and y arrays according to the indices
# from numpy.argsort(x), which was reordering the tied 0's in this example
# and resulting in an incorrect area computation. This test detects the
# error.
x = [-2.0, 0.0, 0.0, 0.0, 1.0]
y1 = [2.0, 0.0, 0.5, 1.0, 1.0]
y2 = [2.0, 1.0, 0.0, 0.5, 1.0]
y3 = [2.0, 1.0, 0.5, 0.0, 1.0]
for y in (y1, y2, y3):
assert_array_almost_equal(auc(x, y, reorder=True), 3.0)
def test_auc_errors():
# Incompatible shapes
assert_raises(ValueError, auc, [0.0, 0.5, 1.0], [0.1, 0.2])
# Too few x values
assert_raises(ValueError, auc, [0.0], [0.1])
# x is not in order
assert_raises(ValueError, auc, [1.0, 0.0, 0.5], [0.0, 0.0, 0.0])
def test_auc_score_non_binary_class():
"""Test that roc_auc_score function returns an error when trying
to compute AUC for non-binary class values.
"""
rng = check_random_state(404)
y_pred = rng.rand(10)
# y_true contains only one class value
y_true = np.zeros(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = -np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
# y_true contains three different class values
y_true = rng.randint(0, 3, size=10)
assert_raise_message(ValueError, "multiclass format is not supported",
roc_auc_score, y_true, y_pred)
clean_warning_registry()
with warnings.catch_warnings(record=True):
rng = check_random_state(404)
y_pred = rng.rand(10)
# y_true contains only one class value
y_true = np.zeros(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = -np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
# y_true contains three different class values
y_true = rng.randint(0, 3, size=10)
assert_raise_message(ValueError, "multiclass format is not supported",
roc_auc_score, y_true, y_pred)
def test_precision_recall_curve():
y_true, _, probas_pred = make_prediction(binary=True)
_test_precision_recall_curve(y_true, probas_pred)
# Use {-1, 1} for labels; make sure original labels aren't modified
y_true[np.where(y_true == 0)] = -1
y_true_copy = y_true.copy()
_test_precision_recall_curve(y_true, probas_pred)
assert_array_equal(y_true_copy, y_true)
labels = [1, 0, 0, 1]
predict_probas = [1, 2, 3, 4]
p, r, t = precision_recall_curve(labels, predict_probas)
assert_array_almost_equal(p, np.array([0.5, 0.33333333, 0.5, 1., 1.]))
assert_array_almost_equal(r, np.array([1., 0.5, 0.5, 0.5, 0.]))
assert_array_almost_equal(t, np.array([1, 2, 3, 4]))
assert_equal(p.size, r.size)
assert_equal(p.size, t.size + 1)
def test_precision_recall_curve_pos_label():
y_true, _, probas_pred = make_prediction(binary=False)
pos_label = 2
p, r, thresholds = precision_recall_curve(y_true,
probas_pred[:, pos_label],
pos_label=pos_label)
p2, r2, thresholds2 = precision_recall_curve(y_true == pos_label,
probas_pred[:, pos_label])
assert_array_almost_equal(p, p2)
assert_array_almost_equal(r, r2)
assert_array_almost_equal(thresholds, thresholds2)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
def _test_precision_recall_curve(y_true, probas_pred):
"""Test Precision-Recall and aread under PR curve"""
p, r, thresholds = precision_recall_curve(y_true, probas_pred)
precision_recall_auc = auc(r, p)
assert_array_almost_equal(precision_recall_auc, 0.85, 2)
assert_array_almost_equal(precision_recall_auc,
average_precision_score(y_true, probas_pred))
assert_almost_equal(_average_precision(y_true, probas_pred),
precision_recall_auc, 1)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
# Smoke test in the case of proba having only one value
p, r, thresholds = precision_recall_curve(y_true,
np.zeros_like(probas_pred))
precision_recall_auc = auc(r, p)
assert_array_almost_equal(precision_recall_auc, 0.75, 3)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
def test_precision_recall_curve_errors():
# Contains non-binary labels
assert_raises(ValueError, precision_recall_curve,
[0, 1, 2], [[0.0], [1.0], [1.0]])
def test_precision_recall_curve_toydata():
with np.errstate(all="raise"):
# Binary classification
y_true = [0, 1]
y_score = [0, 1]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [1, 1])
assert_array_almost_equal(r, [1, 0])
assert_almost_equal(auc_prc, 1.)
y_true = [0, 1]
y_score = [1, 0]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 0., 1.])
assert_array_almost_equal(r, [1., 0., 0.])
assert_almost_equal(auc_prc, 0.25)
y_true = [1, 0]
y_score = [1, 1]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 1])
assert_array_almost_equal(r, [1., 0])
assert_almost_equal(auc_prc, .75)
y_true = [1, 0]
y_score = [1, 0]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [1, 1])
assert_array_almost_equal(r, [1, 0])
assert_almost_equal(auc_prc, 1.)
y_true = [1, 0]
y_score = [0.5, 0.5]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 1])
assert_array_almost_equal(r, [1, 0.])
assert_almost_equal(auc_prc, .75)
y_true = [0, 0]
y_score = [0.25, 0.75]
assert_raises(Exception, precision_recall_curve, y_true, y_score)
assert_raises(Exception, average_precision_score, y_true, y_score)
y_true = [1, 1]
y_score = [0.25, 0.75]
p, r, _ = precision_recall_curve(y_true, y_score)
assert_almost_equal(average_precision_score(y_true, y_score), 1.)
assert_array_almost_equal(p, [1., 1., 1.])
assert_array_almost_equal(r, [1, 0.5, 0.])
# Multi-label classification task
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [0, 1]])
assert_raises(Exception, average_precision_score, y_true, y_score,
average="macro")
assert_raises(Exception, average_precision_score, y_true, y_score,
average="weighted")
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 1.)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 1.)
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_raises(Exception, average_precision_score, y_true, y_score,
average="macro")
assert_raises(Exception, average_precision_score, y_true, y_score,
average="weighted")
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.625)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.625)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_almost_equal(average_precision_score(y_true, y_score,
average="macro"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="weighted"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.25)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0.5, 0.5], [0.5, 0.5]])
assert_almost_equal(average_precision_score(y_true, y_score,
average="macro"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="weighted"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.75)
def test_score_scale_invariance():
# Test that average_precision_score and roc_auc_score are invariant by
# the scaling or shifting of probabilities
y_true, _, probas_pred = make_prediction(binary=True)
roc_auc = roc_auc_score(y_true, probas_pred)
roc_auc_scaled = roc_auc_score(y_true, 100 * probas_pred)
roc_auc_shifted = roc_auc_score(y_true, probas_pred - 10)
assert_equal(roc_auc, roc_auc_scaled)
assert_equal(roc_auc, roc_auc_shifted)
pr_auc = average_precision_score(y_true, probas_pred)
pr_auc_scaled = average_precision_score(y_true, 100 * probas_pred)
pr_auc_shifted = average_precision_score(y_true, probas_pred - 10)
assert_equal(pr_auc, pr_auc_scaled)
assert_equal(pr_auc, pr_auc_shifted)
def check_lrap_toy(lrap_score):
"""Check on several small example that it works """
assert_almost_equal(lrap_score([[0, 1]], [[0.25, 0.75]]), 1)
assert_almost_equal(lrap_score([[0, 1]], [[0.75, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[1, 1]], [[0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.25, 0.5, 0.75]]), 1 / 2)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.25, 0.5, 0.75]]), 1 / 3)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.25, 0.5, 0.75]]),
(2 / 3 + 1 / 1) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.25, 0.5, 0.75]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.75, 0.5, 0.25]]), 1 / 3)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.75, 0.5, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.75, 0.5, 0.25]]),
(1 / 2 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.75, 0.5, 0.25]]),
(1 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.5, 0.75, 0.25]]), 1 / 3)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.5, 0.75, 0.25]]),
(1 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.5, 0.75, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.5, 0.75, 0.25]]),
(1 / 2 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.5, 0.75, 0.25]]), 1)
# Tie handling
assert_almost_equal(lrap_score([[1, 0]], [[0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1]], [[0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[1, 1]], [[0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.25, 0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.25, 0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 1 / 3)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.25, 0.5, 0.5]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.25, 0.5, 0.5]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.5, 0.5, 0.5]]), 2 / 3)
assert_almost_equal(lrap_score([[1, 1, 1, 0]], [[0.5, 0.5, 0.5, 0.5]]),
3 / 4)
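    # With fully tied scores, every label gets the worst rank in the tie
    # (i.e. rank n_labels), so each relevant label contributes
    # n_relevant / n_labels and the score is n_relevant / n_labels,
    # e.g. 2 / 3 and 3 / 4 in the last two checks above.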
def check_zero_or_all_relevant_labels(lrap_score):
random_state = check_random_state(0)
for n_labels in range(2, 5):
y_score = random_state.uniform(size=(1, n_labels))
y_score_ties = np.zeros_like(y_score)
# No relevant labels
y_true = np.zeros((1, n_labels))
assert_equal(lrap_score(y_true, y_score), 1.)
assert_equal(lrap_score(y_true, y_score_ties), 1.)
# Only relevant labels
y_true = np.ones((1, n_labels))
assert_equal(lrap_score(y_true, y_score), 1.)
assert_equal(lrap_score(y_true, y_score_ties), 1.)
# Degenerate case: only one label
assert_almost_equal(lrap_score([[1], [0], [1], [0]],
[[0.5], [0.5], [0.5], [0.5]]), 1.)
def check_lrap_error_raised(lrap_score):
    # Raise a ValueError if the input is not in an appropriate format
assert_raises(ValueError, lrap_score,
[0, 1, 0], [0.25, 0.3, 0.2])
assert_raises(ValueError, lrap_score, [0, 1, 2],
[[0.25, 0.75, 0.0], [0.7, 0.3, 0.0], [0.8, 0.2, 0.0]])
assert_raises(ValueError, lrap_score, [(0), (1), (2)],
[[0.25, 0.75, 0.0], [0.7, 0.3, 0.0], [0.8, 0.2, 0.0]])
    # Check that y_true.shape != y_score.shape raises the proper exception
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [0, 1])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0, 1]])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0], [1]])
assert_raises(ValueError, lrap_score, [[0, 1]], [[0, 1], [0, 1]])
assert_raises(ValueError, lrap_score, [[0], [1]], [[0, 1], [0, 1]])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0], [1]])
def check_lrap_only_ties(lrap_score):
"""Check tie handling in score"""
# Basic check with only ties and increasing label space
for n_labels in range(2, 10):
y_score = np.ones((1, n_labels))
        # Check for a growing number of consecutive relevant labels
for n_relevant in range(1, n_labels):
# Check for a bunch of positions
for pos in range(n_labels - n_relevant):
y_true = np.zeros((1, n_labels))
y_true[0, pos:pos + n_relevant] = 1
assert_almost_equal(lrap_score(y_true, y_score),
n_relevant / n_labels)
def check_lrap_without_tie_and_increasing_score(lrap_score):
""" Check that Label ranking average precision works for various"""
# Basic check with increasing label space size and decreasing score
for n_labels in range(2, 10):
y_score = n_labels - (np.arange(n_labels).reshape((1, n_labels)) + 1)
# First and last
y_true = np.zeros((1, n_labels))
y_true[0, 0] = 1
y_true[0, -1] = 1
assert_almost_equal(lrap_score(y_true, y_score),
(2 / n_labels + 1) / 2)
        # Check for a growing number of consecutive relevant labels
for n_relevant in range(1, n_labels):
            # Check for a bunch of positions
for pos in range(n_labels - n_relevant):
y_true = np.zeros((1, n_labels))
y_true[0, pos:pos + n_relevant] = 1
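                # With strictly decreasing scores, the relevant label at position
                # pos + r has rank pos + r + 1 and exactly r + 1 relevant labels
                # ranked at or above it, so the expected score is the mean of
                # (r + 1) / (pos + r + 1) over the n_relevant relevant labels.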
assert_almost_equal(lrap_score(y_true, y_score),
sum((r + 1) / ((pos + r + 1) * n_relevant)
for r in range(n_relevant)))
def _my_lrap(y_true, y_score):
"""Simple implementation of label ranking average precision"""
check_consistent_length(y_true, y_score)
y_true = check_array(y_true)
y_score = check_array(y_score)
n_samples, n_labels = y_true.shape
score = np.empty((n_samples, ))
for i in range(n_samples):
        # The best rank corresponds to 1. Ranks higher than 1 are worse.
        # The best inverse ranking corresponds to n_labels.
unique_rank, inv_rank = np.unique(y_score[i], return_inverse=True)
n_ranks = unique_rank.size
rank = n_ranks - inv_rank
        # Ranks need to be corrected to take ties into account,
        # e.g. two labels tied (ex aequo) at rank 1 are both counted as rank 2.
corr_rank = np.bincount(rank, minlength=n_ranks + 1).cumsum()
rank = corr_rank[rank]
relevant = y_true[i].nonzero()[0]
if relevant.size == 0 or relevant.size == n_labels:
score[i] = 1
continue
score[i] = 0.
for label in relevant:
            # Count the number of relevant labels ranked at or above this one
            # (i.e. with a smaller or equal rank).
n_ranked_above = sum(rank[r] <= rank[label] for r in relevant)
# Weight by the rank of the actual label
score[i] += n_ranked_above / rank[label]
score[i] /= relevant.size
return score.mean()
def check_alternative_lrap_implementation(lrap_score, n_classes=5,
n_samples=20, random_state=0):
_, y_true = make_multilabel_classification(n_features=1,
allow_unlabeled=False,
return_indicator=True,
random_state=random_state,
n_classes=n_classes,
n_samples=n_samples)
# Score with ties
y_score = sparse_random_matrix(n_components=y_true.shape[0],
n_features=y_true.shape[1],
random_state=random_state)
if hasattr(y_score, "toarray"):
y_score = y_score.toarray()
score_lrap = label_ranking_average_precision_score(y_true, y_score)
score_my_lrap = _my_lrap(y_true, y_score)
assert_almost_equal(score_lrap, score_my_lrap)
# Uniform score
random_state = check_random_state(random_state)
y_score = random_state.uniform(size=(n_samples, n_classes))
score_lrap = label_ranking_average_precision_score(y_true, y_score)
score_my_lrap = _my_lrap(y_true, y_score)
assert_almost_equal(score_lrap, score_my_lrap)
def test_label_ranking_avp():
for fn in [label_ranking_average_precision_score, _my_lrap]:
yield check_lrap_toy, fn
yield check_lrap_without_tie_and_increasing_score, fn
yield check_lrap_only_ties, fn
yield check_zero_or_all_relevant_labels, fn
yield check_lrap_error_raised, label_ranking_average_precision_score
for n_samples, n_classes, random_state in product((1, 2, 8, 20),
(2, 5, 10),
range(1)):
yield (check_alternative_lrap_implementation,
label_ranking_average_precision_score,
n_classes, n_samples, random_state)
def test_coverage_error():
# Toy case
assert_almost_equal(coverage_error([[0, 1]], [[0.25, 0.75]]), 1)
assert_almost_equal(coverage_error([[0, 1]], [[0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1]], [[0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[0, 0]], [[0.75, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.25, 0.5, 0.75]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.25, 0.5, 0.75]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.25, 0.5, 0.75]]), 2)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.75, 0.5, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.75, 0.5, 0.25]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.75, 0.5, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.5, 0.75, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.5, 0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.5, 0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.5, 0.75, 0.25]]), 3)
    # Non-trivial cases
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0]],
[[0.1, 10., -3], [0, 1, 3]]),
(1 + 3) / 2.)
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [0, 1, 3], [0, 2, 0]]),
(1 + 3 + 3) / 3.)
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [3, 1, 3], [0, 2, 0]]),
(1 + 3 + 3) / 3.)
def test_coverage_tie_handling():
assert_almost_equal(coverage_error([[0, 0]], [[0.5, 0.5]]), 0)
assert_almost_equal(coverage_error([[1, 0]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[1, 1]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.25, 0.5, 0.5]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 3)
| bsd-3-clause |
balbinot/genkai | sandbox/bmf.py | 1 | 4478 | #!/usr/bin/env python
#-*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, division
import numpy as np
from scipy.special import factorial  # scipy.misc.factorial was removed in newer SciPy releases
import matplotlib.pyplot as p
from gencmd.model import SSP
from gencmd.util.phot_sys import col_dict, ecoeff
import emcee
from triangle import corner
def sig(x, a, b, c):
return a + np.exp((x-b)/c)
def interp_model(X,Y,Z):
from scipy.interpolate import interp2d
return interp2d(X, Y, Z, kind='linear', fill_value=0, bounds_error=False)
def interp_model_spl(X, Y, Z):
from scipy.interpolate import RectBivariateSpline
return RectBivariateSpline(X, Y, Z)
def gen_mock():
idir = "/scratch/isocdir/"
nobs = 30
nbg = 1000
nmod = 2000000
dmod = 16.0
age = 10.08
Z = 0.015/100
## Mock with some reasonable magnitude limits
mmin, mmax = 12, 24
cmin, cmax = -0.5, 2.0
mycl = SSP(age, Z, mf='kroupa', isocdir=idir, a=[2.35], mlim=[0.4,1.2])
_, ostars = mycl.populate(n=nobs)
_, mstars = mycl.populate(n=nmod)
## Add some photometric errors
g = ostars[:,3] + dmod
g += np.random.randn(len(g))*sig(g, *ecoeff['des']['g'])
r = ostars[:,4] + dmod
r += np.random.randn(len(r))*sig(r, *ecoeff['des']['r'])
bg = mmin + (mmax - mmin)*np.random.rand(nbg)
bgr = cmin + (cmax - cmin)*np.random.rand(nbg)
br = -(bgr - bg)
#p.plot(bg-br, br, 'k.', ms=1)
#p.show()
#exit()
I = (bg-br<cmax)&(bg-br>cmin)&(bg<mmax)&(bg>mmin)
bg = bg[np.where(I)]
br = br[np.where(I)]
np.savetxt('sim_bg.dat', np.c_[bg,br], fmt='%.3f %.3f'.split())
I = (g-r<cmax)&(g-r>cmin)&(g<mmax)&(g>mmin)
g = g[np.where(I)]
r = r[np.where(I)]
np.savetxt('sim_cl.dat', np.c_[g,r], fmt='%.3f %.3f'.split())
mg = mstars[:,3] + dmod
mg += np.random.randn(len(mg))*sig(mg, *ecoeff['des']['g'])
mr = mstars[:,4] + dmod
mr += np.random.randn(len(mr))*sig(mr, *ecoeff['des']['r'])
fcl,xl,yl = np.histogram2d(mg-mr, mg, bins=(80,160),
range=[[cmin,cmax],[mmin,mmax]], normed=True)
np.savez('cl_model.npz', fcl=fcl, xl=xl, yl=yl)
def lhood(P, F, B, obs, N):
l = P[0]
g = P[1]
col = obs[0]
mag = obs[1]
#f = F(col, mag)
#b = B(col, mag)
f = F
b = B
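    # Mixture-model sketch: l and g are the expected numbers of cluster and
    # background stars, while f and b are the cluster and background CMD
    # densities evaluated at each star. prob below is the per-star cluster
    # membership probability; since 1 - prob = g*b/(l*f + g*b), the return
    # value reduces to the extended log-likelihood -(l + g) + sum(log(l*f + g*b)).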
if l < 0 or g < 0 or l+g > len(col)+8*np.sqrt(len(col)):
return -np.Inf
else:
prob = l*f/(l*f + g*b)
prob = np.ma.array(prob)
lnL = -(l + g) - np.sum(np.ma.log((1-prob)/(g*b)))
return lnL
gen_mock()
g,r = np.loadtxt('sim_cl.dat', unpack=True)
bg,br = np.loadtxt('sim_bg.dat', unpack=True)
x = np.load('cl_model.npz')
fcl, xl, yl = x['fcl'], x['xl'], x['yl']
true_cl = len(g)
true_bg = len(bg)
true_ntot = true_cl + true_bg
print(true_cl, true_bg, true_ntot)
#p.plot(bg-br, bg, 'ro')
#p.plot(g-r, g, 'ko')
#p.show()
#g = np.r_[g,bg]
#r = np.r_[r,br]
g = bg
r = br
from scipy.ndimage import gaussian_filter as gs
fcl = gs(fcl, 2)
fcl /= np.sum(fcl)
fbg = np.ones_like(fcl)
fbg /= np.sum(fbg)
ext = [xl[0], xl[-1], yl[-1], yl[0]]
tx = (xl[1:] + xl[:-1])/2
ty = (yl[1:] + yl[:-1])/2
F = np.vectorize(interp_model_spl(tx,ty,fcl))
B = np.vectorize(interp_model_spl(tx,ty,fbg))
#nx = cmin + (cmax-cmin)*np.random.rand(10000)
#ny = mmin + (mmax-mmin)*np.random.rand(10000)
#tmpb = B(nx,ny)
#p.scatter(nx, ny, c=tmpb, s=30, lw=0)
#p.colorbar()
#p.show()
#tmpb = np.array(F(ny,nx))
#p.imshow(tmpb, extent=ext, vmin=0)
#p.colorbar()
#p.show()
c = g - r
m = g
C = c
M = m
f = F(C, M)
b = B(C, M)
Num = true_ntot + 8*np.sqrt(true_ntot)
ndim, nwalkers = 2, 8
p0 = [[true_cl + 5*np.random.randn(),true_bg + 5*np.random.randn()] for i in
range(nwalkers)]
#p0 = [*np.random.rand(ndim) for i in range(nwalkers)]
#sampler = emcee.EnsembleSampler(nwalkers, ndim, lhood, args=[F, B, [C,M], Num],
# threads=8)
sampler = emcee.EnsembleSampler(nwalkers, ndim, lhood, args=[f, b, [C,M], Num],
threads=8)
pos, prob, state = sampler.run_mcmc(p0, 100)
sampler.reset()
sampler.run_mcmc(pos, 1500)
lbl = [r'$N_{cl}$', r'$N_{bg}$']
chain = sampler.flatchain
corner(chain, labels=lbl, truths=[true_cl, true_bg])
for i, n in enumerate(lbl):
p.figure()
p.plot(chain[:,i], 'k-', alpha=0.3)
p.xlabel('Iter')
p.ylabel(n)
print(n, np.median(chain[:,i]), u'±', np.std(chain[:,i]))
p.figure()
p.plot(c, m, 'k.', ms=1)
p.ylim(p.ylim()[::-1])
p.show()
| bsd-3-clause |
Chris7/django-djangui | djangui/tests/scripts/heatmap.py | 6 | 1803 | #!/usr/bin/env python
__author__ = 'chris'
import argparse
import os
import sys
import pandas as pd
import seaborn as sns
import numpy as np
parser = argparse.ArgumentParser(description="Create a heatmap from a delimited file.")
parser.add_argument('--tsv', help='The delimited file to plot.', type=argparse.FileType('r'), required=True)
parser.add_argument('--delimiter', help='The delimiter for fields. Default: tab', type=str, default='\t')
parser.add_argument('--row', help='The column containing row to create a heatmap from. Default to first row.', type=str)
parser.add_argument('--cols', help='The columns to choose values from (separate by a comma for multiple). Default: All non-rows', type=str)
parser.add_argument('--log-normalize', help='Whether to log normalize data.', action='store_true')
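# Example invocation (file and column names here are illustrative only):
#   python heatmap.py --tsv expression.tsv --row gene --cols sample1,sample2 --log-normalize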
def main():
args = parser.parse_args()
data = pd.read_table(args.tsv, index_col=args.row if args.row else 0, sep=args.delimiter, encoding='utf-8')
if args.cols:
try:
data = data.loc[:,args.cols.split(',')]
except KeyError:
data = data.iloc[:,[int(i)-1 for i in args.cols.split(',')]]
    if len(data.columns) > 50:
        raise ValueError('Too many columns')
data = np.log2(data) if args.log_normalize else data
data[data==-1*np.inf] = data[data!=-1*np.inf].min().min()
    width = 5 + (0 if len(data.columns) < 50 else (len(data.columns) - 50) / 100)
    row_cutoff = 1000
    height = 15 + (0 if len(data) < row_cutoff else (len(data) - row_cutoff) / 75.0)
seaborn_map = sns.clustermap(data, figsize=(width, height))
seaborn_map.savefig('{}_heatmap.png'.format(os.path.split(args.tsv.name)[1]))
seaborn_map.data2d.to_csv('{}_heatmap.tsv'.format(os.path.split(args.tsv.name)[1]), sep='\t')
if __name__ == "__main__":
sys.exit(main())
| gpl-3.0 |
mikeireland/pyhermes | hermes.py | 1 | 83192 | """This module contains the HERMES data reduction class.
"""
#Example setup analysis for all channels:
#import hermes
#hermes.go_all('/Users/mireland/data/hermes/140310/data/', '/Users/mireland/tel/hermes/140310/', '/Users/mireland/python/pyhermes/cal/')
#Example setup analysis for a full night: blue
#hm = hermes.HERMES('/Users/mireland/data/hermes/140310/data/ccd_1/', '/Users/mireland/tel/hermes/140310/ccd_1/', '/Users/mireland/python/pyhermes/cal/ccd_1/')
#Example setup analysis for a full night: green.
#hm = hermes.HERMES('/Users/mireland/data/hermes/140310/data/ccd_2/', '/Users/mireland/tel/hermes/140310/ccd_2/', '/Users/mireland/python/pyhermes/cal/ccd_2/')
#Example setup analysis for a full night: red.
#hm = hermes.HERMES('/Users/mireland/data/hermes/140310/data/ccd_3/', '/Users/mireland/tel/hermes/140310/ccd_3/', '/Users/mireland/python/pyhermes/cal/ccd_3/')
#Example setup analysis for a full night: ir.
#hm = hermes.HERMES('/Users/mireland/data/hermes/140310/data/ccd_4/', '/Users/mireland/tel/hermes/140310/ccd_4/', '/Users/mireland/python/pyhermes/cal/ccd_4/')
#Then go!
#hm.go()
from __future__ import print_function, division
try:
import pyfits
except:
import astropy.io.fits as pyfits
try:
from PyAstronomy import pyasl
barycorr = True
except:
print("WARNING: PyAstronomy is required for barycentric corrections, or combining multiple epochs.")
barycorr = False
import numpy as np
import matplotlib.pyplot as plt
import scipy.ndimage as nd
import matplotlib.cm as cm
import time
import glob
import os
import threading
from multiprocessing import Process
import pdb
import sys
class HERMES():
"""The HERMES Class. It must always be initiated with
a data, reduction and calibration directory.
Parameters
----------
ddir: string
Raw data directory
rdir: string
Reduction directory
    cdir: string
        Calibration directory
gdir: string (optional)
GALAH collaboration standard output directory. If not given, the rdir is used.
"""
def __init__(self, ddir, rdir, cdir, gdir=''):
self.ddir = ddir
self.rdir = rdir
self.cdir = cdir
if gdir=='':
self.gdir = rdir
else:
self.gdir = gdir
#Each release should increment this number.
self.release = 0.1
#A dictionary of central wavelengths (can be changed)
#This comes from the SPECTID
self.fixed_wave0 = {'BL':4700.0,'GN':5630.0,'RD':6460.0,'RR':7570.0}
self.ccd_nums = {'BL':'1','GN':'2','RD':'3','RR':'4'}
self.fixed_R = 200000
self.fixed_nwave = 9000
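        #These fixed values define the common output wavelength grids built in
        #create_barycentric_spectra: a log grid wave0*exp(i/R) and a linear grid
        #wave0*(1 + i/R), each with fixed_nwave points, i.e. a per-pixel spacing
        #of wave0/R (roughly 0.02-0.04 Angstroms) spanning about 4.6% of wave0.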
def basic_process(self, infile):
"""Read in the file, correct the over-scan region (and do anything else
that can be done prior to bias subtraction (e.g. removal of obscure
readout artifacts/pattern noise could go here as an option)
Parameters
----------
infile: The input filename (no directory)
Returns
-------
d: (ny,nx) array
"""
d=pyfits.getdata(self.ddir + infile)
header=pyfits.getheader(self.ddir + infile)
overscan_mns = np.mean(d[:,header['WINDOXE1']:],axis=1)
d = d[:,:header['WINDOXE1']]
for i in range(d.shape[0]):
d[i,:] -= overscan_mns[i]
return d
def find_badpix(self, infiles, var_threshold=10.0, med_threshold=10.0):
"""Find the bad pixels from a set of dark files. All we care about are pixels
that vary a lot or are very hot. We will give pixels the benefit of the doubt if they are
only bright once (e.g. cosmic ray during readout...)
Parameters
----------
infiles: An array of input filenames
var_threshold: float (optional)
A pixel has to have a variance more than var_threshold times the median to be
considered bad (NB the frame with largest flux is removed in this calculation,
in case of cosmic rays)
med_threshold: float (optional)
A pixel has to have a value more than med_threshold above the median to be
considered bad. This should be particularly relevant for finding hot pixels
in darks.
Returns
-------
badpix: float array
The 2D image which is 0 for good pixels and 1 for bad pixels.
"""
header = pyfits.getheader(self.ddir + infiles[0])
nx = header['WINDOXE1']
ny = header['NAXIS2']
nf = len(infiles)
if nf < 4:
print("ERROR: At least 4 files needed to find bad pixels")
raise UserWarning
        #NB floating point, not uint8, so the median subtraction below does not truncate or overflow.
        cube = np.zeros((nf,ny,nx))
for i in range(nf):
cube[i,:,:] = self.basic_process(infiles[i])
medim = np.median(cube, axis=0)
varcube = np.zeros((ny,nx))
for i in range(nf):
cube[i,:,:] -= medim
varcube += cube[i,:,:]**2
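        #Exclude each pixel's single largest deviation (most likely a cosmic ray
        #in one exposure) before estimating the variance from the remaining
        #nf - 1 frames, hence the nf - 2 normalisation below.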
maxcube = np.amax(cube,axis=0)
varcube -= maxcube**2
varcube /= (nf-2)
medvar = np.median(varcube)
medval = np.median(medim)
medsig = np.sqrt(medvar)
print("Median pixel standard deviation: " + str(medsig))
ww = np.where( (varcube > var_threshold*medvar) * (medim > med_threshold*medsig + medval) )
print(str(len(ww[0])) + " bad pixels identified.")
badpix = np.zeros((ny,nx),dtype=np.uint8)
badpix[ww]=1
for i in range(nf):
header['HISTORY'] = 'Input: ' + infiles[i]
hl = pyfits.HDUList()
hl.append(pyfits.ImageHDU(badpix,header))
hl.writeto(self.rdir+'badpix.fits',clobber=True)
return badpix
def median_combine(self, infiles, outfile):
"""Median combine a set of files. Most useful for creating a master bias.
Parameters
----------
infiles: string array
Input files
outfile: string
The output file (goes in the reduction directory rdir)
Returns
-------
image: float array
The median combined image.
"""
header = pyfits.getheader(self.ddir + infiles[0])
nx = header['WINDOXE1']
ny = header['NAXIS2']
nf = len(infiles)
cube = np.zeros((nf,ny,nx))
for i in range(nf):
cube[i,:,:] = self.basic_process(infiles[i])
medcube = np.median(cube, axis=0)
for i in range(nf):
header['HISTORY'] = 'Input: ' + infiles[i]
hl = pyfits.HDUList()
hl.append(pyfits.ImageHDU(medcube.astype('f4'),header))
hl.writeto(self.rdir+outfile,clobber=True)
return medcube
def clobber_cosmic(self, image, threshold=3.0):
"""Remove cosmic rays and reset the pixel values to something sensible.
As a general rule, cosmic rays should be flagged rather than clobbered,
but this routine is here as a placeholder."""
smoothim = nd.filters.median_filter(image,size=5)
ww = np.where(image > smoothim*threshold)
image[ww] = smoothim[ww]
return image
def make_cube_and_bad(self,infiles, badpix=[], threshold=6.0, mad_smooth=64):
"""Based on at least 2 input files, find outlying bright pixels and flag them
as bad.
Parameters
----------
infiles: string array
The array of input files to be cubed and have their bad pixels flagged.
mad_smooth: int, optional
The distance in the x-direction that the median absolute deviation (MAD)
is smoothed over in order to determine image statistics
threshold: float, optional
The threshold in units of standard deviation to identify bad pixels.
Notes
-----
        Roughly a third of this routine's run time is spent before computing
        reference_im and another third is spread over several lines of the loop
        below, so it is tricky to optimise.
"""
if len(infiles) < 2:
print("Error: make_cube_and_bad needs at least 2 input files")
raise UserWarning
if len(badpix)==0:
badpix = pyfits.getdata(self.cdir + 'badpix.fits')
header = pyfits.getheader(self.ddir + infiles[0])
szy = header['NAXIS2']
szx = header['WINDOXE1']
if (szx % mad_smooth != 0):
print("ERROR: x axis length must be divisible by mad_smooth")
raise UserWarning
cube = np.empty((len(infiles),szy,szx))
normalised_cube = np.empty((len(infiles),szy,szx))
bad = np.empty((len(infiles),szy,szx), dtype=np.uint8)
for i,infile in enumerate(infiles):
im = self.basic_process(infile)
cube[i,:,:]=im
normalised_cube[i,:,:]= im/np.median(im)
bad[i,:,:] = badpix
# Create a mean image that ignores the maximum pixel values over all images (i.e. cosmic rays)
reference_im = (np.sum(normalised_cube,axis=0) - np.max(normalised_cube,axis=0))/(len(infiles) - 1.0)
szy = cube.shape[1]
        # Look for bad pixels (hot or cosmic rays) by empirically finding pixels that deviate
        # unusually from the max-excluded mean (reference) image computed above.
for i in range(len(infiles)):
diff = normalised_cube[i,:,:] - reference_im
#Entire rows can't be used for a row_deviation, as tramlines curve. But
#a pretty large section can be used. Unfortunately, a straight median filter
#on a large section is slow...
row_deviation = np.abs(diff).reshape( (szy,szx//mad_smooth, mad_smooth) )
row_deviation = np.repeat(np.median(row_deviation, axis=2),mad_smooth).reshape( (szy,szx) )
tic = time.time()
#Slow line... even with only 21 pixels.
# row_deviation = nd.filters.median_filter(np.abs(diff),size=[1,21])
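            #The 1.4826 factor converts a median absolute deviation into an
            #equivalent Gaussian standard deviation, so 'threshold' is in sigma.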
ww = np.where(diff > 1.4826*threshold*row_deviation)
bad[i,ww[0],ww[1]] = 1
#Another slow alternative...
# row_deviation = np.median(np.abs(diff),axis=1)
# for j in range(szy):
# ww = np.where(diff[j,:] > 1.4826*threshold*row_deviation[j])[0]
# if len(ww)>0:
# bad[i,j,ww]=1
# #for w in ww:
return cube,bad
def make_psf(self,npix=15, oversamp=3, fibre_radius=2.5, optics_psf_fwhm=1.0):
"""Make a 1-dimensional collapsed PSF provile based on the physics of image
formation. This does not include effects of variable magnification across the chip...
(i.e. it applies to a Littrow image).
Parameters
----------
npix: int, optional
The number of pixels to create the PSF profile. Must be odd.
oversamp: int, optional
The oversampling factor for the PSF. In order for interpolation to work
reasonably well, 3 is a default.
fibre_radius: float, optional
The radius of the fiber in pixels.
optics_psf_fwhm: float, optional
The FWHM of a Gaussian approximation to the optical aberration function.
Returns
-------
A point-spread function normalised to the maximum value.
Notes
-----
This is far from complete: the PSF is variable etc, but it is a good
approximation
"""
x = ( np.arange(npix*oversamp) - npix*oversamp//2 )/float(oversamp)
psf = np.sqrt( np.maximum( fibre_radius**2 - x**2 ,0) )
g = np.exp(-x**2/2/(optics_psf_fwhm/2.35482)**2 )
psf = np.convolve(psf,g, mode='same')
psf = np.convolve(psf,np.ones(oversamp), mode='same')
return psf/np.max(psf)
def extract(self,infiles,cube=[],flux_on_ron_var=10.0,fix_badpix=True,badpix=[]):
"""Extract spectra from an image or a cube. Note that we do *not* apply a fibre flat
        at this point, because that can only be done after (optional) cross-talk and scattered-light
correction.
Parameters
----------
infiles: string array
An array of input filenames
cube: float array, optional
The data, coming from a cleaned version of the input files.
flux_on_ron_var: float, optional
The target pixel flux divided by the readout noise variance. This determines
the (fixed) extraction profile - extraction is only optimal for this value
of signal-to-noise. It should be set to the minimum useable signal level.
fix_badpix: bool, optional
Does this routine attempt to fix bad pixels?
badpix: float array, optional
            A *cube* of bad pixels (including a flag of where cosmic rays are
            in each frame).
Returns
-------
(flux,sigma): (float array, float array)
The extracted flux, and the standard deviation of the extracted flux.
"""
if fix_badpix:
if len(badpix)==0:
badpix = pyfits.getdata(self.cdir + 'badpix.fits')
return_im=False
if len(cube)==0:
cube = []
for infile in infiles:
cube.append(self.basic_process(infile))
cube = np.array(cube)
if len(infiles)==1:
return_im=True
header = pyfits.getheader(self.ddir + infiles[0])
ftable = pyfits.getdata(self.ddir + infiles[0],1)
if len(cube.shape) == 2:
return_im=True
cube = cube[None,:]
if len(badpix.shape) == 2:
badpix = badpix[None,:]
#Now create the extraction subimages.
oversamp = 3
nslitlets=40
nfibres=10
npix_extract=15
nx = cube.shape[2]
psf = self.make_psf(npix=npix_extract, oversamp=oversamp)
        #The following index arrays are centred on zero at the middle pixel.
y_ix_oversamp = np.arange(oversamp*npix_extract) - oversamp*npix_extract//2
y_ix = ( np.arange(npix_extract) - npix_extract//2 )*oversamp
#The extracted flux
extracted_flux = np.zeros((cube.shape[0],nfibres*nslitlets,nx))
extracted_sigma = np.zeros((cube.shape[0],nfibres*nslitlets,nx))
#Much of this is copied from fit_tramlines - so could be streamlined !!!
try:
p_tramline = np.loadtxt(self.rdir + 'tramlines_p' + header['SOURCE'][6] + '.txt')
except:
print("No tramline file in reduction directory! Using default from calibration directory")
p_tramline = np.loadtxt(self.cdir + 'tramlines_p' + header['SOURCE'][6] + '.txt')
#Make a matrix that maps p_tramline numbers to dy
tramline_matrix = np.zeros((nfibres,nx,4))
for i in range(nfibres):
tramline_matrix[i,:,0] = np.arange(nx)**2
tramline_matrix[i,:,1] = np.arange(nx)
tramline_matrix[i,:,2] = np.ones( nx )
for k in range(nx):
tramline_matrix[:,k,3] = np.arange(nfibres)+0.5-nfibres//2
psfim = np.zeros((npix_extract,nx))
psfim_yix = np.repeat(np.arange(npix_extract)*oversamp + oversamp//2,nx).reshape((npix_extract,nx))
print("Beginning extraction...")
for i in range(nslitlets):
ypix = np.dot(tramline_matrix, p_tramline[i,:])
ypix_int = np.mean(ypix,axis=1).astype(int)
ypix_int = np.maximum(ypix_int,npix_extract//2)
ypix_int = np.minimum(ypix_int,cube.shape[1]-npix_extract//2)
for j in range(nfibres):
                #This image has an odd number of pixels. Let's extract in units of electrons, not DN.
subims = cube[:,ypix_int[j] - npix_extract//2:ypix_int[j] + npix_extract//2 + 1,:]*header['RO_GAIN']
subbad = badpix[:,ypix_int[j] - npix_extract//2:ypix_int[j] + npix_extract//2 + 1,:]
#Start off with a slow interpolation for simplicity. Now removed...
#for k in range(nx):
# psfim[:,k] = np.interp(y_ix - (ypix[j,k] - ypix_int[j])*oversamp, y_ix_oversamp, psf)
#A fast interpolation... A centered PSF will have ypix=ypix_int
ypix_diff_oversamp = -(ypix[j,:] - ypix_int[j])*oversamp
ypix_diff_int = np.floor(ypix_diff_oversamp)
ypix_diff_rem = ypix_diff_oversamp - ypix_diff_int
ix0 = (psfim_yix + np.tile(ypix_diff_int,npix_extract).reshape((npix_extract,nx))).astype(int)
ix1 = ix0+1
ix0 = np.maximum(ix0,0)
ix0 = np.minimum(ix0,npix_extract*oversamp-1)
ix1 = np.maximum(ix1,0)
ix1 = np.minimum(ix1,npix_extract*oversamp-1)
frac = np.tile(ypix_diff_rem,npix_extract).reshape((npix_extract,nx))
psfim = psf[ix0]*(1 - frac) + psf[ix1]*frac
#Now we turn the PSF into weights
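                #For a pixel value ~ flux*psf + read noise, the inverse-variance weight is
                #proportional to psf/(flux*psf + RON^2), i.e. F*psf/(1 + F*psf) up to a
                #constant, where F = flux_on_ron_var is the assumed flux / readout variance.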
weights = flux_on_ron_var*psfim/(1 + flux_on_ron_var*psfim)
psfim /= np.sum(psfim,axis=0)
for cube_ix in range( cube.shape[0] ):
good_weights = weights*(1-subbad[cube_ix,:,:])
#Normalise so that a flux of 1 with the shape of the model PSF will
#give an extracted flux of 1. If the sum of the weights is zero (e.g. a bad
#column, then this is a zero/zero error. A typical weight is of order unity.
#There will be some divide by zeros... which we'll fix later.
ww = np.where(np.sum(good_weights,axis=0)==0)[0]
with np.errstate(invalid='ignore'):
good_weights /= np.sum(good_weights*psfim,axis=0)
#Now do the extraction!
extracted_flux[cube_ix,i*nfibres + j,:] = np.sum(good_weights*subims[cube_ix,:,:],axis=0)
extracted_sigma[cube_ix,i*nfibres + j,:] = np.sqrt(np.sum(good_weights**2*(subims[cube_ix,:,:] + header['RO_NOISE']**2), axis=0))
extracted_sigma[cube_ix,i*nfibres + j,ww]=np.inf
print("Finished extraction...")
#!!! TODO: We can check for bad pixels that were missed at this point, e.g.
#bad columns that weren't very bad and didn't show up pre-extraction. If we really
#want to have fun with this, smoothing by windowing the Fourier transform of
#the data may work best.
if return_im:
return extracted_flux[0,:,:], extracted_sigma[0,:,:]
else:
return extracted_flux, extracted_sigma
def create_fibre_flat(self, infile, smooth_threshold=1e3, smoothit=0, sigma_cut=5.0):
"""Based on a single flat, compute the fiber flat field, corrected for
individual fibre throughputs.
NB the ghost images turn up in this also.
Parameters
----------
infile: string
The input filename
smooth_threshold:
If average counts are lower than this, we smooth the fibre flat
(no point dividing by noise).
smoothit:
The width of the smoothing filter.
sigma_cut:
The cut in standard deviations to look for bad extracted pixels, when compared
to median-filtered extracted pixels.
Returns
-------
fibre_flux: float (nfibres, nx) array
The fiber flat field.
"""
fibre_flux, fibre_sigma = self.extract([infile])
if np.median(fibre_flux) < smooth_threshold:
smoothit = 25
#First, reject outliers in individual rows rather agressively
#(remember the spectra are smooth)
fibre_sigma = nd.filters.median_filter(fibre_sigma, size=(1,25))
fibre_flux_medfilt = nd.filters.median_filter(fibre_flux, size=(1,25))
ww = np.where(np.abs(fibre_flux - fibre_flux_medfilt) > sigma_cut*fibre_sigma)
fibre_flux[ww] = fibre_flux_medfilt[ww]
if smoothit>0:
fibre_flux = np.convolve(fibre_flux,np.ones(smoothit)/smoothit,mode='same')
#Find dead fibres.
fib_table = pyfits.getdata(self.ddir + infile,1)
#Check for an obscure bug, where the extension orders are changed...
if len(fib_table)==1:
fib_table = pyfits.getdata(self.ddir + infile,2)
off_sky = np.where( (fib_table['TYPE'] != 'S') * (fib_table['TYPE'] != 'P'))[0]
on_sky = np.where( 1 - (fib_table['TYPE'] != 'S') * (fib_table['TYPE'] != 'P'))[0]
med_fluxes = np.median(fibre_flux,axis=1)
wbad = np.where(med_fluxes[on_sky] < 0.1*np.median(med_fluxes[on_sky]))[0]
if len(wbad)>0:
print("Bad fibres (<10% of median flux): " + str(wbad))
#!!! Unsure what to do with this. At the moment the data will just look bad
#and will be cut due to S/N later.
#We always return a *normalised* fiber flux, so that we're at least close to
#the raw data.
return fibre_flux/np.median(fibre_flux[on_sky,:])
def sky_subtract(self, infiles, extracted_flux,extracted_sigma, wavelengths, sigma_cut=5.0, fibre_flat=[]):
"""Subtract the sky from each extracted spectrum. This should be done after
cross-talk and scattered light removal, but is in itself a crude way to
remove scattered light. Note that all files input are assumed to have
the same wavelength scale and fiber table.
The fibre flat correction is also done at this time.
Notes
-----
There appears to be real structure in the flat at the 0.5% level... but
this has to be confirmed with multiple epoch tests. It is a little suspicious
as the structure is more or less white.
TODO: !!!
1) Uncertainties in sky, based on input sigma and interpolating past the chip edge.
2) Uncertainties in data, based on sky subtraction.
3) bad pixels! """
#Find the list of sky and object fibres.
fib_table = pyfits.getdata(self.ddir + infiles[0],1)
#Check for an obscure bug, where the extension orders are changed...
if len(fib_table)==1:
fib_table = pyfits.getdata(self.ddir + infiles[0],2)
sky = np.where(fib_table['TYPE']=='S')[0]
ns = len(sky)
#Apply fibre flat.
if len(fibre_flat)>0:
sky_fibre_variance = 1.0/np.median(fibre_flat[sky,:],axis=1)**2
for cube_ix in range(len(infiles)):
extracted_flux[cube_ix,:,:] /= fibre_flat
extracted_sigma[cube_ix,:,:] /= fibre_flat
else:
sky_fibre_variance = np.ones(ns)
#Go through sky fibres one at a time and reconstruct their spectra from the other
#sky fibres.
nx = wavelengths.shape[1]
nf = len(infiles)
sky_flux = extracted_flux[:,sky,:]
sky_sigma = extracted_sigma[:,sky,:]
bad_skies = []
#j is an index that runs from 0 to the number of sky fibers.
#s is the actual fits file index of the fiber.
for j,s in enumerate(sky):
ww = sky[np.where(sky != s)[0]]
sky_spectra_interp = np.zeros((nf,ns-1,nx))
sky_sigma_interp = np.zeros((nf,ns-1,nx))
for k, sky_other in enumerate(ww):
#Another manual interpolation... Find the index corresponding to the
                #wavelength of our target sky fiber, e.g. if pixel 10 for s corresponds
                #to pixel 11 for sky_other, we want ix=11.
ix = np.interp(wavelengths[s,:], wavelengths[sky_other,:],np.arange(nx))
#Divide this into integer and fractional parts.
ix_int = np.floor(ix).astype(int)
#!!! This currently has edge effects !!!
ix_int = np.maximum(ix_int,0)
ix_int = np.minimum(ix_int,nx-2)
ix_frac = ix - ix_int
for i in range(nf):
sky_spectra_interp[i,k,:] = extracted_flux[i,sky_other,ix_int]*(1-ix_frac) + extracted_flux[i,sky_other,ix_int+1]*ix_frac
sky_sigma_interp[i,k,:] = extracted_sigma[i,sky_other,ix_int]*(1-ix_frac) + extracted_sigma[i,sky_other,ix_int+1]*ix_frac
sky_spectra_recon = np.median(sky_spectra_interp, axis=1)
sky_sigma_recon = np.median(sky_sigma_interp, axis=1)
            #Now find outliers and correct them. It is important that sky_sigma is also a
            #robust statistic, so this still works in the presence of NaN values and of sky
            #gradients (up to a factor of 2, e.g. from scattered light).
#ww = np.where(np.abs(extracted_flux[:,s,:] - sky_spectra_recon) > sigma_cut*sky_sigma_recon)
#The debugger lines below show that there is still some work to do !!!
for i in range(nf):
scaling_factor = np.median(extracted_flux[i,s,:]/sky_spectra_recon[i,:])
if np.abs(np.log(scaling_factor)) > np.log(2):
print("Unusual sky fiber! Scaling factor required: " + str(scaling_factor))
bad_skies.append(j)
#For testing import pdb; pdb.set_trace()
ww = np.where(np.logical_not(np.abs(extracted_flux[i,s,:] - scaling_factor*sky_spectra_recon[i,:]) < scaling_factor*sigma_cut*sky_sigma_recon[i,:]))[0]
#Look for invalid values...
if len(ww) > 400:
print("Crazy number of bad pixels in reconstructing sky!")
bad_skies.append(j)
#For testing import pdb; pdb.set_trace()
sky_flux[i,j,ww] = sky_spectra_recon[i,ww]*scaling_factor
sky_sigma[i,j,ww] = sky_sigma_recon[i,ww]*scaling_factor
#If we've flagged a bad sky fiber, remove it now.
good_skies = np.arange(ns)
for abad in bad_skies:
good_skies = good_skies[np.where(good_skies != abad)[0]]
sky = sky[good_skies]
sky_flux = sky_flux[:,good_skies,:]
sky_sigma = sky_sigma[:,good_skies,:]
sky_fibre_variance = sky_fibre_variance[good_skies]
ns = len(sky)
#Now do the same for the extracted object fibers... except in this case we use a weighted average.
#Include sky fibers as "objects" as a sanity check.
objects = np.where(np.logical_or(fib_table['TYPE']=='P',fib_table['TYPE']=='S'))[0]
for o in objects:
#Find the dx and dy values for the positioner.
dx_pos = fib_table['X'][sky] - fib_table['X'][o]
dy_pos = fib_table['Y'][sky] - fib_table['Y'][o]
#Create the quadratic programming problem.
#http://en.wikipedia.org/wiki/Quadratic_programming
#Start with equality constraints...
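            #The weights w minimise sum_i var_i*w_i**2 subject to sum(w) = 1 and zero
            #first moments in the fibre-positioner dx,dy offsets, i.e. they give a
            #minimum-variance estimate of a linear sky surface at the object position.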
E = np.array([dx_pos,dy_pos,np.ones(ns)])
c_d = np.zeros( ns+3 )
c_d[-1] = 1.0
the_matrix = np.zeros( (ns+3, ns+3) )
ix = np.arange(ns)
#Weight by inverse fiber throughput squared - appropriate for
#readout-noise limited sky data, typical of HERMES.
the_matrix[ix,ix] = sky_fibre_variance
the_matrix[ns:,0:ns] = E
the_matrix[0:ns,ns:] = np.transpose(E)
x_lambda = np.linalg.solve(the_matrix, c_d)
weights = x_lambda[0:ns]
old_flux = extracted_flux.copy()
#Lets save the weighted average sky separately... great for
#bug-shooting.
sky_to_subtract = np.zeros( (nf,nx) )
for k,s in enumerate(sky):
#Interpolate the wavelength scale for each fiber, subtracting off the interpolated
#sky fiber flux multiplied by our pre-computed weights.
#!!! This currently has edge effects !!!
#!!! And is copied from above... should be its own routine once uncertainties are sorted !!!
ix = np.interp(wavelengths[o,:], wavelengths[s,:],np.arange(nx))
ix_int = np.floor(ix).astype(int)
ix_int = np.maximum(ix_int,0)
ix_int = np.minimum(ix_int,nx-2)
ix_frac = ix - ix_int
for i in range(nf):
sky_to_subtract[i,:] += weights[k]*(sky_flux[i,k,ix_int]*(1-ix_frac) + sky_flux[i,k,ix_int+1]*ix_frac )
#Now subtract the sky!
extracted_flux[:,o,:] -= sky_to_subtract
return extracted_flux, extracted_sigma
def save_extracted(self, infiles, extracted_flux,extracted_sigma, wavelengths):
"""Save extracted spectra from a set of input files to individual
files, labelled similarly to 2dFDR
NOT IMPLEMENTED YET (is this a separate "manual save" routine?) """
raise UserWarning
def combine_multi_epoch_spectra(self,coord_limit=-1,search_galahic=False, fractional_snr_limit=0.3):
"""An out a flux-weighted epoch for the observation is placed in the header.
Parameters
----------
coord_limit: float
Difference in coordinates (arcsec) for two objects to be considered the same.
            NOT IMPLEMENTED
        search_galahic: boolean
Do we search non-galahic stars to see if they are galahic stars with a different
label?
fractional_snr_limit: float
What fraction of the peak SNR does a new epoch need in order to be combined.
"""
return []
def make_comb_filename(self, outfile):
"""Create filename by inserting "comb" in-between the name of
the file and ".fits
"""
spos = outfile.find('.fits')
if spos < 0:
print("Weird file name... no fits extension")
raise UserWarning
return outfile[:spos] + 'comb' + outfile[spos:]
def combine_single_epoch_spectra(self, extracted_flux, extracted_sigma, wavelengths, infiles=[], \
csvfile='observation_table.csv', is_std=False,cobfile=[]):
""" If spectra were taken in a single night with a single arc, we can approximate
the radial velocity as constant between frames, and combine the spectra in
pixel space.
Parameters
----------
extracted_flux: (nfiles,nfibres*nslitlets,nx) array
Flux extracted for a set of files.
extracted_sigma: (nfiles,nfibres*nslitlets,nx) array
Standard deviations extracted from a set of files
wavelengths: (nfibres*nslitlets,nx) array
Common wavelength array.
infiles: string list (optional)
If given, the combined spectrum is output to a combined fits file that
contains the combined flux and the combined single epoch spectra.
csvfile: string
If given, key parameters are appended to the observations table.
NB At this point, no checking is done to see if the software runs multiple times,
just appending to previous files.
Returns
-------
flux_comb, flux_comb_sigma: ((nfibres*nslitlets,nx) array, (nfibres*nslitlets,nx) array)
Combined flux and combined standard deviation.
"""
#The combination is simply a weighted arithmetic mean, as we already
#have the variance as an input parameter.
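        #Explicitly: flux_comb = sum(flux/sigma**2)/sum(1/sigma**2) and
        #flux_comb_sigma = 1/sqrt(sum(1/sigma**2)), summed over the input frames.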
weights = 1/extracted_sigma**2
ww = np.where(extracted_flux != extracted_flux)
extracted_flux[ww] = 0.0
flux_comb = np.sum(weights*extracted_flux,0)/np.sum(weights,0)
extracted_flux[ww] = np.nan
flux_comb_sigma = np.sqrt(1.0/np.sum(weights,0))
cob = pyfits.getheader(self.ddir + cobfile)['RUN']
#Save the data if necessary.
if len(infiles)>0:
headers = []
for infile in infiles:
headers.append(pyfits.getheader(self.ddir + infile))
runs = [aheader['RUN'] for aheader in headers]
start = np.argmin(runs)
end = np.argmax(runs)
header = headers[start]
#To keep a record of which files went in to this.
header['RUNLIST'] = str(runs)[1:-1]
header['NRUNS'] = len(infiles)
#Start and end
header['HAEND'] = headers[end]['HAEND']
header['ZDEND'] = headers[end]['ZDEND']
header['UTEND'] = headers[end]['UTEND']
header['STEND'] = headers[end]['STEND']
header['HASTART'] = headers[start]['HASTART']
header['ZDSTART'] = headers[start]['ZDSTART']
header['UTSTART'] = headers[start]['UTSTART']
header['STSTART'] = headers[start]['STSTART']
#Means. If more accuracy than this is needed, then individual
#(i.e. non-combined) files should be used!
header['EPOCH'] = np.mean([aheader['EPOCH'] for aheader in headers])
header['UTMJD'] = np.mean([aheader['UTMJD'] for aheader in headers])
            #For GALAH, we need to create a special directory. #Ly changed to accommodate non-standard cfg files
cfg = header['CFG_FILE']
if 'gf' in cfg:
ix0 = header['CFG_FILE'].find('_')
ix1 = header['CFG_FILE'].rfind('_')
field_directory = header['CFG_FILE'][ix0+1:ix1]
else:
field_directory = cfg.replace('.sds','')
if not os.path.exists(self.gdir + '/' + field_directory):
os.makedirs(self.gdir + '/' + field_directory)
#The header and fiber table of the first input file is retained, with
#key parameters averaged from each header
#The first fits image (zeroth extension) is the combined flux.
#The first fits extension is the fiber table
#The second fits extension is the uncertainty in the combined flux.
#The third fits extension is the wavelength scale.
hl = pyfits.HDUList()
hl.append(pyfits.ImageHDU(flux_comb.astype('f4'),header))
fib_table = pyfits.getdata(self.ddir + infiles[start],1)
#Check for an obscure bug, where the extension orders are changed...
if len(fib_table)==1:
fib_table = pyfits.getdata(self.ddir + infiles[start],2)
hl.append(pyfits.open(self.ddir + infiles[start])[2])
else:
hl.append(pyfits.open(self.ddir + infiles[start])[1])
hl.append(pyfits.ImageHDU(flux_comb_sigma.astype('f4')))
#!!! TODO: Add bcorr to the per-field fits files, and header keywords to
#see if barycorr has been applied.
if barycorr:
logwave_flux_hdu, logwave_sigma_hdu, linwave_flux, linwave_sigma, wave_new, bcorr = \
self.create_barycentric_spectra(header, fib_table, flux_comb, flux_comb_sigma, wavelengths)
hl.append(pyfits.ImageHDU(wave_new))
#Add the extra log-wavelength extensions that Mike seems to like so much.
hl.append(logwave_flux_hdu)
hl.append(logwave_sigma_hdu)
header['BCORR']='True'
else:
hl.append(pyfits.ImageHDU(wavelengths))
header['BCORR']='False'
#Lets always name the file by the first file in the set.
outfile = self.make_comb_filename(infiles[start])
hl.writeto(self.rdir + outfile,clobber=True)
objects = np.where(fib_table['TYPE']=='P')[0]
#See if we need to write a csv file header line...
if not os.path.isfile(self.rdir + csvfile):
f_csvfile = open(self.rdir + csvfile, 'a')
f_csvfile.write('obsdate, run_start, run_end, fib_num, galahic_num, idname, snr, software,file, rdate\n')
f_csvfile.close()
f_csvfile = open(self.rdir + csvfile, 'a')
data_date = header['UTDATE'][2:4] + header['UTDATE'][5:7] + header['UTDATE'][8:10]
now = time.gmtime()
analysis_date = '{0:02d}{1:02d}{2:02d}'.format(now.tm_year-2000, now.tm_mon, now.tm_mday)
if is_std:
o = np.where(fib_table['PIVOT'] == (header['STD_FIB']))[0][0]
medsnrs = np.median(flux_comb/flux_comb_sigma, axis=1)
if np.argmax(medsnrs) != o:
print("Something dodgy with this standard fiber: " +header['STD_NAME']+"! Please check manually here...")
pdb.set_trace()
#File name = standardstarname_ccd.fits
#WG4 to rename/work around to suit their codes
filename = "{0}_{1}.fits".format(header['STD_NAME'].replace(' ',''),self.ccd_nums[header['SPECTID']])
flux_hdu = pyfits.ImageHDU(linwave_flux.data[o,:].astype('f4'),header)
sig_hdu = pyfits.ImageHDU(linwave_sigma.data[o,:].astype('f4'))
#Add in header stuff from fiber table
flux_hdu.header['RA'] = header['MEANRA']
flux_hdu.header['DEC'] = header['MEANDEC']
flux_hdu.header['V_BARY'] = bcorr[o]
flux_hdu.header['FIBRE'] = o + 1
for key in ("CRVAL1", "CDELT1", "CRPIX1", "CTYPE1", "CUNIT1"):
flux_hdu.header[key] = linwave_flux.header[key]
sig_hdu.header[key] = linwave_sigma.header[key]
hl = pyfits.HDUList()
hl.append(flux_hdu)
hl.append(sig_hdu)
hl.writeto(self.gdir + '/' + field_directory +'/' + filename, clobber=True)
f_csvfile.write('{0:s},{1:d},{2:d},{3:d},{4:d},{5:s},{6:6.1f},{7:5.2f},{8:s},{9:s}\n'.format(
data_date,runs[start], runs[end], o+1, -1, header['STD_NAME'].replace(' ',''),
np.median(flux_comb[o,:]/flux_comb_sigma[o,:]), self.release,outfile,analysis_date))
else:
for o in objects:
strpos = fib_table[o]['NAME'].find('galahic_')
if strpos >= 0:
try:
galahic = int(fib_table[o]['NAME'][strpos+8:])
except:
galahic=-1
else:
galahic=-1
#date, minimum file number, maximum file number, fiber number,
#input catalog number, input catalog name, signal-to-noise,
#software release version, output file, analysis date
#NB: The detector isn't here... a separate program has to take all these files
#and add those details.
f_csvfile.write('{0:s},{1:d},{2:d},{3:d},{4:d},{5:s},{6:6.1f},{7:5.2f},{8:s},{9:s}\n'.format(
data_date,runs[start], runs[end], o+1, galahic, fib_table[o]['NAME'],
np.median(flux_comb[o,:]/flux_comb_sigma[o,:]), self.release,outfile,analysis_date))
if (galahic==-1):
galahic = 'FIB' + str(o+1)
filename = "{0}_{1}_{2}.fits".format(int(float(data_date)*10000+float(cob)),galahic,self.ccd_nums[header['SPECTID']])
flux_hdu = pyfits.ImageHDU(linwave_flux.data[o,:].astype('f4'),header)
sig_hdu = pyfits.ImageHDU(linwave_sigma.data[o,:].astype('f4'))
#Add in header stuff from fiber table.
flux_hdu.header['OBJ_NAME'] = fib_table[o]['NAME']
flux_hdu.header['RA'] = np.degrees(fib_table[o]['RA'])
flux_hdu.header['DEC'] = np.degrees(fib_table[o]['DEC'])
flux_hdu.header['V_BARY'] = bcorr[o]
flux_hdu.header['FIBRE'] = o + 1 #!!! Starting at 1 for 2dFDR convention...
flux_hdu.header['PIVOT'] = fib_table[o]['PIVOT']
for key in ("CRVAL1", "CDELT1", "CRPIX1", "CTYPE1", "CUNIT1"):
flux_hdu.header[key] = linwave_flux.header[key]
sig_hdu.header[key] = linwave_sigma.header[key]
hl = pyfits.HDUList()
hl.append(flux_hdu)
hl.append(sig_hdu)
hl.writeto(self.gdir + '/' + field_directory +'/' + filename, clobber=True)
f_csvfile.close()
return flux_comb, flux_comb_sigma
def create_barycentric_spectra(self, header, fib_table, flux, flux_sigma, wavelengths,is_std=False):
"""Interpolate flux onto a wavelength grid spaced regularly in log(wavelength),
after shifting to the solar system barycenter"""
if not barycorr:
print("ERROR: Need PyAstronomy for create_barycentric_spectra()")
raise UserWarning
        #The bcorr is the barycentric correction in km/s, with a sign convention
        #where positive means moving towards the star. This means that we have to red-shift
#the interpolated spectra, meaning that the new wavelength scale has to be shifted
#to the blue.
if is_std:
hcorr, bcorr = pyasl.baryCorr(header['UTMJD'] + 2400000.5, header['MEANRA'], header['MEANDEC'], deq=2000.0)
else:
hcorr, bcorr = pyasl.baryCorr(header['UTMJD'] + 2400000.5, np.degrees(fib_table['RA']),np.degrees(fib_table['DEC']), deq=2000.0)
nfib = wavelengths.shape[0]
new_flux = np.zeros( (nfib,self.fixed_nwave) )
new_flux_sigma = np.zeros( (nfib,self.fixed_nwave) )
new_lin_flux = np.zeros( (nfib,self.fixed_nwave) )
new_lin_flux_sigma = np.zeros( (nfib,self.fixed_nwave) )
new_wave = self.fixed_wave0[header['SPECTID']]*np.exp(np.arange(self.fixed_nwave)/float(self.fixed_R))
new_lin_wave = self.fixed_wave0[header['SPECTID']]*(1 + np.arange(self.fixed_nwave)/float(self.fixed_R))
dnew_wave = new_wave[1:]-new_wave[:-1]
dnew_wave = np.append(dnew_wave, dnew_wave[-1])
dnew_lin_wave = new_lin_wave[1]-new_lin_wave[0]
new_wavelengths = wavelengths.copy()
for i in range(nfib):
new_wavelengths[i,:] = wavelengths[i,:]*(1 + bcorr[i]/2.9979e5)
dwave = new_wavelengths[i,1:]-new_wavelengths[i,:-1]
dwave = np.append(dwave, dwave[-1])
dwave_lin = np.interp(new_lin_wave, new_wavelengths[i,:], dwave)
dwave = np.interp(new_wave, new_wavelengths[i,:], dwave)
new_flux[i,:] = np.interp(new_wave, new_wavelengths[i,:], flux[i,:], left=np.nan, right=np.nan)
new_lin_flux[i,:] = np.interp(new_lin_wave, new_wavelengths[i,:], flux[i,:], left=np.nan, right=np.nan)
#Preserve the meaning of sigma if many samples are averaged together.
new_flux_sigma[i,:] = np.interp(new_wave, new_wavelengths[i,:], flux_sigma[i,:],
left=np.nan, right=np.nan) * np.sqrt(dnew_wave/dwave)
new_lin_flux_sigma[i,:] = np.interp(new_lin_wave, new_wavelengths[i,:], flux_sigma[i,:],
left=np.nan, right=np.nan) * np.sqrt(dnew_lin_wave/dwave_lin)
# The log-wavelength header
new_hdu = pyfits.ImageHDU(new_flux.astype('f4'))
sig_hdu = pyfits.ImageHDU(new_flux_sigma.astype('f4'))
new_hdu.header['CRVAL1']=np.log(self.fixed_wave0[header['SPECTID']])
new_hdu.header['CDELT1']=1.0/self.fixed_R
new_hdu.header['CRPIX1']=1.0
new_hdu.header['CRVAL2']=0.0
new_hdu.header['CDELT2']=1.0
new_hdu.header['CRPIX2']=1.0
new_hdu.header['CTYPE1']='log(Wavelength)'
new_hdu.header['CUNIT1']='Angstroms'
new_hdu.header['CTYPE2']='Fibre Number'
new_hdu.header['CUNIT2'] = ''
sig_hdu.header = new_hdu.header
# The linear-wavelength header
new_lin_hdu = pyfits.ImageHDU(new_lin_flux.astype('f4'))
lin_sig_hdu = pyfits.ImageHDU(new_lin_flux_sigma.astype('f4'))
new_lin_hdu.header['CRVAL1']=self.fixed_wave0[header['SPECTID']]
new_lin_hdu.header['CDELT1']=self.fixed_wave0[header['SPECTID']]/self.fixed_R
new_lin_hdu.header['CRPIX1']=1.0
new_lin_hdu.header['CRVAL2']=0.0
new_lin_hdu.header['CDELT2']=1.0
new_lin_hdu.header['CRPIX2']=1.0
new_lin_hdu.header['CTYPE1']='Wavelength'
new_lin_hdu.header['CUNIT1']='Angstroms'
new_lin_hdu.header['CTYPE2']='Fibre Number'
new_lin_hdu.header['CUNIT2'] = ''
lin_sig_hdu.header = new_lin_hdu.header
return new_hdu, sig_hdu, new_lin_hdu, lin_sig_hdu, new_wavelengths, bcorr
def fit_tramlines(self, infile, subtract_bias=False, fix_badpix=False):
"""Make a linear fit to tramlines, based on a simplified PSF model """
im = self.basic_process(infile)
header = pyfits.getheader(self.ddir + infile)
ftable = pyfits.getdata(self.ddir + infile,1)
if subtract_bias:
im -= pyfits.getdata(self.rdir + 'bias.fits')
if fix_badpix:
medim = nd.filters.median_filter(im,size=5)
badpix = pyfits.getdata(self.cdir + 'badpix.fits')
ww = np.where(badpix)
im[ww] = medim[ww]
nsamp = 8
nslitlets=40
nfibres=10
#Maximum number of pixels for extraction, including tramline tilt.
npix_extract = 20
#Oversampling of the PSF - should be an odd number due to symmetrical
#convolutions.
oversamp = 3
dely_deriv = 0.01
flux_min = 10
nx = im.shape[1]
psf = self.make_psf(npix=npix_extract, oversamp=oversamp)
#Manually set the index for the samples to the median filtered image.
#!!! Hardwired numbers - to be changed for FunnelWeb
x_ix = 256 + np.arange(nsamp,dtype=int)*512
y_ix_oversamp = np.arange(oversamp*npix_extract) + 0.5 - (oversamp*npix_extract)/2.0
y_ix = ( np.arange(npix_extract) + 0.5 - (npix_extract)/2.0 )*oversamp
#Filtered image
imf = nd.filters.median_filter(im,size=(1,11))
imf = imf[:,x_ix]
psfim_plus = np.zeros((npix_extract, nsamp))
psfim_minus = np.zeros((npix_extract, nsamp))
dy = np.zeros((nslitlets,nfibres,nsamp))
weight_dy = np.zeros((nslitlets,nfibres,nsamp))
#Read in the tramline initial parameters from the calibration directory
p_tramline = np.loadtxt(self.cdir + 'tramlines_p' + header['SOURCE'][6] + '.txt')
#Make a matrix that maps p_tramline numbers to dy, i.e. for parameters
#p_tramline, we get the y positions by np.dot(tramline_matrix,p_tramline)
tramline_matrix = np.zeros((nfibres*nsamp,4))
tramline_matrix[:,0] = np.tile(x_ix**2,nfibres) # Parabolic term with x
tramline_matrix[:,1] = np.tile(x_ix,nfibres) # Linear term with x
tramline_matrix[:,2] = np.ones( nfibres*nsamp ) # Offset term
tramline_matrix[:,3] = np.repeat( (np.arange(nfibres)+0.5-nfibres//2),
nsamp ) # Stretch term.
#Loop through a few different offsets to get a global shift.
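        #The global y-shift is found by summing the (median-filtered) flux at the
        #predicted integer tramline positions for nshifts trial offsets and taking
        #the offset that maximises the summed flux.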
ypix = np.dot(tramline_matrix,p_tramline.T)
ypix = ypix.reshape( (nfibres,nsamp,nslitlets) )
ypix = np.swapaxes(ypix,0,1).flatten().astype(int)
xpix = np.repeat( range(nsamp), nfibres*nslitlets)
nshifts = 20
flux_peak = np.zeros(nshifts)
for i in range(nshifts):
flux_peak[i] = np.sum(imf[np.maximum(np.minimum(ypix+i-nshifts//2,nx),0),xpix])
p_tramline[:,2] += np.argmax(flux_peak) - nshifts//2
        #Make three Newton-Raphson iterations to find the best-fitting tramline parameters
for count in range(0,3):
#Go through every slitlet, fiber and sample (nsamp) in the wavelength
#direction, finding the offsets.
for i in range(nslitlets):
for j in range(nfibres):
center_int = np.int(p_tramline[i,2] + p_tramline[i,3]*(j+0.5-nfibres//2))
center_int = np.maximum(center_int,npix_extract//2)
center_int = np.minimum(center_int,nx-npix_extract//2)
subim = imf[center_int - npix_extract//2:center_int + npix_extract//2,:]
#Start off with a slow interpolation for simplicity.
for k in range(nsamp):
offset = p_tramline[i,2] + p_tramline[i,1]*x_ix[k] + p_tramline[i,0]*x_ix[k]**2 + p_tramline[i,3]*(j+0.5-nfibres//2) - center_int
psfim_plus[:,k] = np.interp(y_ix - (offset + dely_deriv)*oversamp, y_ix_oversamp, psf)
psfim_minus[:,k] = np.interp(y_ix - (offset - dely_deriv)*oversamp, y_ix_oversamp, psf)
psfim = 0.5*(psfim_plus + psfim_minus)
psfim_deriv = ( psfim_plus - psfim_minus )/2.0/dely_deriv
psfsum = np.sum(psfim*subim,axis=0)
dy[i,j,:] = np.sum(psfim_deriv*subim,axis=0)/np.maximum(psfsum,flux_min)*np.sum(psfim**2)/np.sum(psfim_deriv**2)
weight_dy[i,j,:] = np.maximum(psfsum-flux_min,0)
print("RMS tramline offset (iteration " +str(count)+ "): " + str(np.sqrt(np.mean(dy**2))))
for i in range(nslitlets):
#Now we fit to the dy values.
W = np.diag(weight_dy[i,:,:].flatten())
y = dy[i,:,:].flatten()
delta_p = np.linalg.solve(np.dot(np.transpose(tramline_matrix),np.dot(W,tramline_matrix)) ,\
np.dot(np.transpose(tramline_matrix),np.dot(W,y)) )
p_tramline[i,:] += delta_p
np.savetxt(self.rdir + 'tramlines_p' + header['SOURCE'][6] + '.txt', p_tramline, fmt='%.5e')
def reduce_field(self,obj_files, arc_file, flat_file, is_std=False, cobfile=[]):
"""A wrapper to completely reduce a field, assuming that a bias already exists."""
self.fit_tramlines(flat_file)
fibre_flat = self.create_fibre_flat(flat_file)
arc, arc_sig = self.extract([arc_file])
wavelengths = self.fit_arclines(arc, pyfits.getheader(self.ddir + arc_file))
cube,badpix = self.make_cube_and_bad(obj_files)
flux, sigma = self.extract(obj_files, cube=cube, badpix=badpix)
if not is_std:
flux, sigma = self.sky_subtract(obj_files, flux, sigma, wavelengths, fibre_flat=fibre_flat)
comb_flux, comb_flux_sigma = self.combine_single_epoch_spectra(flux, sigma, wavelengths, infiles=obj_files, is_std=is_std,cobfile=cobfile)
return comb_flux, comb_flux_sigma
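    # Hedged usage sketch (not part of the original pipeline; the file names are
    # hypothetical placeholders): reduce one field from two object frames plus
    # its arc and fibre flat, assuming a bias.fits already exists in rdir.
    def _example_reduce_field(self):
        """A minimal sketch of calling reduce_field on one field."""
        obj_files = ['obj_frame_1.fits', 'obj_frame_2.fits']
        return self.reduce_field(obj_files, 'arc_frame.fits', 'flat_frame.fits')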
def go(self, min_obj_files=2, dobias=True, skip_done=False):
"""A simple function that finds all fully-executed fields (in this case meaning
at least min_obj_files exposures on the field) and analyses them.
Parameters
----------
min_obj_files: int
Minimum number of files per field to call it "good"
        dobias: boolean
            Do we bother subtracting the bias frame.
        skip_done: boolean
            If True, skip bias creation and any fields that have already been processed.
        """
all_files = np.array(sorted([os.path.basename(x) for x in glob.glob(self.ddir + '[0123]*[0123456789].fit*')]))
if len(all_files)==0:
print("You silly operator. No files. Input directory is: " + self.ddir)
return
biases = np.array([],dtype=np.int)
flats = np.array([],dtype=np.int)
arcs = np.array([],dtype=np.int)
objects = np.array([],dtype=np.int)
is_stds = np.array([],dtype=np.bool)
cfgs = np.array([],dtype=np.int)
field_ids = np.array([],dtype=np.int)
for i,file in enumerate(all_files):
header= pyfits.getheader(self.ddir + file)
try:
cfg = header['CFG_FILE']
except:
cfg = ''
cfgs = np.append(cfgs,cfg)
field_id = cfg
is_std = False
if header['NDFCLASS'] == 'BIAS':
biases = np.append(biases,i)
#!!! No idea what LFLAT is, but it seems to be a flat.
#!!! Unfortunately, if it is used, the header['SOURCE'] seems to be invalid, so
#the code doesn't know what field is in use.
elif header['NDFCLASS'] == 'MFFFF':
flats = np.append(flats,i)
elif header['NDFCLASS'] == 'MFARC':
arcs = np.append(arcs,i)
elif (header['NDFCLASS'] == 'MFOBJECT'):
objects = np.append(objects,i)
elif (header['NDFCLASS'] == 'MFFLX'):
objects = np.append(objects,i)
is_std = True
field_id = cfg + header['STD_NAME']
else:
print("Unusual (ignored) NDFCLASS " + header['NDFCLASS'] + " for file: " + file)
field_ids = np.append(field_ids,field_id)
is_stds = np.append(is_stds, is_std)
#Forget about configs for the biases - just use all of them! (e.g. beginning and end of night)
if len(biases) > 2 and dobias:
if skip_done and os.path.isfile(self.rdir + '/' + 'bias.fits'):
print("Skipping (already done) bias creation")
else:
print("Creating Biases")
bias = self.median_combine(all_files[biases], 'bias.fits')
else:
print("No biases. Will use default...")
#Old code that treated all files with the same sds file as one.
# for cfg in set(cfgs):
# #For each config, check that there are enough files.
# cfg_flats = flats[np.where(cfgs[flats] == cfg)[0]]
# cfg_arcs = arcs[np.where(cfgs[arcs] == cfg)[0]]
# cfg_objects = objects[np.where(cfgs[objects] == cfg)[0]]
#Lets make a config index that changes every time there is a tumble.
cfg_starts = np.append(0,np.where(field_ids[1:] != field_ids[:-1])[0]+1)
cfg_ends = np.append(np.where(field_ids[1:] != field_ids[:-1])[0]+1,len(field_ids))
for i in range(len(cfg_starts)):
cfg_start = cfg_starts[i]
cfg_end = cfg_ends[i]
cfg_is_std = is_stds[cfg_starts[i]]
#For each config, check that there are enough files.
if is_stds[cfg_starts[i]]:
cfg_flats = flats[np.where( (flats >= cfg_start-2) & (flats < cfg_end+2))[0]]
same_cfg = np.where(cfgs[cfg_flats] == cfgs[cfg_starts[i]])[0]
cfg_flats = cfg_flats[same_cfg]
cfg_arcs = arcs[np.where( (arcs >= cfg_start-2) & (arcs < cfg_end+2))[0]]
same_cfg = np.where(cfgs[cfg_arcs] == cfgs[cfg_starts[i]])[0]
cfg_arcs = cfg_arcs[same_cfg]
else:
cfg_flats = flats[np.where( (flats >= cfg_start) & (flats < cfg_end))[0]]
cfg_arcs = arcs[np.where( (arcs >= cfg_start) & (arcs < cfg_end))[0]]
ww = np.where( (objects >= cfg_start) & (objects < cfg_end))[0]
cfg_objects = objects[ww]
cob = pyfits.getheader(self.ddir + all_files[cfg_start])['RUN']
if len(cfg_flats) == 0:
print("No flat for field: " + cfgs[cfg_start] + " Continuing to next field...")
elif len(cfg_arcs) == 0:
print("No arc for field: " + cfgs[cfg_start] + " Continuing to next field...")
elif len(cfg_objects) < min_obj_files:
print("Require at least 2 object files. Not satisfied for: " + cfgs[cfg_start] + " Continuing to next field...")
else:
if skip_done:
comb_filename = self.make_comb_filename(all_files[cfg_objects[0]])
if os.path.isfile(self.rdir + '/' + comb_filename):
header = pyfits.getheader(self.rdir + '/' + comb_filename)
if header['NRUNS'] == len(cfg_objects):
print("Ignoring processed field: " + comb_filename)
continue
print("Processing field: " + cfgs[cfg_start], 'COB=' + str(cob))
#!!! NB if there is more than 1 arc or flat, we could be more sophisticated here...
self.reduce_field(all_files[cfg_objects],all_files[cfg_arcs[0]], all_files[cfg_flats[0]], is_std = cfg_is_std,cobfile=all_files[cfg_start])
# !!! The "once-off" codes below here could maybe be their own module???
def find_tramlines(self, infile, subtract_bias=False, fix_badpix=False, nsearch=20, \
fillfrac=1.035, central_sep=9.3, global_offset=-6, c_nonlin=3.2e-4):
"""For a single flat field, find the tramlines. This is the slightly manual
part... the 4 numbers (fillfrac,central_sep, global_offset,c_nonlin) have
to be set so that a good fit is made.
If the fit is good, the tramlines_0.txt or tramlines_1.txt should be
moved to the calibration directory cdir """
nslitlets=40 #Number of slitlets.
nfibres=10 #Fibres per slitlet.
resamp = 3 #sub-pixel sampling in grid search
nsearch *= resamp
global_offset *= resamp
im = self.basic_process(infile)
header = pyfits.getheader(self.ddir + infile)
ftable = pyfits.getdata(self.ddir + infile,1)
if subtract_bias:
im -= pyfits.getdata(self.rdir + 'bias.fits')
if fix_badpix:
medim = nd.filters.median_filter(im,size=5)
badpix = pyfits.getdata(self.cdir + 'badpix.fits')
ww = np.where(badpix)
im[ww] = medim[ww]
szy = im.shape[0]
szx = im.shape[1]
#Next there are 2 general options...
#A: Deciding on the central wavelength pixel coordinates
#for each fiber image, and deciding on elements of a 2nd order polynomial, i.e.
# ypos = y(central) + a1*dx + a2*dx**2 + a3*dy*dx + a4*dx**2*dy
#Each of these is a standard nonlinear fitting process.
#B: Explicitly fit to each slitlet individually. This approach was chosen.
#
#Fibre separations in slitlets:
#First slitlet: 3.83 pix separations.
#Central slitlet: 3.67 pix separation.
#Last slitlet: 3.82 pix separations.
#i.e. separation = 3.67 + 4e-4*(slitlet - 20.5)**2
#Good/Bad gives the brightness of each fiber.
#OR... just fit a parabola to each slitlet.
#Central solution...
fit_px = [500,1500,2500,3500] #Pixels to try fitting the slitlet to.
ncuts = len(fit_px)
cuts = np.zeros((ncuts,szy*resamp))
for i in range(ncuts):
acut = np.median(im[:,fit_px[i]-2:fit_px[i]+3],axis=1)
            cuts[i,:] = acut[np.arange(szy*resamp)//resamp]  #Integer division keeps the indices as ints.
# From Koala...
# params = np.loadtxt(pfile,dtype={
# 'names':('spos','grid_x', 'grid_y', 'good','name'),'formats':('i2','i2','i2','S4','S15')})
#Now we run through the slitlets... some fixed numbers in here.
soffsets = np.zeros((nslitlets,ncuts),dtype='int')
outf = open(self.rdir + 'tramlines_p' + header['SOURCE'][6] + '.txt','w')
plt.clf()
plt.imshow(np.minimum(im,3*np.mean(im)),aspect='auto', cmap=cm.gray, interpolation='nearest')
x = np.arange(szx)
for i in np.arange(nslitlets):
#flux = (ftable[i*nfibres:(i+1)*nfibres]['TYPE'] != 'N') * (ftable[i*nfibres:(i+1)*nfibres]['TYPE'] != 'F')
flux = (ftable[i*nfibres:(i+1)*nfibres]['TYPE'] != 'F')
#8.33, 9.33, 8.44
fsep = resamp*central_sep*(1 - c_nonlin*(i - (nslitlets-1)/2.0)**2)
#subsample by a factor of resamp only, i.e. 4 x 3 = 12 pix per fibre.
prof = np.zeros(szy*resamp)
fibre_offsets = np.zeros(nfibres)
for j in range(nfibres):
#Central pixel for this fibre image...
fibre_offsets[j] = (j- (nfibres-1)/2.0)*fsep
cpix = szy*resamp/2.0 + fibre_offsets[j]
                for px in np.arange(np.floor(cpix-resamp), np.ceil(cpix+resamp+1)).astype(int):
                    prof[px] = np.exp(-(px-cpix)**2/resamp**2.0)*flux[j]
#Now find the best match. Note that slitlet 1 starts from the top.
#!!! On this next line, the nonlinear process becomes important !!!
offsets = global_offset -nsearch/2.0 + resamp*(1 - c_nonlin/3.0*(i - (nslitlets-1)/2.0)**2)*(i-(nslitlets-1)/2.0)/nslitlets*szy*fillfrac + np.arange(nsearch)
offsets = offsets.astype(int)
for k in np.arange(ncuts):
xp = np.zeros(nsearch)
for j in range(nsearch):
xp[j] = np.sum(np.roll(prof,offsets[j])*cuts[k,:])
# print np.argmax(xp)
soffsets[i,k] = offsets[np.argmax(xp)]
#Great! At this point we have everything we need for a parabolic fit to the "tramline".
#Lets make this fit and write to file.
p = np.polyfit(fit_px,(soffsets[i,:] + szy*resamp/2.0)/float(resamp),2)
pp = np.poly1d(p)
#outf.write('{0:3.4e} {1:3.4e} {2:3.4e}\n'.format(p[0],p[1],p[2]+fibre_offsets[j]/resamp))
outf.write('{0:3.4e} {1:3.4e} {2:3.4e} {3:3.4e}\n'.format(p[0],p[1],p[2],fsep/resamp))
for j in range(nfibres):
if flux[j]>0:
if (j == 0):
plt.plot(x,pp(x)+fibre_offsets[j]/resamp,'r-')
else:
plt.plot(x,pp(x)+fibre_offsets[j]/resamp,'g-')
#import pdb; pdb.set_trace()
outf.close()
def compute_model_wavelengths(self,header):
"""Given a fits header and other fixed physical numbers, compute the model
wavelengths for the central fiber for HERMES.
Parameters
----------
header: pyfits header
Header of a file to compute the nominal wavelengths for.
Returns
-------
wavelengths: array
Wavelengths of the central fiber for each pixel in Angstroms.
"""
try:
gratlpmm = header['GRATLPMM']
except:
print("ERROR: Could not read grating parameter from header")
raise UserWarning
#A distortion estimate of 0.04 came from the slit image -
#somewhat flawed because there is distortion
#from both the collimator and camera. Distortion in the wavelength direction
        #is only due to the camera. This is the distortion due to an angle of
#half a chip, i.e. x' = x/(1 + distortion*x^2)
#The best value for the wavelength direction is pretty close to 0...
distortion = 0.00
camfl = 1.7*190.0 #In mm
pixel_size = 0.015 #In mm
#The nominal central wavelength angle from the HERMES design.
#Unclear if 68.1 or 67.2 is the right number...
beta0 = np.radians(67.2)
gratangl = np.radians(67.2)
szx = header['WINDOXE1'] #This removes the overscan region.
center_pix = 2048
d = 1.0/gratlpmm
#First, convert pixel to x-angle (beta)
#Unlike Koala, we'll ignore gamma in the first instance.
#gamma_max = szy/2.0*pixel_size/camfl
dbeta = np.arange(szx,dtype=float) - center_pix
dbeta *= (1.0 + distortion*(dbeta/center_pix)**2)
dbeta *= pixel_size/camfl
beta = beta0 + dbeta
wavelengths = d*(np.sin(gratangl) + np.sin(beta))
#Return wavelengths in Angstroms.
return wavelengths * 1e7
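    # A worked sketch with hypothetical header values (illustrative numbers
    # only, not actual HERMES grating parameters): a ~3800 line/mm grating at
    # the nominal 67.2 degree angle puts the central wavelength near 4850 A.
    def _example_model_wavelengths(self):
        """Sketch: evaluate the nominal wavelength scale for a fake header."""
        fake_header = {'GRATLPMM': 3800.0, 'WINDOXE1': 4096}
        wave = self.compute_model_wavelengths(fake_header)
        return wave.min(), wave.max()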
def adjust_wavelengths(self, wavelengths, p):
"""Adjust a set of model wavelengths by a quadratic function,
used by find_arclines to find the best fit.
Parameters
----------
wavelengths: (nx) array
One-dimensional wavelength array
p: (3) array
p[0] is a quadratic term, with p[0]=1 giving a 1 Angstrom shift at the edges
of the wavelength array.
            p[1] is a multiplicative dispersion scale factor (values near 1.0), with
            p[1]=1.01 stretching the wavelength scale by 1% at the edges with
            respect to the center.
p[2] is a shift in pixels.
Returns
-------
wavelengths: (nx) array
One-dimensional wavelength array
"""
#Median wavelength.
medw = np.median(wavelengths)
#Delta wavelength from center to edge.
dw = max(wavelengths) - medw
wavelengths = p[0]*((wavelengths-medw)/dw)**2 + p[1]*(wavelengths-medw) + medw
wstep = wavelengths[1:]-wavelengths[:-1]
wstep = np.append(wstep, wstep[-1])
return wavelengths - p[2]*wstep
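    # Sketch with made-up numbers, not part of the original flow: add a 0.5 A
    # quadratic bow, stretch the dispersion by 1% about the centre (p[1] is a
    # multiplicative scale near 1.0) and shift the solution by 2 pixels.
    def _example_adjust_wavelengths(self):
        """Minimal demonstration of the quadratic wavelength adjustment."""
        toy_wave = 4700.0 + 0.05*np.arange(4096)
        return self.adjust_wavelengths(toy_wave, [0.5, 1.01, 2.0])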
def find_arclines(self,arc, header):
""" Based on the model wavelength scale from degisn physical parameters only, try to find the
positions of the arc lines. This is not necessarily a robust program - hopefully it
only has to be run once, and the reasonable fit to the arc lines can then be input
(through the calbration directory) to fit_arclines.
Parameters
----------
arc: (nfibres*nslitlets, nwave) array
Extracted arc spectra
header: pyfits header
Header for the arc file.
Returns
-------
wavelengths: (nslitlets*nfibres, nx) array
Wavelengths of each pixel in Angstroms.
"""
#The following code originally from quick_image_gui.py for Koala. The philosophy is to do our best
#based on a physical model to put the arclines on to chip coordinates... to raise the
#arc fluxes to a small power after truncating any noise, then to maximise the
#cross-correlation function with reasonably broad library arc line functions.
arc_center = np.median(arc[170:230,:],axis=0)
arc_center = np.sqrt(np.maximum(arc_center - np.median(arc_center),0))
szx = arc.shape[1]
arclines = np.loadtxt(self.cdir + '../thxe.arc')
wavelengths = self.compute_model_wavelengths(header)
arc_ix = np.where( (arclines[:,0] > min(wavelengths) + 0.5) * ((arclines[:,0] < max(wavelengths) - 0.5)) )[0]
if len(arc_ix)==0:
print("Error: No arc lines within wavelength range!")
raise UserWarning
arclines = arclines[arc_ix,:]
g = np.exp(-(np.arange(15)-7.0)**2/30.0)
        #Only consider offsets of up to npix_search pixels either side of the nominal solution.
npix_search = 120
nscale_search = 101
nquad_search = 15
scales = 1.0 + 0.0015*(np.arange(nscale_search) - nscale_search//2)
#Peak to valley in Angstroms.
quad = 0.2*(np.arange(nquad_search) - nquad_search//2)
corr3d = np.zeros( (nquad_search, nscale_search, 2*npix_search) )
print("Beginning search for optimal wavelength scaling...")
for j in range(nquad_search):
for i in range(nscale_search):
xcorr = np.zeros(szx)
pxarc = np.interp(arclines[:,0],self.adjust_wavelengths(wavelengths, [quad[j],scales[i],0]), np.arange(szx)).astype(int)
xcorr[pxarc] = np.sqrt(arclines[:,1])
xcorr = np.convolve(xcorr,g,mode='same')
corfunc=np.correlate(arc_center,xcorr,mode='same')
corr3d[j,i,:]=corfunc[szx//2-npix_search:szx//2+npix_search]
#if (i == 32):
# import pdb; pdb.set_trace()
pix_offset = np.unravel_index(corr3d.argmax(), corr3d.shape)
print("Max correlation: " + str(np.max(corr3d)))
#corfunc[szx//2+npix_search:] = 0
#plt.plot(np.arange(2*npix_search) - npix_search, corfunc[szx//2-npix_search:szx//2+npix_search])
#pix_offset = np.argmax(corfunc) - szx//2
plt.clf()
plt.imshow(corr3d[pix_offset[0],:,:], interpolation='nearest')
plt.title('Click to continue...')
plt.ginput(1)
xcorr = np.zeros(szx)
new_wavelengths = self.adjust_wavelengths(wavelengths, [quad[pix_offset[0]],scales[pix_offset[1]],pix_offset[2]-npix_search])
pxarc = np.interp(arclines[:,0],new_wavelengths,np.arange(szx)).astype(int)
pxarc = np.maximum(pxarc,0)
        pxarc = np.minimum(pxarc,szx-1)  #Clip to the last valid pixel index.
xcorr[pxarc] = np.sqrt(arclines[:,1])
xcorr = np.convolve(xcorr,g,mode='same')
plt.clf()
plt.plot(new_wavelengths, xcorr)
plt.plot(new_wavelengths, arc_center)
plt.xlabel('Wavelength')
#Now go through each slitlet (i.e. for a reliable median)
#Making appropriate adjustments to the scale.
nslitlets = 40
nfibres = 10
slitlet_shift = np.zeros(nslitlets)
for i in range(nslitlets):
arc_med = np.median(arc[i*nfibres:(i+1)*nfibres,:],axis=0)
arc_med = np.sqrt(np.maximum(arc_med - np.median(arc_med),0))
corfunc=np.correlate(arc_med,xcorr,mode='same')
corfunc[:szx//2-npix_search]=0
corfunc[szx//2+npix_search:]=0
slitlet_shift[i] = np.argmax(corfunc)-szx//2
print("Slitlet " + str(i) + " correlation " +str(np.max(corfunc)))
#Save a polynomial fit to the wavelengths versus pixel
#a_5 x^5 + a_4 x^4 + a_3 x^3 + a_2 x^2
# + b_4 x^4 y + b_3 x^3 y + b_2 x^2 y
# + c_3 x^3 y^2 + c_2 x^2 y^2
# + d_2 x^2 y^3
x_ix = np.arange(szx) - szx//2
poly_p = np.polyfit(x_ix,new_wavelengths,5)
wcen = poly_p[5]
disp = poly_p[4]
poly2dfit = np.append(poly_p[0:4],np.zeros(6))
np.savetxt(self.rdir + 'poly2d_p' + header['SOURCE'][6] + '.txt',poly2dfit, fmt='%.6e')
#The individual fibres have a pixel shift, which we convert to a wavelength shift
#at the chip center.
fibre_fits = np.zeros((nslitlets*nfibres,2))
fibre_fits[:,0] = disp * np.ones(nslitlets*nfibres)
        #pixel shift multiplied by dlambda/dpix gives dlambda
fibre_fits[:,1] = wcen - np.repeat(slitlet_shift,nfibres) * disp
np.savetxt(self.rdir + 'dispwave_p' + header['SOURCE'][6] + '.txt',fibre_fits, fmt='%.6e')
return new_wavelengths
def find_wavelengths(self, poly2dfit, fibre_fits, nx):
"""Find the wavelengths for all fibers and all pixels, based on the model that
        includes the polynomial 2D fit and the linear fibre fits.
Parameters
----------
poly2dfit: (10) array
2D polynomial fit parameters.
        fibre_fits: (nfibres*nslitlets, 2) array
Linear dispersion fit to each fiber
nx: int
Number of pixels in the x (dispersion, i.e. wavelength) direction
Returns
-------
wavelengths: (nslitlets*nfibres, nx) array
Wavelengths of each pixel in Angstroms.
"""
nslitlets=40 #!!! This should be a property of the main class.
nfibres=10
wavelengths = np.zeros( (nslitlets*nfibres, nx) )
x_ix = np.arange(nx) - nx//2
y_ix = np.arange(nslitlets*nfibres) - nslitlets*nfibres//2
xy_ix = np.meshgrid(x_ix,y_ix)
#Start with the linear component of the wavelength scale.
for i in range(nslitlets*nfibres):
wavelengths[i,:] = fibre_fits[i,0] * x_ix + fibre_fits[i,1]
#As we have a 2D polynomial, bite the bullet and just manually create the
#main offset...
poly_func = np.poly1d(np.append(poly2dfit[0:4],[0,0]))
wavelengths += poly_func(xy_ix[0])
poly_func = np.poly1d(np.append(poly2dfit[4:7],[0,0]))
wavelengths += poly_func(xy_ix[0])*xy_ix[1]
poly_func = np.poly1d(np.append(poly2dfit[7:9],[0,0]))
wavelengths += poly_func(xy_ix[0])*xy_ix[1]**2
wavelengths += poly2dfit[9]*xy_ix[0]**2*xy_ix[1]**3
return wavelengths
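    # Sketch with made-up coefficients, not part of the original pipeline: a
    # purely linear solution has all ten 2D-polynomial terms set to zero and a
    # (dispersion, central wavelength) pair per fibre in fibre_fits.
    def _example_find_wavelengths(self):
        """Minimal demonstration of evaluating the wavelength model."""
        poly2d = np.zeros(10)
        fibre_fits = np.zeros((400, 2))   #40 slitlets x 10 fibres
        fibre_fits[:, 0] = 0.05           #Dispersion in Angstroms/pixel
        fibre_fits[:, 1] = 4800.0         #Central wavelength in Angstroms
        return self.find_wavelengths(poly2d, fibre_fits, 4096)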
def fit_arclines(self,arc, header, plotit=False, npix_extract = 51):
"""Assuming that the initial model is good enough, fit to the arclines.
Whenever an fibre isn't in use, this routine will take the fibre_fits from the
two nearest good fibres in the slitlet.
npix_extract:
Maximum number of pixels for extraction, including tramline tilt.
The procedure is to:
0) Based on a model, find the wavelength of every pixel.
1) First find the x pixels corresponding to the model arc lines, as well
as the local dispersion at each line (arc_x and arc_disp).
2) Create a matrix such that wave = M * p, with p our parameters.
3) Find the dx values, convert to dwave.
4) Convert the dwave values to dp.
Parameters
----------
arc: array
Extracted arc spectrum
header: pyfits header
Header of the arc file
plotit: boolean (default False)
Do we show the arc line fits?
npix_extract: int (default 51)
Number of pixels to extract in the fitting of the arc line.
Returns
-------
wavelengths: (nslitlets*nfibres, nx) array
Wavelengths of each pixel in Angstroms.
"""
nslitlets=40
nfibres=10
#Oversampling of the PSF - should be an odd number due to symmetrical
#convolutions.
oversamp = 3
delx_deriv = 0.01
flux_min = 20
flux_max = 200
nx = arc.shape[1]
poly2dfit = np.loadtxt(self.cdir + 'poly2d_p' + header['SOURCE'][6] + '.txt')
npoly_p = len(poly2dfit) #!!! Has to be 10 for the code below so far.
fibre_fits = np.loadtxt(self.cdir + 'dispwave_p' + header['SOURCE'][6] + '.txt')
wavelengths = self.find_wavelengths(poly2dfit, fibre_fits, nx)
#Read in the arc file...
arclines = np.loadtxt(self.cdir + '../thxe.arc')
arc_ix = np.where( (arclines[:,0] > np.min(wavelengths) + 0.5) * ((arclines[:,0] < np.max(wavelengths) - 0.5)) )[0]
arclines = arclines[arc_ix,:]
narc = len(arc_ix)
#Initialise the arc x and dispersion values...
arc_x = np.zeros( (nfibres*nslitlets,narc) )
arc_disp = np.zeros( (nfibres*nslitlets,narc) )
#Find the x pixels corresponding to each wavelength.
#PSF stuff...
psf = self.make_psf(npix=npix_extract, oversamp=oversamp)
#e_ix is the extraction index.
e_ix_oversamp = np.arange(oversamp*npix_extract) - oversamp*npix_extract//2
e_ix = ( np.arange(npix_extract) - npix_extract//2 )*oversamp
psfim_plus = np.zeros((npix_extract, nslitlets*nfibres))
psfim_minus = np.zeros((npix_extract, nslitlets*nfibres))
#Indices for later...
y_ix = np.arange(nslitlets*nfibres) - nslitlets*nfibres//2
y_ix = np.repeat(y_ix,narc).reshape(nfibres*nslitlets,narc)
x_ix = np.arange(nx) - nx//2
arcline_matrix = np.zeros((nfibres*nslitlets, narc,npoly_p + 2*nfibres*nslitlets))
#Whoa! That was tricky. Now lets use our PSF to fit for the arc lines.
dx = np.zeros((nslitlets*nfibres,narc))
weight_dx = np.zeros((nslitlets*nfibres,narc))
for count in range(0,3):
wavelengths = self.find_wavelengths(poly2dfit, fibre_fits, nx)
#Find the arc_x values...
#Dispersion in the conventional sense, i.e. dlambda/dx
for i in range(nfibres*nslitlets):
xplus = np.interp(arclines[:,0] + delx_deriv, wavelengths[i,:], x_ix)
xminus = np.interp(arclines[:,0] - delx_deriv, wavelengths[i,:], x_ix)
arc_x[i,:] = 0.5*(xplus + xminus)
arc_disp[i,:] = 2.0*delx_deriv/(xplus - xminus)
#Make a matrix that maps model parameters to wavelengths, based on the arc_x values.
#(nfibres*nslitlets,narc,npoly_p + 2*nfibres*nslitlets)
arcline_matrix = arcline_matrix.reshape(nfibres*nslitlets, narc,npoly_p + 2*nfibres*nslitlets)
arcline_matrix[:,:,0] = arc_x**5
arcline_matrix[:,:,1] = arc_x**4
arcline_matrix[:,:,2] = arc_x**3
arcline_matrix[:,:,3] = arc_x**2
arcline_matrix[:,:,4] = arc_x**4*y_ix
arcline_matrix[:,:,5] = arc_x**3*y_ix
arcline_matrix[:,:,6] = arc_x**2*y_ix
arcline_matrix[:,:,7] = arc_x**3*y_ix**2
arcline_matrix[:,:,8] = arc_x**2*y_ix**2
arcline_matrix[:,:,9] = arc_x**2*y_ix**3
for i in range(nfibres*nslitlets):
arcline_matrix[i,:,npoly_p+i] = arc_x[i,:]
arcline_matrix[i,:,npoly_p+nfibres*nslitlets + i] = 1.0
arcline_matrix = arcline_matrix.reshape(nfibres*nslitlets*narc,npoly_p + 2*nfibres*nslitlets)
#!!! Sanity check that this matrix actually works...
#p_all = np.append(poly2dfit, np.transpose(fibre_fits).flatten())
#wavelengths_test = np.dot(arcline_matrix,p_all)
#import pdb; pdb.set_trace()
for i in range(narc):
                #Find the range of pixel values that correspond to the arc lines for all
                #fibres.
center_int = int(np.median(arc_x[:,i]) + nx//2)
subim = np.zeros((arc.shape[0], npix_extract))
subim[:,np.maximum(npix_extract//2 - center_int,0):\
np.minimum(arc.shape[1]-center_int-npix_extract//2-1,arc.shape[1])] = \
arc[:,np.maximum(center_int - npix_extract//2,0):np.minimum(center_int + npix_extract//2+1,arc.shape[1])]
subim = subim.T
#Start off with a slow interpolation for simplicity.
for k in range(nslitlets*nfibres):
offset = arc_x[k,i] - center_int + nx//2
psfim_plus[:,k] = np.interp(e_ix - (offset + delx_deriv)*oversamp, e_ix_oversamp, psf)
psfim_minus[:,k] = np.interp(e_ix - (offset - delx_deriv)*oversamp, e_ix_oversamp, psf)
psfim = 0.5*(psfim_plus + psfim_minus)
psfim_deriv = ( psfim_plus - psfim_minus )/2.0/delx_deriv
psfsum = np.sum(psfim*subim,axis=0)
dx[:,i] = np.sum(psfim_deriv*subim,axis=0)/np.maximum(psfsum,flux_min)*np.sum(psfim**2)/np.sum(psfim_deriv**2)
weight_dx[:,i] = np.maximum(psfsum-flux_min,1e-3)
weight_dx[:,i] = np.minimum(weight_dx[:,i],1.5*np.median(weight_dx[:,i]))
weight_dx[:,i] = np.minimum(weight_dx[:,i],flux_max)
if count > 0 and plotit:
plt.clf()
plt.imshow(psfim, aspect='auto', interpolation='nearest')
plt.draw()
plt.imshow(np.minimum(subim,np.mean(subim)*10), aspect='auto', interpolation='nearest')
plt.draw()
ww = np.where(weight_dx > 0)
print("RMS arc offset in pix (iteration " +str(count)+ "): " + str(np.sqrt(np.mean(dx[ww]**2))))
#For fibres with low flux, set dx to the median of the fibers around.
med_weight = np.median(weight_dx, axis=1)
ww = np.where(med_weight < 0.3*np.median(med_weight))[0]
dx[ww] = (nd.filters.median_filter(dx,size=3))[ww]
#Now convert the dx values to dwave.
dwave = (dx * arc_disp).reshape(nslitlets*nfibres*narc)
#That was easier than I thought it would be! Next, we have to do the linear fit to the
#dwave values
W = np.diag(weight_dx.flatten())
#So the model here is:
#delta_wavelengths = arcline_matrix . delta_p
delta_p = np.linalg.solve(np.dot(np.transpose(arcline_matrix),np.dot(W,arcline_matrix)) ,\
np.dot(np.transpose(arcline_matrix),np.dot(W,dwave)) )
poly2dfit -= delta_p[0:npoly_p]
fibre_fits[:,0] -= delta_p[npoly_p:npoly_p + nfibres*nslitlets]
fibre_fits[:,1] -= delta_p[npoly_p + nfibres*nslitlets:]
# pdb.set_trace() #!!! XXX
#Finally, go through the slitlets and fix the fibre fits for low SNR arcs (e.g. dead fibres)
med_weight = med_weight.reshape((nslitlets,nfibres))
fibre_fits = fibre_fits.reshape((nslitlets,nfibres,2))
fib_ix = np.arange(nfibres)
for i in range(nslitlets):
ww = np.where(med_weight[i,:] < 0.3*np.median(med_weight[i,:]))[0]
if len(ww)>0:
for wbad in ww:
nearby_fib = np.where( (wbad - fib_ix) < 4)[0]
fibre_fits[i,wbad,0] = np.median(fibre_fits[i,nearby_fib,0])
fibre_fits[i,wbad,1] = np.median(fibre_fits[i,nearby_fib,1])
fibre_fits = fibre_fits.reshape((nslitlets*nfibres,2))
#Save our fits!
np.savetxt(self.rdir + 'poly2d_p' + header['SOURCE'][6] + '.txt',poly2dfit, fmt='%.6e')
np.savetxt(self.rdir + 'dispwave_p' + header['SOURCE'][6] + '.txt',fibre_fits, fmt='%.6e')
return wavelengths
def worker(arm, skip_done=False):
"""Trivial function needed for multi-threading."""
arm.go(skip_done=skip_done)
return
def go_all(ddir_root, rdir_root, cdir_root, gdir_root='',skip_done=False):
"""Process all CCDs in a default way
Parameters
----------
ddir_root: string
Data directory - should contain subdirectories ccd_1, ccd_2 etc
rdir_root: string
Reduction directory root - should contain subdirectories ccd_1, ccd_2 etc
cdir_root: string
Calibration directory root - this is likely CODE_DIRECTORY/cal. Surely this can
be made a default!
"""
#Create directories if they don't already exist.
if os.path.isfile(rdir_root):
print("ERROR: reduction directory already exists as a file!")
raise UserWarning
if not os.path.isdir(rdir_root):
try:
os.mkdir(rdir_root)
except:
print("ERROR: Could not create directory " + rdir_root)
raise UserWarning
ccds = ['ccd_1', 'ccd_2', 'ccd_3', 'ccd_4']
arms = []
if gdir_root=='':
gdir_root = rdir_root
for ccd in ccds:
if not os.path.isdir(rdir_root + '/' + ccd):
os.mkdir(rdir_root + '/' + ccd)
arms.append(HERMES(ddir_root + '/' + ccd+ '/', rdir_root + '/' + ccd + '/', cdir_root + '/' + ccd + '/',gdir=gdir_root))
threads = []
for ix,arm in enumerate(arms):
t = Process(target=worker, args=(arm,skip_done))
t.name = ccds[ix]
# t = threading.Thread(target=worker, args=(arm,))
threads.append(t)
t.start()
for t in threads:
t.join()
print("Finished process: " + t.name)
| mit |
victorbergelin/scikit-learn | sklearn/linear_model/tests/test_randomized_l1.py | 214 | 4690 | # Authors: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.linear_model.randomized_l1 import (lasso_stability_path,
RandomizedLasso,
RandomizedLogisticRegression)
from sklearn.datasets import load_diabetes, load_iris
from sklearn.feature_selection import f_regression, f_classif
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model.base import center_data
diabetes = load_diabetes()
X = diabetes.data
y = diabetes.target
X = StandardScaler().fit_transform(X)
X = X[:, [2, 3, 6, 7, 8]]
# test that the feature score of the best features
F, _ = f_regression(X, y)
def test_lasso_stability_path():
# Check lasso stability path
# Load diabetes data and add noisy features
scaling = 0.3
coef_grid, scores_path = lasso_stability_path(X, y, scaling=scaling,
random_state=42,
n_resampling=30)
assert_array_equal(np.argsort(F)[-3:],
np.argsort(np.sum(scores_path, axis=1))[-3:])
def test_randomized_lasso():
# Check randomized lasso
scaling = 0.3
selection_threshold = 0.5
# or with 1 alpha
clf = RandomizedLasso(verbose=False, alpha=1, random_state=42,
scaling=scaling,
selection_threshold=selection_threshold)
feature_scores = clf.fit(X, y).scores_
assert_array_equal(np.argsort(F)[-3:], np.argsort(feature_scores)[-3:])
# or with many alphas
clf = RandomizedLasso(verbose=False, alpha=[1, 0.8], random_state=42,
scaling=scaling,
selection_threshold=selection_threshold)
feature_scores = clf.fit(X, y).scores_
assert_equal(clf.all_scores_.shape, (X.shape[1], 2))
assert_array_equal(np.argsort(F)[-3:], np.argsort(feature_scores)[-3:])
X_r = clf.transform(X)
X_full = clf.inverse_transform(X_r)
assert_equal(X_r.shape[1], np.sum(feature_scores > selection_threshold))
assert_equal(X_full.shape, X.shape)
clf = RandomizedLasso(verbose=False, alpha='aic', random_state=42,
scaling=scaling)
feature_scores = clf.fit(X, y).scores_
assert_array_equal(feature_scores, X.shape[1] * [1.])
clf = RandomizedLasso(verbose=False, scaling=-0.1)
assert_raises(ValueError, clf.fit, X, y)
clf = RandomizedLasso(verbose=False, scaling=1.1)
assert_raises(ValueError, clf.fit, X, y)
def test_randomized_logistic():
# Check randomized sparse logistic regression
iris = load_iris()
X = iris.data[:, [0, 2]]
y = iris.target
X = X[y != 2]
y = y[y != 2]
F, _ = f_classif(X, y)
scaling = 0.3
clf = RandomizedLogisticRegression(verbose=False, C=1., random_state=42,
scaling=scaling, n_resampling=50,
tol=1e-3)
X_orig = X.copy()
feature_scores = clf.fit(X, y).scores_
assert_array_equal(X, X_orig) # fit does not modify X
assert_array_equal(np.argsort(F), np.argsort(feature_scores))
clf = RandomizedLogisticRegression(verbose=False, C=[1., 0.5],
random_state=42, scaling=scaling,
n_resampling=50, tol=1e-3)
feature_scores = clf.fit(X, y).scores_
assert_array_equal(np.argsort(F), np.argsort(feature_scores))
def test_randomized_logistic_sparse():
# Check randomized sparse logistic regression on sparse data
iris = load_iris()
X = iris.data[:, [0, 2]]
y = iris.target
X = X[y != 2]
y = y[y != 2]
# center here because sparse matrices are usually not centered
X, y, _, _, _ = center_data(X, y, True, True)
X_sp = sparse.csr_matrix(X)
F, _ = f_classif(X, y)
scaling = 0.3
clf = RandomizedLogisticRegression(verbose=False, C=1., random_state=42,
scaling=scaling, n_resampling=50,
tol=1e-3)
feature_scores = clf.fit(X, y).scores_
clf = RandomizedLogisticRegression(verbose=False, C=1., random_state=42,
scaling=scaling, n_resampling=50,
tol=1e-3)
feature_scores_sp = clf.fit(X_sp, y).scores_
assert_array_equal(feature_scores, feature_scores_sp)
| bsd-3-clause |
vrieni/orange | Orange/clustering/kmeans.py | 6 | 21404 | """
*******************************
K-means clustering (``kmeans``)
*******************************
.. index::
single: clustering, kmeans
.. index:: agglomerative clustering
.. autoclass:: Orange.clustering.kmeans.Clustering(data=None, centroids=3, maxiters=None, minscorechange=None, stopchanges=0, nstart=1, initialization=init_random, distance=Orange.distance.Euclidean, scoring=score_distance_to_centroids, inner_callback=None, outer_callback=None)
:members:
:exclude-members: __init__
.. automethod:: __init__(data=None, centroids=3, maxiters=None, minscorechange=None, stopchanges=0, nstart=1, initialization=init_random, distance=Orange.distance.Euclidean, scoring=score_distance_to_centroids, inner_callback=None, outer_callback=None)
Examples
========
The following code runs k-means clustering and prints out the cluster indexes
for the last 10 data instances (:download:`kmeans-run.py <code/kmeans-run.py>`):
.. literalinclude:: code/kmeans-run.py
The output of this code is::
[1, 1, 2, 1, 1, 1, 2, 1, 1, 2]
Invoking a call-back function may be useful when tracing the progress of the clustering.
Below is code that uses an :obj:`inner_callback` to report on the number of instances
that have changed the cluster and to report on the clustering score. For the score
to be computed at each iteration we have to set :obj:`minscorechange`, but we can
leave it at 0 or even set it to a negative value, which allows the score to deteriorate
by some amount (:download:`kmeans-run-callback.py <code/kmeans-run-callback.py>`):
.. literalinclude:: code/kmeans-run-callback.py
The convergence on Iris data set is fast::
Iteration: 1, changes: 150, score: 10.9555
Iteration: 2, changes: 12, score: 10.3867
Iteration: 3, changes: 2, score: 10.2034
Iteration: 4, changes: 2, score: 10.0699
Iteration: 5, changes: 2, score: 9.9542
Iteration: 6, changes: 1, score: 9.9168
Iteration: 7, changes: 2, score: 9.8624
Iteration: 8, changes: 0, score: 9.8624
Call-back above is used for reporting of the progress, but may as well call a function that plots a selection data projection with corresponding centroid at a given step of the clustering. This is exactly what we did with the following script (:download:`kmeans-trace.py <code/kmeans-trace.py>`):
.. literalinclude:: code/kmeans-trace.py
Only the first four scatterplots are shown below. Colors of the data instances indicate the cluster membership. Notice that since the Iris data set includes four attributes, the closest centroid in a particular 2-dimensional projection is not necessary also the centroid of the cluster that the data point belongs to.
.. image:: files/kmeans-scatter-001.png
.. image:: files/kmeans-scatter-002.png
.. image:: files/kmeans-scatter-003.png
.. image:: files/kmeans-scatter-004.png
k-Means Utility Functions
=========================
.. automethod:: Orange.clustering.kmeans.init_random
.. automethod:: Orange.clustering.kmeans.init_diversity
.. autoclass:: Orange.clustering.kmeans.init_hclustering
:members:
.. automethod:: Orange.clustering.kmeans.plot_silhouette
.. automethod:: Orange.clustering.kmeans.score_distance_to_centroids
.. automethod:: Orange.clustering.kmeans.score_silhouette
.. automethod:: Orange.clustering.kmeans.score_fast_silhouette
Typically, the choice of seeds has a large impact on the k-means clustering,
with better initialization methods yielding a clustering that converges faster
and finds more optimal centroids. The following code compares three different
initialization methods (random, diversity-based and hierarchical clustering-based)
in terms of how fast they converge (:download:`kmeans-cmp-init.py <code/kmeans-cmp-init.py>`):
.. literalinclude:: code/kmeans-cmp-init.py
As expected, k-means converges faster with diversity and clustering-based
initialization than with random seed selection::
Rnd Div HC
iris 12 3 4
housing 14 6 4
vehicle 11 4 3
The following code computes the silhouette score for k=2..7 and plots a
silhouette plot for k=3 (:download:`kmeans-silhouette.py <code/kmeans-silhouette.py>`):
.. literalinclude:: code/kmeans-silhouette.py
The analysis suggests that k=2 is preferred as it yields
the maximal silhouette coefficient::
2 0.629467553352
3 0.504318855054
4 0.407259377854
5 0.358628975081
6 0.353228492088
7 0.366357876944
.. figure:: files/kmeans-silhouette.png
Silhouette plot for k=3.
"""
import math
import sys
import random
from Orange import statc
import Orange.clustering.hierarchical
import Orange
# miscellaneous functions
def _modus(dist):
    #Check bool(dist) - False means no known cases
    #Check dist.cases > 0 - We can't return some value from the domain without knowing if it is even present
    #in the data. TODO: What does this mean for k-means convergence?
if bool(dist) and dist.cases > 0:
return dist.modus()
else:
return None
def data_center(data):
"""
Returns a center of the instances in the data set (average across data instances for continuous attributes, most frequent value for discrete attributes).
"""
atts = data.domain.attributes
astats = Orange.statistics.basic.Domain(data)
center = [astats[a].avg if a.varType == Orange.feature.Type.Continuous \
# else max(enumerate(orange.Distribution(a, data)), key=lambda x:x[1])[0] if a.varType == orange.VarTypes.Discrete
else _modus(Orange.statistics.distribution.Distribution(a, data)) if a.varType == Orange.feature.Type.Discrete
else None
for a in atts]
if data.domain.classVar:
center.append(0)
return Orange.data.Instance(data.domain, center)
def minindex(x):
"""Return the index of the minimum element"""
return x.index(min(x))
def avg(x):
"""Return the average (mean) of a given list"""
return (float(sum(x)) / len(x)) if x else 0
#
# data distances
#
# k-means clustering
# clustering scoring functions
def score_distance_to_centroids(km):
"""Returns an average distance of data instances to their associated centroids.
:param km: a k-means clustering object.
:type km: :class:`KMeans`
"""
return sum(km.distance(km.centroids[km.clusters[i]], d) for i,d in enumerate(km.data))
score_distance_to_centroids.minimize = True
def score_conditional_entropy(km):
"""UNIMPLEMENTED cluster quality measured by conditional entropy"""
    raise NotImplementedError
def score_within_cluster_distance(km):
"""UNIMPLEMENTED weighted average within-cluster pairwise distance"""
    raise NotImplementedError
score_within_cluster_distance.minimize = True
def score_between_cluster_distance(km):
"""Sum of distances from elements to 'nearest miss' centroids"""
return sum(min(km.distance(c, d) for j,c in enumerate(km.centroids) if j!=km.clusters[i]) for i,d in enumerate(km.data))
from Orange.utils import deprecated_function_name
score_betweenClusterDistance = deprecated_function_name(score_between_cluster_distance)
def score_silhouette(km, index=None):
"""Returns an average silhouette score of data instances.
:param km: a k-means clustering object.
:type km: :class:`KMeans`
    :param index: if given, the function returns just the silhouette score of that particular data instance.
:type index: integer
"""
if index == None:
return avg([score_silhouette(km, i) for i in range(len(km.data))])
cind = km.clusters[index]
a = avg([km.distance(km.data[index], ex) for i, ex in enumerate(km.data) if
km.clusters[i] == cind and i != index])
b = min([avg([km.distance(km.data[index], ex) for i, ex in enumerate(km.data) if
km.clusters[i] == c])
for c in range(len(km.centroids)) if c != cind])
return float(b - a) / max(a, b) if max(a, b) > 0 else 0.0
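# A hedged usage sketch in the spirit of the documentation examples above: the
# "iris" dataset name is assumed to be resolvable by Orange.data.Table.
def _example_score_silhouette():
    """Cluster iris with k=3 and return the average silhouette score."""
    data = Orange.data.Table("iris")
    km = Clustering(data, centroids=3)
    return score_silhouette(km)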
def score_fast_silhouette(km, index=None):
"""Same as score_silhouette, but computes an approximation and is faster.
:param km: a k-means clustering object.
:type km: :class:`KMeans`
"""
if index == None:
return avg([score_fast_silhouette(km, i) for i in range(len(km.data))])
cind = km.clusters[index]
a = km.distance(km.data[index], km.centroids[km.clusters[index]])
b = min([km.distance(km.data[index], c) for i,c in enumerate(km.centroids) if i != cind])
return float(b - a) / max(a, b) if max(a, b) > 0 else 0.0
def compute_bic(km):
"""Compute bayesian information criteria score for given clustering. NEEDS REWRITE!!!"""
data = km.data
medoids = km.centroids
M = len(data.domain.attributes)
R = float(len(data))
Ri = [km.clusters.count(i) for i in range(km.k)]
numFreePar = (len(km.data.domain.attributes) + 1.) * km.k * math.log(R, 2.) / 2.
# sigma**2
s2 = 0.
cidx = [i for i, attr in enumerate(data.domain.attributes) if attr.varType in [Orange.feature.Type.Continuous, Orange.feature.Type.Discrete]]
for x, midx in izip(data, mapping):
medoid = medoids[midx] # medoids has a dummy element at the beginning, so we don't need -1
s2 += sum( [(float(x[i]) - float(medoid[i]))**2 for i in cidx] )
s2 /= (R - K)
if s2 < 1e-20:
return None, [None]*K
# log-lokehood of clusters: l(Dn)
# log-likehood of clustering: l(D)
ld = 0
bicc = []
for k in range(1, 1+K):
ldn = -1. * Ri[k] * ((math.log(2. * math.pi, 2) / -2.) - (M * math.log(s2, 2) / 2.) + (K / 2.) + math.log(Ri[k], 2) - math.log(R, 2))
ld += ldn
bicc.append(ldn - numFreePar)
return ld - numFreePar, bicc
#
# silhouette plot
#
def plot_silhouette(km, filename='tmp.png', fast=False):
""" Saves a silhuette plot to filename, showing the distributions of silhouette scores in clusters. kmeans is a k-means clustering object. If fast is True use score_fast_silhouette to compute scores instead of score_silhouette.
:param km: a k-means clustering object.
:type km: :class:`KMeans`
:param filename: name of output plot.
:type filename: string
:param fast: if True use :func:`score_fast_silhouette` to compute scores instead of :func:`score_silhouette`
:type fast: boolean.
"""
import matplotlib.pyplot as plt
plt.figure()
scoring = score_fast_silhouette if fast else score_silhouette
scores = [[] for i in range(km.k)]
for i, c in enumerate(km.clusters):
scores[c].append(scoring(km, i))
csizes = map(len, scores)
cpositions = [sum(csizes[:i]) + (i+1)*3 + csizes[i]/2 for i in range(km.k)]
scores = reduce(lambda x,y: x + [0]*3 + sorted(y), scores, [])
plt.barh(range(len(scores)), scores, linewidth=0, color='c')
plt.yticks(cpositions, map(str, range(km.k)))
#plt.title('Silhouette plot')
plt.ylabel('Cluster')
plt.xlabel('Silhouette value')
plt.savefig(filename)
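# Hedged usage sketch (the output filename is illustrative): save a silhouette
# plot for a k=3 clustering of iris, using the faster approximate scoring.
def _example_plot_silhouette():
    data = Orange.data.Table("iris")
    km = Clustering(data, centroids=3)
    plot_silhouette(km, filename='silhouette-iris.png', fast=True)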
# clustering initialization (seeds)
# initialization functions should be of the type f(data, k, distfun)
def init_random(data, k, _):
"""A function that can be used for initialization of k-means clustering returns k data instances from the data. This type of initialization is also known as Fory's initialization (Forgy, 1965; He et al., 2004).
:param data: data instances.
:type data: :class:`orange.ExampleTable`
:param k: the number of clusters.
:type k: integer
"""
return data.getitems(random.sample(range(len(data)), k))
def init_diversity(data, k, distfun):
"""A function that can be used for intialization of k-means clustering. Returns a set of centroids where the first one is a data point being the farthest away from the center of the data, and consequent centroids data points of which the minimal distance to the previous set of centroids is maximal. Differs from the initialization proposed by Katsavounidis et al. (1994) only in the selection of the first centroid (where they use a data instance with the highest norm).
:param data: data instances.
:type data: :class:`orange.ExampleTable`
:param k: the number of clusters.
:type k: integer
:param distfun: a distance function.
:type distfun: :class:`Orange.distance.Distance`
"""
center = data_center(data)
# the first seed should be the farthest point from the center
seeds = [max([(distfun(d, center), d) for d in data])[1]]
# other seeds are added iteratively, and are data points that are farthest from the current set of seeds
for i in range(1,k):
seeds.append(max([(min([distfun(d, s) for s in seeds]), d) for d in data if d not in seeds])[1])
return seeds
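# Hedged usage sketch: plug the diversity-based seeding into Clustering; the
# dataset name ("housing") is only an assumption for illustration.
def _example_init_diversity():
    data = Orange.data.Table("housing")
    km = Clustering(data, centroids=3, initialization=init_diversity)
    return km.centroids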
class init_hclustering():
"""
    A class that returns a clustering initialization function that performs
    hierarchical clustering, uses it to infer k clusters, and computes a
list of cluster-based data centers
"""
def __init__(self, n=100):
"""
:param n: number of data instances to sample.
:type n: integer
"""
self.n = n
    def __call__(self, data, k, distfun):
"""
:param data: data instances.
:type data: :class:`orange.ExampleTable`
:param k: the number of clusters.
:type k: integer
:param distfun: a distance function.
:type distfun: :class:`Orange.distance.Distance`
"""
sample = Orange.data.Table(random.sample(data, min(self.n, len(data))))
root = Orange.clustering.hierarchical.clustering(sample)
cmap = Orange.clustering.hierarchical.top_clusters(root, k)
return [data_center(Orange.data.Table([sample[e] for e in cl])) for cl in cmap]
#
# k-means clustering, main implementation
#
class Clustering:
"""Implements a k-means clustering algorithm:
#. Choose the number of clusters, k.
#. Choose a set of k initial centroids.
#. Assign each instances in the data set to the closest centroid.
#. For each cluster, compute a new centroid as a center of clustered
data instances.
#. Repeat the previous two steps, until some convergence criterion is
met (e.g., the cluster assignment has not changed).
The main advantages of this algorithm are simplicity and low memory
requirements. The principal disadvantage is the dependence of results
on the selection of initial set of centroids.
.. attribute:: k
Number of clusters.
.. attribute:: data
Instances to cluster.
.. attribute:: centroids
Current set of centroids.
.. attribute:: scoring
Current clustering score.
.. attribute:: iteration
Current clustering iteration.
.. attribute:: clusters
A list of cluster indexes. An i-th element provides an
index to a centroid associated with i-th data instance from the input
data set.
"""
def __init__(self, data=None, centroids=3, maxiters=None, minscorechange=None,
stopchanges=0, nstart=1, initialization=init_random,
distance=Orange.distance.Euclidean,
scoring=score_distance_to_centroids, inner_callback=None,
outer_callback=None):
"""
:param data: Data instances to be clustered. If not None, clustering will be executed immediately after initialization unless ``initialize_only=True``.
:type data: :class:`~Orange.data.Table` or None
:param centroids: either specify a number of clusters or provide a list of examples that will serve as clustering centroids.
:type centroids: :obj:`int` or :obj:`list` of :class:`~Orange.data.Instance`
:param nstart: If greater than one, nstart runs of the clustering algorithm will be executed, returning the clustering with the best (lowest) score.
:type nstart: int
:param distance: an example distance constructor, which measures the distance between two instances.
:type distance: :class:`~Orange.distance.DistanceConstructor`
        :param initialization: a function to select centroids given data instances, k and an example distance function. This module implements different approaches (:obj:`init_random`, :obj:`init_diversity`, :obj:`init_hclustering`).
:param scoring: a function that takes clustering object and returns the clustering score. It could be used, for instance, in procedure that repeats the clustering nstart times, returning the clustering with the lowest score.
:param inner_callback: invoked after every clustering iteration.
:param outer_callback: invoked after every clustering restart (if nstart is greater than 1).
Stopping criteria:
:param maxiters: maximum number of clustering iterations
:type maxiters: integer
        :param minscorechange: minimal improvement of the score from the previous iteration (if lower, the clustering will stop). If None, the score will not be computed between iterations
:type minscorechange: float or None
:param stopchanges: if the number of instances changing the cluster is lower or equal to stopchanges, stop the clustering.
:type stopchanges: integer
"""
self.data = data
self.k = centroids if type(centroids)==int else len(centroids)
self.centroids = centroids if type(centroids) == Orange.data.Table else None
self.maxiters = maxiters
self.minscorechange = minscorechange
self.stopchanges = stopchanges
self.nstart = nstart
self.initialization = initialization
self.distance_constructor = distance
self.distance = self.distance_constructor(self.data) if self.data else None
self.scoring = scoring
self.minimize_score = True if hasattr(scoring, 'minimize') else False
self.inner_callback = inner_callback
self.outer_callback = outer_callback
if self.data:
self.run()
def __call__(self, data = None):
"""Runs the k-means clustering algorithm, with optional new data."""
if data:
self.data = data
self.distance = self.distance_constructor(self.data)
self.run()
def init_centroids(self):
"""Initialize cluster centroids"""
if self.centroids and not self.nstart > 1: # centroids were specified
return
self.centroids = self.initialization(self.data, self.k, self.distance)
def compute_centeroid(self, data):
"""Return a centroid of the data set."""
return data_center(data)
def compute_cluster(self):
"""calculate membership in clusters"""
return [minindex([self.distance(s, d) for s in self.centroids]) for d in self.data]
def runone(self):
"""Runs a single clustering iteration, starting with re-computation of centroids, followed by computation of data membership (associating data instances to their nearest centroid)."""
self.centroids = [self.compute_centeroid(self.data.getitems(
[i for i, c in enumerate(self.clusters) if c == cl])) for cl in range(self.k)]
self.clusters = self.compute_cluster()
def run(self):
"""
Runs clustering until the convergence conditions are met. If nstart is greater than one, nstart runs of the clustering algorithm will be executed, returning the clustering with the best (lowest) score.
"""
self.winner = None
for startindx in range(self.nstart):
self.init_centroids()
self.clusters = old_cluster = self.compute_cluster()
if self.minscorechange != None:
self.score = old_score = self.scoring(self)
self.nchanges = len(self.data)
self.iteration = 0
stopcondition = False
if self.inner_callback:
self.inner_callback(self)
while not stopcondition:
self.iteration += 1
self.runone()
self.nchanges = sum(map(lambda x,y: x!=y, old_cluster, self.clusters))
old_cluster = self.clusters
if self.minscorechange != None:
self.score = self.scoring(self)
scorechange = (self.score - old_score) / old_score if old_score > 0 else self.minscorechange
if self.minimize_score:
scorechange = -scorechange
old_score = self.score
stopcondition = (self.nchanges <= self.stopchanges or
self.iteration == self.maxiters or
(self.minscorechange != None and
scorechange <= self.minscorechange))
if self.inner_callback:
self.inner_callback(self)
if self.scoring and self.minscorechange == None:
self.score = self.scoring(self)
if self.nstart > 1:
if not self.winner or (self.score < self.winner[0] if
self.minimize_score else self.score > self.winner[0]):
self.winner = (self.score, self.clusters, self.centroids)
if self.outer_callback:
self.outer_callback(self)
if self.nstart > 1:
self.score, self.clusters, self.centroids = self.winner
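# A hedged usage sketch (dataset name is an assumption): run ten random
# restarts and keep the clustering with the best (lowest) score, as described
# in the nstart documentation above.
def _example_restarts():
    data = Orange.data.Table("iris")
    km = Clustering(data, centroids=3, nstart=10,
                    scoring=score_distance_to_centroids)
    return km.score, km.clusters[:10]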
| gpl-3.0 |
NixaSoftware/CVis | venv/lib/python2.7/site-packages/numpy/linalg/linalg.py | 11 | 77339 | """Lite version of scipy.linalg.
Notes
-----
This module is a lite version of the linalg.py module in SciPy which
contains high-level Python interface to the LAPACK library. The lite
version only accesses the following LAPACK functions: dgesv, zgesv,
dgeev, zgeev, dgesdd, zgesdd, dgelsd, zgelsd, dsyevd, zheevd, dgetrf,
zgetrf, dpotrf, zpotrf, dgeqrf, zgeqrf, zungqr, dorgqr.
"""
from __future__ import division, absolute_import, print_function
__all__ = ['matrix_power', 'solve', 'tensorsolve', 'tensorinv', 'inv',
'cholesky', 'eigvals', 'eigvalsh', 'pinv', 'slogdet', 'det',
'svd', 'eig', 'eigh', 'lstsq', 'norm', 'qr', 'cond', 'matrix_rank',
'LinAlgError', 'multi_dot']
import warnings
from numpy.core import (
array, asarray, zeros, empty, empty_like, transpose, intc, single, double,
csingle, cdouble, inexact, complexfloating, newaxis, ravel, all, Inf, dot,
add, multiply, sqrt, maximum, fastCopyAndTranspose, sum, isfinite, size,
finfo, errstate, geterrobj, longdouble, rollaxis, amin, amax, product, abs,
broadcast, atleast_2d, intp, asanyarray, isscalar, object_, ones
)
from numpy.core.multiarray import normalize_axis_index
from numpy.lib import triu, asfarray
from numpy.linalg import lapack_lite, _umath_linalg
from numpy.matrixlib.defmatrix import matrix_power
# For Python2/3 compatibility
_N = b'N'
_V = b'V'
_A = b'A'
_S = b'S'
_L = b'L'
fortran_int = intc
# Error object
class LinAlgError(Exception):
"""
Generic Python-exception-derived object raised by linalg functions.
General purpose exception class, derived from Python's exception.Exception
class, programmatically raised in linalg functions when a Linear
Algebra-related condition would prevent further correct execution of the
function.
Parameters
----------
None
Examples
--------
>>> from numpy import linalg as LA
>>> LA.inv(np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "...linalg.py", line 350,
in inv return wrap(solve(a, identity(a.shape[0], dtype=a.dtype)))
File "...linalg.py", line 249,
in solve
raise LinAlgError('Singular matrix')
numpy.linalg.LinAlgError: Singular matrix
"""
pass
# Dealing with errors in _umath_linalg
_linalg_error_extobj = None
def _determine_error_states():
global _linalg_error_extobj
errobj = geterrobj()
bufsize = errobj[0]
with errstate(invalid='call', over='ignore',
divide='ignore', under='ignore'):
invalid_call_errmask = geterrobj()[1]
_linalg_error_extobj = [bufsize, invalid_call_errmask, None]
_determine_error_states()
def _raise_linalgerror_singular(err, flag):
raise LinAlgError("Singular matrix")
def _raise_linalgerror_nonposdef(err, flag):
raise LinAlgError("Matrix is not positive definite")
def _raise_linalgerror_eigenvalues_nonconvergence(err, flag):
raise LinAlgError("Eigenvalues did not converge")
def _raise_linalgerror_svd_nonconvergence(err, flag):
raise LinAlgError("SVD did not converge")
def get_linalg_error_extobj(callback):
extobj = list(_linalg_error_extobj)
extobj[2] = callback
return extobj
def _makearray(a):
new = asarray(a)
wrap = getattr(a, "__array_prepare__", new.__array_wrap__)
return new, wrap
def isComplexType(t):
return issubclass(t, complexfloating)
_real_types_map = {single : single,
double : double,
csingle : single,
cdouble : double}
_complex_types_map = {single : csingle,
double : cdouble,
csingle : csingle,
cdouble : cdouble}
def _realType(t, default=double):
return _real_types_map.get(t, default)
def _complexType(t, default=cdouble):
return _complex_types_map.get(t, default)
def _linalgRealType(t):
"""Cast the type t to either double or cdouble."""
return double
_complex_types_map = {single : csingle,
double : cdouble,
csingle : csingle,
cdouble : cdouble}
def _commonType(*arrays):
# in lite version, use higher precision (always double or cdouble)
result_type = single
is_complex = False
for a in arrays:
if issubclass(a.dtype.type, inexact):
if isComplexType(a.dtype.type):
is_complex = True
rt = _realType(a.dtype.type, default=None)
if rt is None:
# unsupported inexact scalar
raise TypeError("array type %s is unsupported in linalg" %
(a.dtype.name,))
else:
rt = double
if rt is double:
result_type = double
if is_complex:
t = cdouble
result_type = _complex_types_map[result_type]
else:
t = double
return t, result_type
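# Illustrative sketch, not part of the original module: _commonType promotes
# any all-real mix to double for the computation, and any mix containing a
# complex array to cdouble; the second return value is the result type.
def _example_commonType():
    a = zeros(2, dtype=single)
    b = zeros(2, dtype=csingle)
    return _commonType(a, b)   # -> (cdouble, csingle)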
# _fastCopyAndTranpose assumes the input is 2D (as all the calls in here are).
_fastCT = fastCopyAndTranspose
def _to_native_byte_order(*arrays):
ret = []
for arr in arrays:
if arr.dtype.byteorder not in ('=', '|'):
ret.append(asarray(arr, dtype=arr.dtype.newbyteorder('=')))
else:
ret.append(arr)
if len(ret) == 1:
return ret[0]
else:
return ret
def _fastCopyAndTranspose(type, *arrays):
cast_arrays = ()
for a in arrays:
if a.dtype.type is type:
cast_arrays = cast_arrays + (_fastCT(a),)
else:
cast_arrays = cast_arrays + (_fastCT(a.astype(type)),)
if len(cast_arrays) == 1:
return cast_arrays[0]
else:
return cast_arrays
def _assertRank2(*arrays):
for a in arrays:
if a.ndim != 2:
raise LinAlgError('%d-dimensional array given. Array must be '
'two-dimensional' % a.ndim)
def _assertRankAtLeast2(*arrays):
for a in arrays:
if a.ndim < 2:
raise LinAlgError('%d-dimensional array given. Array must be '
'at least two-dimensional' % a.ndim)
def _assertSquareness(*arrays):
for a in arrays:
if max(a.shape) != min(a.shape):
raise LinAlgError('Array must be square')
def _assertNdSquareness(*arrays):
for a in arrays:
if max(a.shape[-2:]) != min(a.shape[-2:]):
raise LinAlgError('Last 2 dimensions of the array must be square')
def _assertFinite(*arrays):
for a in arrays:
if not (isfinite(a).all()):
raise LinAlgError("Array must not contain infs or NaNs")
def _isEmpty2d(arr):
# check size first for efficiency
return arr.size == 0 and product(arr.shape[-2:]) == 0
def _assertNoEmpty2d(*arrays):
for a in arrays:
if _isEmpty2d(a):
raise LinAlgError("Arrays cannot be empty")
# Linear equations
def tensorsolve(a, b, axes=None):
"""
Solve the tensor equation ``a x = b`` for x.
It is assumed that all indices of `x` are summed over in the product,
together with the rightmost indices of `a`, as is done in, for example,
``tensordot(a, x, axes=b.ndim)``.
Parameters
----------
a : array_like
Coefficient tensor, of shape ``b.shape + Q``. `Q`, a tuple, equals
the shape of that sub-tensor of `a` consisting of the appropriate
number of its rightmost indices, and must be such that
``prod(Q) == prod(b.shape)`` (in which sense `a` is said to be
'square').
b : array_like
Right-hand tensor, which can be of any shape.
axes : tuple of ints, optional
Axes in `a` to reorder to the right, before inversion.
If None (default), no reordering is done.
Returns
-------
x : ndarray, shape Q
Raises
------
LinAlgError
If `a` is singular or not 'square' (in the above sense).
See Also
--------
numpy.tensordot, tensorinv, numpy.einsum
Examples
--------
>>> a = np.eye(2*3*4)
>>> a.shape = (2*3, 4, 2, 3, 4)
>>> b = np.random.randn(2*3, 4)
>>> x = np.linalg.tensorsolve(a, b)
>>> x.shape
(2, 3, 4)
>>> np.allclose(np.tensordot(a, x, axes=3), b)
True
"""
a, wrap = _makearray(a)
b = asarray(b)
an = a.ndim
if axes is not None:
allaxes = list(range(0, an))
for k in axes:
allaxes.remove(k)
allaxes.insert(an, k)
a = a.transpose(allaxes)
oldshape = a.shape[-(an-b.ndim):]
prod = 1
for k in oldshape:
prod *= k
a = a.reshape(-1, prod)
b = b.ravel()
res = wrap(solve(a, b))
res.shape = oldshape
return res
def solve(a, b):
"""
Solve a linear matrix equation, or system of linear scalar equations.
Computes the "exact" solution, `x`, of the well-determined, i.e., full
rank, linear matrix equation `ax = b`.
Parameters
----------
a : (..., M, M) array_like
Coefficient matrix.
b : {(..., M,), (..., M, K)}, array_like
Ordinate or "dependent variable" values.
Returns
-------
x : {(..., M,), (..., M, K)} ndarray
Solution to the system a x = b. Returned shape is identical to `b`.
Raises
------
LinAlgError
If `a` is singular or not square.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The solutions are computed using LAPACK routine _gesv
`a` must be square and of full-rank, i.e., all rows (or, equivalently,
columns) must be linearly independent; if either is not true, use
`lstsq` for the least-squares best "solution" of the
system/equation.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pg. 22.
Examples
--------
Solve the system of equations ``3 * x0 + x1 = 9`` and ``x0 + 2 * x1 = 8``:
>>> a = np.array([[3,1], [1,2]])
>>> b = np.array([9,8])
>>> x = np.linalg.solve(a, b)
>>> x
array([ 2., 3.])
Check that the solution is correct:
>>> np.allclose(np.dot(a, x), b)
True
"""
a, _ = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
b, wrap = _makearray(b)
t, result_t = _commonType(a, b)
# We use the b = (..., M,) logic, only if the number of extra dimensions
# match exactly
if b.ndim == a.ndim - 1:
gufunc = _umath_linalg.solve1
else:
gufunc = _umath_linalg.solve
signature = 'DD->D' if isComplexType(t) else 'dd->d'
extobj = get_linalg_error_extobj(_raise_linalgerror_singular)
r = gufunc(a, b, signature=signature, extobj=extobj)
return wrap(r.astype(result_t, copy=False))
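# Illustrative sketch, not part of the original source: `solve` is a gufunc
# wrapper, so it broadcasts over leading dimensions and can solve a stack of
# systems in one call. The helper name _demo_solve_stack is made up; it
# assumes NumPy is importable as `np`.
def _demo_solve_stack():
    import numpy as np
    a = np.array([[[3., 1.], [1., 2.]],
                  [[2., 0.], [0., 4.]]])    # shape (2, 2, 2): two systems
    b = np.array([[9., 8.],
                  [2., 8.]])                # shape (2, 2): one rhs per system
    x = np.linalg.solve(a, b)               # shape (2, 2)
    # Each x[i] solves a[i] . x[i] = b[i].
    assert np.allclose(np.einsum('...ij,...j->...i', a, x), b)
    return x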
def tensorinv(a, ind=2):
"""
Compute the 'inverse' of an N-dimensional array.
The result is an inverse for `a` relative to the tensordot operation
``tensordot(a, b, ind)``, i. e., up to floating-point accuracy,
``tensordot(tensorinv(a), a, ind)`` is the "identity" tensor for the
tensordot operation.
Parameters
----------
a : array_like
Tensor to 'invert'. Its shape must be 'square', i. e.,
``prod(a.shape[:ind]) == prod(a.shape[ind:])``.
ind : int, optional
Number of first indices that are involved in the inverse sum.
Must be a positive integer, default is 2.
Returns
-------
b : ndarray
`a`'s tensordot inverse, shape ``a.shape[ind:] + a.shape[:ind]``.
Raises
------
LinAlgError
If `a` is singular or not 'square' (in the above sense).
See Also
--------
numpy.tensordot, tensorsolve
Examples
--------
>>> a = np.eye(4*6)
>>> a.shape = (4, 6, 8, 3)
>>> ainv = np.linalg.tensorinv(a, ind=2)
>>> ainv.shape
(8, 3, 4, 6)
>>> b = np.random.randn(4, 6)
>>> np.allclose(np.tensordot(ainv, b), np.linalg.tensorsolve(a, b))
True
>>> a = np.eye(4*6)
>>> a.shape = (24, 8, 3)
>>> ainv = np.linalg.tensorinv(a, ind=1)
>>> ainv.shape
(8, 3, 24)
>>> b = np.random.randn(24)
>>> np.allclose(np.tensordot(ainv, b, 1), np.linalg.tensorsolve(a, b))
True
"""
a = asarray(a)
oldshape = a.shape
prod = 1
if ind > 0:
invshape = oldshape[ind:] + oldshape[:ind]
for k in oldshape[ind:]:
prod *= k
else:
raise ValueError("Invalid ind argument.")
a = a.reshape(prod, -1)
ia = inv(a)
return ia.reshape(*invshape)
# Matrix inversion
def inv(a):
"""
Compute the (multiplicative) inverse of a matrix.
Given a square matrix `a`, return the matrix `ainv` satisfying
``dot(a, ainv) = dot(ainv, a) = eye(a.shape[0])``.
Parameters
----------
a : (..., M, M) array_like
Matrix to be inverted.
Returns
-------
ainv : (..., M, M) ndarray or matrix
(Multiplicative) inverse of the matrix `a`.
Raises
------
LinAlgError
If `a` is not square or inversion fails.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
Examples
--------
>>> from numpy.linalg import inv
>>> a = np.array([[1., 2.], [3., 4.]])
>>> ainv = inv(a)
>>> np.allclose(np.dot(a, ainv), np.eye(2))
True
>>> np.allclose(np.dot(ainv, a), np.eye(2))
True
If a is a matrix object, then the return value is a matrix as well:
>>> ainv = inv(np.matrix(a))
>>> ainv
matrix([[-2. , 1. ],
[ 1.5, -0.5]])
Inverses of several matrices can be computed at once:
>>> a = np.array([[[1., 2.], [3., 4.]], [[1, 3], [3, 5]]])
>>> inv(a)
array([[[-2. , 1. ],
[ 1.5, -0.5]],
[[-5. , 2. ],
[ 3. , -1. ]]])
"""
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
signature = 'D->D' if isComplexType(t) else 'd->d'
extobj = get_linalg_error_extobj(_raise_linalgerror_singular)
ainv = _umath_linalg.inv(a, signature=signature, extobj=extobj)
return wrap(ainv.astype(result_t, copy=False))
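# Illustrative sketch, not part of the original source: an exactly singular
# input reaches the _raise_linalgerror_singular callback installed via the
# extobj above and surfaces as LinAlgError. The helper name _demo_inv_singular
# is made up; it assumes NumPy is importable as `np`.
def _demo_inv_singular():
    import numpy as np
    singular = np.array([[1., 2.],
                         [2., 4.]])         # rank 1, so not invertible
    try:
        np.linalg.inv(singular)
    except np.linalg.LinAlgError as exc:
        return str(exc)                     # typically "Singular matrix"
    raise AssertionError("expected LinAlgError for a singular matrix")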
# Cholesky decomposition
def cholesky(a):
"""
Cholesky decomposition.
Return the Cholesky decomposition, `L * L.H`, of the square matrix `a`,
where `L` is lower-triangular and .H is the conjugate transpose operator
(which is the ordinary transpose if `a` is real-valued). `a` must be
Hermitian (symmetric if real-valued) and positive-definite. Only `L` is
actually returned.
Parameters
----------
a : (..., M, M) array_like
Hermitian (symmetric if all elements are real), positive-definite
input matrix.
Returns
-------
L : (..., M, M) array_like
        Lower-triangular Cholesky factor of `a`. Returns a
matrix object if `a` is a matrix object.
Raises
------
LinAlgError
If the decomposition fails, for example, if `a` is not
positive-definite.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The Cholesky decomposition is often used as a fast way of solving
.. math:: A \\mathbf{x} = \\mathbf{b}
(when `A` is both Hermitian/symmetric and positive-definite).
First, we solve for :math:`\\mathbf{y}` in
.. math:: L \\mathbf{y} = \\mathbf{b},
and then for :math:`\\mathbf{x}` in
.. math:: L.H \\mathbf{x} = \\mathbf{y}.
Examples
--------
>>> A = np.array([[1,-2j],[2j,5]])
>>> A
array([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> L = np.linalg.cholesky(A)
>>> L
array([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
>>> np.dot(L, L.T.conj()) # verify that L * L.H = A
array([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> A = [[1,-2j],[2j,5]] # what happens if A is only array_like?
>>> np.linalg.cholesky(A) # an ndarray object is returned
array([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
>>> # But a matrix object is returned if A is a matrix object
>>> LA.cholesky(np.matrix(A))
matrix([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
"""
extobj = get_linalg_error_extobj(_raise_linalgerror_nonposdef)
gufunc = _umath_linalg.cholesky_lo
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
signature = 'D->D' if isComplexType(t) else 'd->d'
r = gufunc(a, signature=signature, extobj=extobj)
return wrap(r.astype(result_t, copy=False))
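# Illustrative sketch, not part of the original source: the two triangular
# solves described in the cholesky docstring (L y = b, then L.H x = y), done
# here with np.linalg.solve for simplicity rather than a dedicated triangular
# solver. The helper name _demo_cholesky_solve is made up; it assumes NumPy is
# importable as `np`.
def _demo_cholesky_solve():
    import numpy as np
    A = np.array([[4., 2.],
                  [2., 3.]])                # symmetric positive-definite
    b = np.array([6., 5.])
    L = np.linalg.cholesky(A)
    y = np.linalg.solve(L, b)               # forward solve:  L y = b
    x = np.linalg.solve(L.T.conj(), y)      # backward solve: L.H x = y
    assert np.allclose(np.dot(A, x), b)
    return x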
# QR decomposition
def qr(a, mode='reduced'):
"""
Compute the qr factorization of a matrix.
Factor the matrix `a` as *qr*, where `q` is orthonormal and `r` is
upper-triangular.
Parameters
----------
a : array_like, shape (M, N)
Matrix to be factored.
mode : {'reduced', 'complete', 'r', 'raw', 'full', 'economic'}, optional
If K = min(M, N), then
'reduced' : returns q, r with dimensions (M, K), (K, N) (default)
'complete' : returns q, r with dimensions (M, M), (M, N)
'r' : returns r only with dimensions (K, N)
'raw' : returns h, tau with dimensions (N, M), (K,)
'full' : alias of 'reduced', deprecated
'economic' : returns h from 'raw', deprecated.
        The options 'reduced', 'complete', and 'raw' are new in numpy 1.8,
see the notes for more information. The default is 'reduced' and to
maintain backward compatibility with earlier versions of numpy both
it and the old default 'full' can be omitted. Note that array h
returned in 'raw' mode is transposed for calling Fortran. The
'economic' mode is deprecated. The modes 'full' and 'economic' may
be passed using only the first letter for backwards compatibility,
but all others must be spelled out. See the Notes for more
explanation.
Returns
-------
q : ndarray of float or complex, optional
A matrix with orthonormal columns. When mode = 'complete' the
result is an orthogonal/unitary matrix depending on whether or not
a is real/complex. The determinant may be either +/- 1 in that
case.
r : ndarray of float or complex, optional
The upper-triangular matrix.
(h, tau) : ndarrays of np.double or np.cdouble, optional
The array h contains the Householder reflectors that generate q
along with r. The tau array contains scaling factors for the
reflectors. In the deprecated 'economic' mode only h is returned.
Raises
------
LinAlgError
If factoring fails.
Notes
-----
This is an interface to the LAPACK routines dgeqrf, zgeqrf,
dorgqr, and zungqr.
For more information on the qr factorization, see for example:
http://en.wikipedia.org/wiki/QR_factorization
Subclasses of `ndarray` are preserved except for the 'raw' mode. So if
`a` is of type `matrix`, all the return values will be matrices too.
New 'reduced', 'complete', and 'raw' options for mode were added in
NumPy 1.8.0 and the old option 'full' was made an alias of 'reduced'. In
addition the options 'full' and 'economic' were deprecated. Because
'full' was the previous default and 'reduced' is the new default,
backward compatibility can be maintained by letting `mode` default.
The 'raw' option was added so that LAPACK routines that can multiply
arrays by q using the Householder reflectors can be used. Note that in
this case the returned arrays are of type np.double or np.cdouble and
the h array is transposed to be FORTRAN compatible. No routines using
the 'raw' return are currently exposed by numpy, but some are available
in lapack_lite and just await the necessary work.
Examples
--------
>>> a = np.random.randn(9, 6)
>>> q, r = np.linalg.qr(a)
>>> np.allclose(a, np.dot(q, r)) # a does equal qr
True
>>> r2 = np.linalg.qr(a, mode='r')
>>> r3 = np.linalg.qr(a, mode='economic')
>>> np.allclose(r, r2) # mode='r' returns the same r as mode='full'
True
>>> # But only triu parts are guaranteed equal when mode='economic'
>>> np.allclose(r, np.triu(r3[:6,:6], k=0))
True
Example illustrating a common use of `qr`: solving of least squares
problems
What are the least-squares-best `m` and `y0` in ``y = y0 + mx`` for
the following data: {(0,1), (1,0), (1,2), (2,1)}. (Graph the points
and you'll see that it should be y0 = 0, m = 1.) The answer is provided
by solving the over-determined matrix equation ``Ax = b``, where::
A = array([[0, 1], [1, 1], [1, 1], [2, 1]])
x = array([[y0], [m]])
b = array([[1], [0], [2], [1]])
If A = qr such that q is orthonormal (which is always possible via
Gram-Schmidt), then ``x = inv(r) * (q.T) * b``. (In numpy practice,
however, we simply use `lstsq`.)
>>> A = np.array([[0, 1], [1, 1], [1, 1], [2, 1]])
>>> A
array([[0, 1],
[1, 1],
[1, 1],
[2, 1]])
>>> b = np.array([1, 0, 2, 1])
>>> q, r = LA.qr(A)
>>> p = np.dot(q.T, b)
>>> np.dot(LA.inv(r), p)
array([ 1.1e-16, 1.0e+00])
"""
if mode not in ('reduced', 'complete', 'r', 'raw'):
if mode in ('f', 'full'):
# 2013-04-01, 1.8
msg = "".join((
"The 'full' option is deprecated in favor of 'reduced'.\n",
"For backward compatibility let mode default."))
warnings.warn(msg, DeprecationWarning, stacklevel=2)
mode = 'reduced'
elif mode in ('e', 'economic'):
# 2013-04-01, 1.8
msg = "The 'economic' option is deprecated."
warnings.warn(msg, DeprecationWarning, stacklevel=2)
mode = 'economic'
else:
raise ValueError("Unrecognized mode '%s'" % mode)
a, wrap = _makearray(a)
_assertRank2(a)
_assertNoEmpty2d(a)
m, n = a.shape
t, result_t = _commonType(a)
a = _fastCopyAndTranspose(t, a)
a = _to_native_byte_order(a)
mn = min(m, n)
tau = zeros((mn,), t)
if isComplexType(t):
lapack_routine = lapack_lite.zgeqrf
routine_name = 'zgeqrf'
else:
lapack_routine = lapack_lite.dgeqrf
routine_name = 'dgeqrf'
# calculate optimal size of work data 'work'
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(m, n, a, m, tau, work, -1, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
# do qr decomposition
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
results = lapack_routine(m, n, a, m, tau, work, lwork, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
# handle modes that don't return q
if mode == 'r':
r = _fastCopyAndTranspose(result_t, a[:, :mn])
return wrap(triu(r))
if mode == 'raw':
return a, tau
if mode == 'economic':
if t != result_t :
a = a.astype(result_t, copy=False)
return wrap(a.T)
# generate q from a
if mode == 'complete' and m > n:
mc = m
q = empty((m, m), t)
else:
mc = mn
q = empty((n, m), t)
q[:n] = a
if isComplexType(t):
lapack_routine = lapack_lite.zungqr
routine_name = 'zungqr'
else:
lapack_routine = lapack_lite.dorgqr
routine_name = 'dorgqr'
# determine optimal lwork
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(m, mc, mn, q, m, tau, work, -1, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
# compute q
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
results = lapack_routine(m, mc, mn, q, m, tau, work, lwork, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
q = _fastCopyAndTranspose(result_t, q[:mc])
r = _fastCopyAndTranspose(result_t, a[:, :mc])
return wrap(q), wrap(triu(r))
# Eigenvalues
def eigvals(a):
"""
Compute the eigenvalues of a general matrix.
Main difference between `eigvals` and `eig`: the eigenvectors aren't
returned.
Parameters
----------
a : (..., M, M) array_like
A complex- or real-valued matrix whose eigenvalues will be computed.
Returns
-------
w : (..., M,) ndarray
The eigenvalues, each repeated according to its multiplicity.
They are not necessarily ordered, nor are they necessarily
real for real matrices.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eig : eigenvalues and right eigenvectors of general arrays
eigvalsh : eigenvalues of symmetric or Hermitian arrays.
eigh : eigenvalues and eigenvectors of symmetric/Hermitian arrays.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
This is implemented using the _geev LAPACK routines which compute
the eigenvalues and eigenvectors of general square arrays.
Examples
--------
Illustration, using the fact that the eigenvalues of a diagonal matrix
are its diagonal elements, that multiplying a matrix on the left
by an orthogonal matrix, `Q`, and on the right by `Q.T` (the transpose
of `Q`), preserves the eigenvalues of the "middle" matrix. In other words,
if `Q` is orthogonal, then ``Q * A * Q.T`` has the same eigenvalues as
``A``:
>>> from numpy import linalg as LA
>>> x = np.random.random()
>>> Q = np.array([[np.cos(x), -np.sin(x)], [np.sin(x), np.cos(x)]])
>>> LA.norm(Q[0, :]), LA.norm(Q[1, :]), np.dot(Q[0, :],Q[1, :])
(1.0, 1.0, 0.0)
Now multiply a diagonal matrix by Q on one side and by Q.T on the other:
>>> D = np.diag((-1,1))
>>> LA.eigvals(D)
array([-1., 1.])
>>> A = np.dot(Q, D)
>>> A = np.dot(A, Q.T)
>>> LA.eigvals(A)
array([ 1., -1.])
"""
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
_assertFinite(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
signature = 'D->D' if isComplexType(t) else 'd->D'
w = _umath_linalg.eigvals(a, signature=signature, extobj=extobj)
if not isComplexType(t):
if all(w.imag == 0):
w = w.real
result_t = _realType(result_t)
else:
result_t = _complexType(result_t)
return w.astype(result_t, copy=False)
def eigvalsh(a, UPLO='L'):
"""
Compute the eigenvalues of a Hermitian or real symmetric matrix.
Main difference from eigh: the eigenvectors are not computed.
Parameters
----------
a : (..., M, M) array_like
A complex- or real-valued matrix whose eigenvalues are to be
computed.
UPLO : {'L', 'U'}, optional
Specifies whether the calculation is done with the lower triangular
part of `a` ('L', default) or the upper triangular part ('U').
Irrespective of this value only the real parts of the diagonal will
be considered in the computation to preserve the notion of a Hermitian
matrix. It therefore follows that the imaginary part of the diagonal
will always be treated as zero.
Returns
-------
w : (..., M,) ndarray
The eigenvalues in ascending order, each repeated according to
its multiplicity.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigh : eigenvalues and eigenvectors of symmetric/Hermitian arrays.
eigvals : eigenvalues of general real or complex arrays.
eig : eigenvalues and right eigenvectors of general real or complex
arrays.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The eigenvalues are computed using LAPACK routines _syevd, _heevd
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, -2j], [2j, 5]])
>>> LA.eigvalsh(a)
array([ 0.17157288, 5.82842712])
>>> # demonstrate the treatment of the imaginary part of the diagonal
>>> a = np.array([[5+2j, 9-2j], [0+2j, 2-1j]])
>>> a
array([[ 5.+2.j, 9.-2.j],
[ 0.+2.j, 2.-1.j]])
>>> # with UPLO='L' this is numerically equivalent to using LA.eigvals()
>>> # with:
>>> b = np.array([[5.+0.j, 0.-2.j], [0.+2.j, 2.-0.j]])
>>> b
array([[ 5.+0.j, 0.-2.j],
[ 0.+2.j, 2.+0.j]])
>>> wa = LA.eigvalsh(a)
>>> wb = LA.eigvals(b)
>>> wa; wb
array([ 1., 6.])
array([ 6.+0.j, 1.+0.j])
"""
UPLO = UPLO.upper()
if UPLO not in ('L', 'U'):
raise ValueError("UPLO argument must be 'L' or 'U'")
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
if UPLO == 'L':
gufunc = _umath_linalg.eigvalsh_lo
else:
gufunc = _umath_linalg.eigvalsh_up
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
signature = 'D->d' if isComplexType(t) else 'd->d'
w = gufunc(a, signature=signature, extobj=extobj)
return w.astype(_realType(result_t), copy=False)
def _convertarray(a):
t, result_t = _commonType(a)
a = _fastCT(a.astype(t))
return a, t, result_t
# Eigenvectors
def eig(a):
"""
Compute the eigenvalues and right eigenvectors of a square array.
Parameters
----------
a : (..., M, M) array
Matrices for which the eigenvalues and right eigenvectors will
be computed
Returns
-------
w : (..., M) array
The eigenvalues, each repeated according to its multiplicity.
The eigenvalues are not necessarily ordered. The resulting
array will be of complex type, unless the imaginary part is
zero in which case it will be cast to a real type. When `a`
is real the resulting eigenvalues will be real (0 imaginary
part) or occur in conjugate pairs
v : (..., M, M) array
The normalized (unit "length") eigenvectors, such that the
column ``v[:,i]`` is the eigenvector corresponding to the
eigenvalue ``w[i]``.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigvals : eigenvalues of a non-symmetric array.
eigh : eigenvalues and eigenvectors of a symmetric or Hermitian
(conjugate symmetric) array.
eigvalsh : eigenvalues of a symmetric or Hermitian (conjugate symmetric)
array.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
This is implemented using the _geev LAPACK routines which compute
the eigenvalues and eigenvectors of general square arrays.
The number `w` is an eigenvalue of `a` if there exists a vector
`v` such that ``dot(a,v) = w * v``. Thus, the arrays `a`, `w`, and
`v` satisfy the equations ``dot(a[:,:], v[:,i]) = w[i] * v[:,i]``
for :math:`i \\in \\{0,...,M-1\\}`.
The array `v` of eigenvectors may not be of maximum rank, that is, some
of the columns may be linearly dependent, although round-off error may
obscure that fact. If the eigenvalues are all different, then theoretically
the eigenvectors are linearly independent. Likewise, the (complex-valued)
matrix of eigenvectors `v` is unitary if the matrix `a` is normal, i.e.,
if ``dot(a, a.H) = dot(a.H, a)``, where `a.H` denotes the conjugate
transpose of `a`.
Finally, it is emphasized that `v` consists of the *right* (as in
right-hand side) eigenvectors of `a`. A vector `y` satisfying
``dot(y.T, a) = z * y.T`` for some number `z` is called a *left*
eigenvector of `a`, and, in general, the left and right eigenvectors
of a matrix are not necessarily the (perhaps conjugate) transposes
of each other.
References
----------
G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando, FL,
Academic Press, Inc., 1980, Various pp.
Examples
--------
>>> from numpy import linalg as LA
(Almost) trivial example with real e-values and e-vectors.
>>> w, v = LA.eig(np.diag((1, 2, 3)))
>>> w; v
array([ 1., 2., 3.])
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
Real matrix possessing complex e-values and e-vectors; note that the
e-values are complex conjugates of each other.
>>> w, v = LA.eig(np.array([[1, -1], [1, 1]]))
>>> w; v
array([ 1. + 1.j, 1. - 1.j])
array([[ 0.70710678+0.j , 0.70710678+0.j ],
[ 0.00000000-0.70710678j, 0.00000000+0.70710678j]])
Complex-valued matrix with real e-values (but complex-valued e-vectors);
note that a.conj().T = a, i.e., a is Hermitian.
>>> a = np.array([[1, 1j], [-1j, 1]])
>>> w, v = LA.eig(a)
>>> w; v
array([ 2.00000000e+00+0.j, 5.98651912e-36+0.j]) # i.e., {2, 0}
array([[ 0.00000000+0.70710678j, 0.70710678+0.j ],
[ 0.70710678+0.j , 0.00000000+0.70710678j]])
Be careful about round-off error!
>>> a = np.array([[1 + 1e-9, 0], [0, 1 - 1e-9]])
>>> # Theor. e-values are 1 +/- 1e-9
>>> w, v = LA.eig(a)
>>> w; v
array([ 1., 1.])
array([[ 1., 0.],
[ 0., 1.]])
"""
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
_assertFinite(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
signature = 'D->DD' if isComplexType(t) else 'd->DD'
w, vt = _umath_linalg.eig(a, signature=signature, extobj=extobj)
if not isComplexType(t) and all(w.imag == 0.0):
w = w.real
vt = vt.real
result_t = _realType(result_t)
else:
result_t = _complexType(result_t)
vt = vt.astype(result_t, copy=False)
return w.astype(result_t, copy=False), wrap(vt)
def eigh(a, UPLO='L'):
"""
Return the eigenvalues and eigenvectors of a Hermitian or symmetric matrix.
Returns two objects, a 1-D array containing the eigenvalues of `a`, and
a 2-D square array or matrix (depending on the input type) of the
corresponding eigenvectors (in columns).
Parameters
----------
a : (..., M, M) array
Hermitian/Symmetric matrices whose eigenvalues and
eigenvectors are to be computed.
UPLO : {'L', 'U'}, optional
Specifies whether the calculation is done with the lower triangular
part of `a` ('L', default) or the upper triangular part ('U').
Irrespective of this value only the real parts of the diagonal will
be considered in the computation to preserve the notion of a Hermitian
matrix. It therefore follows that the imaginary part of the diagonal
will always be treated as zero.
Returns
-------
w : (..., M) ndarray
The eigenvalues in ascending order, each repeated according to
its multiplicity.
v : {(..., M, M) ndarray, (..., M, M) matrix}
The column ``v[:, i]`` is the normalized eigenvector corresponding
to the eigenvalue ``w[i]``. Will return a matrix object if `a` is
a matrix object.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigvalsh : eigenvalues of symmetric or Hermitian arrays.
eig : eigenvalues and right eigenvectors for non-symmetric arrays.
eigvals : eigenvalues of non-symmetric arrays.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The eigenvalues/eigenvectors are computed using LAPACK routines _syevd,
_heevd
The eigenvalues of real symmetric or complex Hermitian matrices are
always real. [1]_ The array `v` of (column) eigenvectors is unitary
and `a`, `w`, and `v` satisfy the equations
``dot(a, v[:, i]) = w[i] * v[:, i]``.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pg. 222.
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, -2j], [2j, 5]])
>>> a
array([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> w, v = LA.eigh(a)
>>> w; v
array([ 0.17157288, 5.82842712])
array([[-0.92387953+0.j , -0.38268343+0.j ],
[ 0.00000000+0.38268343j, 0.00000000-0.92387953j]])
>>> np.dot(a, v[:, 0]) - w[0] * v[:, 0] # verify 1st e-val/vec pair
array([2.77555756e-17 + 0.j, 0. + 1.38777878e-16j])
>>> np.dot(a, v[:, 1]) - w[1] * v[:, 1] # verify 2nd e-val/vec pair
array([ 0.+0.j, 0.+0.j])
>>> A = np.matrix(a) # what happens if input is a matrix object
>>> A
matrix([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> w, v = LA.eigh(A)
>>> w; v
array([ 0.17157288, 5.82842712])
matrix([[-0.92387953+0.j , -0.38268343+0.j ],
[ 0.00000000+0.38268343j, 0.00000000-0.92387953j]])
>>> # demonstrate the treatment of the imaginary part of the diagonal
>>> a = np.array([[5+2j, 9-2j], [0+2j, 2-1j]])
>>> a
array([[ 5.+2.j, 9.-2.j],
[ 0.+2.j, 2.-1.j]])
>>> # with UPLO='L' this is numerically equivalent to using LA.eig() with:
>>> b = np.array([[5.+0.j, 0.-2.j], [0.+2.j, 2.-0.j]])
>>> b
array([[ 5.+0.j, 0.-2.j],
[ 0.+2.j, 2.+0.j]])
>>> wa, va = LA.eigh(a)
>>> wb, vb = LA.eig(b)
>>> wa; wb
array([ 1., 6.])
array([ 6.+0.j, 1.+0.j])
>>> va; vb
array([[-0.44721360-0.j , -0.89442719+0.j ],
[ 0.00000000+0.89442719j, 0.00000000-0.4472136j ]])
array([[ 0.89442719+0.j , 0.00000000-0.4472136j],
[ 0.00000000-0.4472136j, 0.89442719+0.j ]])
"""
UPLO = UPLO.upper()
if UPLO not in ('L', 'U'):
raise ValueError("UPLO argument must be 'L' or 'U'")
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
if UPLO == 'L':
gufunc = _umath_linalg.eigh_lo
else:
gufunc = _umath_linalg.eigh_up
signature = 'D->dD' if isComplexType(t) else 'd->dd'
w, vt = gufunc(a, signature=signature, extobj=extobj)
w = w.astype(_realType(result_t), copy=False)
vt = vt.astype(result_t, copy=False)
return w, wrap(vt)
# Singular value decomposition
def svd(a, full_matrices=1, compute_uv=1):
"""
Singular Value Decomposition.
Factors the matrix `a` as ``u * np.diag(s) * v``, where `u` and `v`
are unitary and `s` is a 1-d array of `a`'s singular values.
Parameters
----------
a : (..., M, N) array_like
        A real or complex matrix of shape (`M`, `N`).
full_matrices : bool, optional
If True (default), `u` and `v` have the shapes (`M`, `M`) and
(`N`, `N`), respectively. Otherwise, the shapes are (`M`, `K`)
and (`K`, `N`), respectively, where `K` = min(`M`, `N`).
compute_uv : bool, optional
Whether or not to compute `u` and `v` in addition to `s`. True
by default.
Returns
-------
u : { (..., M, M), (..., M, K) } array
Unitary matrices. The actual shape depends on the value of
``full_matrices``. Only returned when ``compute_uv`` is True.
s : (..., K) array
The singular values for every matrix, sorted in descending order.
v : { (..., N, N), (..., K, N) } array
Unitary matrices. The actual shape depends on the value of
``full_matrices``. Only returned when ``compute_uv`` is True.
Raises
------
LinAlgError
If SVD computation does not converge.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The decomposition is performed using LAPACK routine _gesdd
The SVD is commonly written as ``a = U S V.H``. The `v` returned
by this function is ``V.H`` and ``u = U``.
If ``U`` is a unitary matrix, it means that it
satisfies ``U.H = inv(U)``.
The rows of `v` are the eigenvectors of ``a.H a``. The columns
of `u` are the eigenvectors of ``a a.H``. For row ``i`` in
`v` and column ``i`` in `u`, the corresponding eigenvalue is
``s[i]**2``.
If `a` is a `matrix` object (as opposed to an `ndarray`), then so
are all the return values.
Examples
--------
>>> a = np.random.randn(9, 6) + 1j*np.random.randn(9, 6)
Reconstruction based on full SVD:
>>> U, s, V = np.linalg.svd(a, full_matrices=True)
>>> U.shape, V.shape, s.shape
((9, 9), (6, 6), (6,))
>>> S = np.zeros((9, 6), dtype=complex)
>>> S[:6, :6] = np.diag(s)
>>> np.allclose(a, np.dot(U, np.dot(S, V)))
True
Reconstruction based on reduced SVD:
>>> U, s, V = np.linalg.svd(a, full_matrices=False)
>>> U.shape, V.shape, s.shape
((9, 6), (6, 6), (6,))
>>> S = np.diag(s)
>>> np.allclose(a, np.dot(U, np.dot(S, V)))
True
"""
a, wrap = _makearray(a)
_assertNoEmpty2d(a)
_assertRankAtLeast2(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(_raise_linalgerror_svd_nonconvergence)
m = a.shape[-2]
n = a.shape[-1]
if compute_uv:
if full_matrices:
if m < n:
gufunc = _umath_linalg.svd_m_f
else:
gufunc = _umath_linalg.svd_n_f
else:
if m < n:
gufunc = _umath_linalg.svd_m_s
else:
gufunc = _umath_linalg.svd_n_s
signature = 'D->DdD' if isComplexType(t) else 'd->ddd'
u, s, vt = gufunc(a, signature=signature, extobj=extobj)
u = u.astype(result_t, copy=False)
s = s.astype(_realType(result_t), copy=False)
vt = vt.astype(result_t, copy=False)
return wrap(u), s, wrap(vt)
else:
if m < n:
gufunc = _umath_linalg.svd_m
else:
gufunc = _umath_linalg.svd_n
signature = 'D->d' if isComplexType(t) else 'd->d'
s = gufunc(a, signature=signature, extobj=extobj)
s = s.astype(_realType(result_t), copy=False)
return s
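# Illustrative sketch, not part of the original source: like the other gufunc
# wrappers here, svd broadcasts over stacks of matrices; singular values come
# back sorted in descending order per matrix. The helper name _demo_svd_stack
# is made up; it assumes NumPy is importable as `np`.
def _demo_svd_stack():
    import numpy as np
    a = np.stack([np.eye(3), 2. * np.eye(3)])     # shape (2, 3, 3)
    s = np.linalg.svd(a, compute_uv=False)        # shape (2, 3)
    assert np.allclose(s, [[1., 1., 1.], [2., 2., 2.]])
    return s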
def cond(x, p=None):
"""
Compute the condition number of a matrix.
This function is capable of returning the condition number using
one of seven different norms, depending on the value of `p` (see
Parameters below).
Parameters
----------
x : (..., M, N) array_like
The matrix whose condition number is sought.
p : {None, 1, -1, 2, -2, inf, -inf, 'fro'}, optional
Order of the norm:
===== ============================
p norm for matrices
===== ============================
None 2-norm, computed directly using the ``SVD``
'fro' Frobenius norm
inf max(sum(abs(x), axis=1))
-inf min(sum(abs(x), axis=1))
1 max(sum(abs(x), axis=0))
-1 min(sum(abs(x), axis=0))
2 2-norm (largest sing. value)
-2 smallest singular value
===== ============================
inf means the numpy.inf object, and the Frobenius norm is
the root-of-sum-of-squares norm.
Returns
-------
c : {float, inf}
The condition number of the matrix. May be infinite.
See Also
--------
numpy.linalg.norm
Notes
-----
The condition number of `x` is defined as the norm of `x` times the
norm of the inverse of `x` [1]_; the norm can be the usual L2-norm
(root-of-sum-of-squares) or one of a number of other matrix norms.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, Orlando, FL,
Academic Press, Inc., 1980, pg. 285.
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, 0, -1], [0, 1, 0], [1, 0, 1]])
>>> a
array([[ 1, 0, -1],
[ 0, 1, 0],
[ 1, 0, 1]])
>>> LA.cond(a)
1.4142135623730951
>>> LA.cond(a, 'fro')
3.1622776601683795
>>> LA.cond(a, np.inf)
2.0
>>> LA.cond(a, -np.inf)
1.0
>>> LA.cond(a, 1)
2.0
>>> LA.cond(a, -1)
1.0
>>> LA.cond(a, 2)
1.4142135623730951
>>> LA.cond(a, -2)
0.70710678118654746
>>> min(LA.svd(a, compute_uv=0))*min(LA.svd(LA.inv(a), compute_uv=0))
0.70710678118654746
"""
x = asarray(x) # in case we have a matrix
if p is None:
s = svd(x, compute_uv=False)
return s[..., 0]/s[..., -1]
else:
return norm(x, p, axis=(-2, -1)) * norm(inv(x), p, axis=(-2, -1))
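# Illustrative sketch, not part of the original source: for a finite `p` the
# condition number is literally norm(x, p) * norm(inv(x), p), as implemented
# in the else-branch above. The helper name _demo_cond_definition is made up;
# it assumes NumPy is importable as `np`.
def _demo_cond_definition():
    import numpy as np
    x = np.array([[1., 0., -1.],
                  [0., 1., 0.],
                  [1., 0., 1.]])
    lhs = np.linalg.cond(x, 1)
    rhs = np.linalg.norm(x, 1) * np.linalg.norm(np.linalg.inv(x), 1)
    assert np.allclose(lhs, rhs)            # both equal 2.0 for this matrix
    return lhs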
def matrix_rank(M, tol=None):
"""
Return matrix rank of array using SVD method
    Rank of the array is the number of singular values of the array that are
greater than `tol`.
Parameters
----------
M : {(M,), (..., M, N)} array_like
input vector or stack of matrices
tol : {None, float}, optional
threshold below which SVD values are considered zero. If `tol` is
None, and ``S`` is an array with singular values for `M`, and
``eps`` is the epsilon value for datatype of ``S``, then `tol` is
set to ``S.max() * max(M.shape) * eps``.
Notes
-----
The default threshold to detect rank deficiency is a test on the magnitude
of the singular values of `M`. By default, we identify singular values less
than ``S.max() * max(M.shape) * eps`` as indicating rank deficiency (with
the symbols defined above). This is the algorithm MATLAB uses [1]. It also
appears in *Numerical recipes* in the discussion of SVD solutions for linear
least squares [2].
This default threshold is designed to detect rank deficiency accounting for
the numerical errors of the SVD computation. Imagine that there is a column
in `M` that is an exact (in floating point) linear combination of other
columns in `M`. Computing the SVD on `M` will not produce a singular value
exactly equal to 0 in general: any difference of the smallest SVD value from
0 will be caused by numerical imprecision in the calculation of the SVD.
Our threshold for small SVD values takes this numerical imprecision into
account, and the default threshold will detect such numerical rank
deficiency. The threshold may declare a matrix `M` rank deficient even if
the linear combination of some columns of `M` is not exactly equal to
another column of `M` but only numerically very close to another column of
`M`.
We chose our default threshold because it is in wide use. Other thresholds
are possible. For example, elsewhere in the 2007 edition of *Numerical
recipes* there is an alternative threshold of ``S.max() *
np.finfo(M.dtype).eps / 2. * np.sqrt(m + n + 1.)``. The authors describe
this threshold as being based on "expected roundoff error" (p 71).
The thresholds above deal with floating point roundoff error in the
calculation of the SVD. However, you may have more information about the
sources of error in `M` that would make you consider other tolerance values
to detect *effective* rank deficiency. The most useful measure of the
tolerance depends on the operations you intend to use on your matrix. For
example, if your data come from uncertain measurements with uncertainties
greater than floating point epsilon, choosing a tolerance near that
uncertainty may be preferable. The tolerance may be absolute if the
uncertainties are absolute rather than relative.
References
----------
    .. [1] MATLAB reference documentation, "Rank"
http://www.mathworks.com/help/techdoc/ref/rank.html
.. [2] W. H. Press, S. A. Teukolsky, W. T. Vetterling and B. P. Flannery,
"Numerical Recipes (3rd edition)", Cambridge University Press, 2007,
page 795.
Examples
--------
>>> from numpy.linalg import matrix_rank
>>> matrix_rank(np.eye(4)) # Full rank matrix
4
>>> I=np.eye(4); I[-1,-1] = 0. # rank deficient matrix
>>> matrix_rank(I)
3
>>> matrix_rank(np.ones((4,))) # 1 dimension - rank 1 unless all 0
1
>>> matrix_rank(np.zeros((4,)))
0
"""
M = asarray(M)
if M.ndim < 2:
return int(not all(M==0))
S = svd(M, compute_uv=False)
if tol is None:
tol = S.max(axis=-1, keepdims=True) * max(M.shape[-2:]) * finfo(S.dtype).eps
return (S > tol).sum(axis=-1)
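# Illustrative sketch, not part of the original source: recomputing the default
# tolerance S.max() * max(M.shape) * eps by hand and passing it as `tol` gives
# the same rank as the default call. The helper name _demo_matrix_rank_tol is
# made up; it assumes NumPy is importable as `np`.
def _demo_matrix_rank_tol():
    import numpy as np
    M = np.eye(4)
    M[-1, -1] = 1e-20                       # numerically negligible pivot
    S = np.linalg.svd(M, compute_uv=False)
    default_tol = S.max() * max(M.shape) * np.finfo(S.dtype).eps
    assert np.linalg.matrix_rank(M) == 3
    assert np.linalg.matrix_rank(M, tol=default_tol) == 3
    return default_tol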
# Generalized inverse
def pinv(a, rcond=1e-15 ):
"""
Compute the (Moore-Penrose) pseudo-inverse of a matrix.
Calculate the generalized inverse of a matrix using its
singular-value decomposition (SVD) and including all
*large* singular values.
Parameters
----------
a : (M, N) array_like
Matrix to be pseudo-inverted.
rcond : float
Cutoff for small singular values.
Singular values smaller (in modulus) than
`rcond` * largest_singular_value (again, in modulus)
are set to zero.
Returns
-------
B : (N, M) ndarray
The pseudo-inverse of `a`. If `a` is a `matrix` instance, then so
is `B`.
Raises
------
LinAlgError
If the SVD computation does not converge.
Notes
-----
The pseudo-inverse of a matrix A, denoted :math:`A^+`, is
defined as: "the matrix that 'solves' [the least-squares problem]
:math:`Ax = b`," i.e., if :math:`\\bar{x}` is said solution, then
:math:`A^+` is that matrix such that :math:`\\bar{x} = A^+b`.
It can be shown that if :math:`Q_1 \\Sigma Q_2^T = A` is the singular
value decomposition of A, then
:math:`A^+ = Q_2 \\Sigma^+ Q_1^T`, where :math:`Q_{1,2}` are
orthogonal matrices, :math:`\\Sigma` is a diagonal matrix consisting
of A's so-called singular values, (followed, typically, by
zeros), and then :math:`\\Sigma^+` is simply the diagonal matrix
consisting of the reciprocals of A's singular values
(again, followed by zeros). [1]_
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pp. 139-142.
Examples
--------
The following example checks that ``a * a+ * a == a`` and
``a+ * a * a+ == a+``:
>>> a = np.random.randn(9, 6)
>>> B = np.linalg.pinv(a)
>>> np.allclose(a, np.dot(a, np.dot(B, a)))
True
>>> np.allclose(B, np.dot(B, np.dot(a, B)))
True
"""
a, wrap = _makearray(a)
if _isEmpty2d(a):
res = empty(a.shape[:-2] + (a.shape[-1], a.shape[-2]), dtype=a.dtype)
return wrap(res)
a = a.conjugate()
u, s, vt = svd(a, 0)
m = u.shape[0]
n = vt.shape[1]
cutoff = rcond*maximum.reduce(s)
for i in range(min(n, m)):
if s[i] > cutoff:
s[i] = 1./s[i]
else:
s[i] = 0.
res = dot(transpose(vt), multiply(s[:, newaxis], transpose(u)))
return wrap(res)
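# Illustrative sketch, not part of the original source: for a full-column-rank
# over-determined system, pinv(a).dot(b) agrees with the least-squares solution
# from lstsq. The helper name _demo_pinv_lstsq is made up; it assumes NumPy is
# importable as `np` and leaves lstsq's rcond at its default.
def _demo_pinv_lstsq():
    import numpy as np
    rng = np.random.RandomState(0)
    a = rng.randn(9, 6)                     # almost surely full column rank
    b = rng.randn(9)
    x_pinv = np.dot(np.linalg.pinv(a), b)
    x_lstsq = np.linalg.lstsq(a, b)[0]
    assert np.allclose(x_pinv, x_lstsq)
    return x_pinv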
# Determinant
def slogdet(a):
"""
Compute the sign and (natural) logarithm of the determinant of an array.
If an array has a very small or very large determinant, then a call to
`det` may overflow or underflow. This routine is more robust against such
issues, because it computes the logarithm of the determinant rather than
the determinant itself.
Parameters
----------
a : (..., M, M) array_like
Input array, has to be a square 2-D array.
Returns
-------
sign : (...) array_like
A number representing the sign of the determinant. For a real matrix,
this is 1, 0, or -1. For a complex matrix, this is a complex number
with absolute value 1 (i.e., it is on the unit circle), or else 0.
logdet : (...) array_like
The natural log of the absolute value of the determinant.
If the determinant is zero, then `sign` will be 0 and `logdet` will be
-Inf. In all cases, the determinant is equal to ``sign * np.exp(logdet)``.
See Also
--------
det
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
.. versionadded:: 1.6.0
The determinant is computed via LU factorization using the LAPACK
routine z/dgetrf.
Examples
--------
The determinant of a 2-D array ``[[a, b], [c, d]]`` is ``ad - bc``:
>>> a = np.array([[1, 2], [3, 4]])
>>> (sign, logdet) = np.linalg.slogdet(a)
>>> (sign, logdet)
(-1, 0.69314718055994529)
>>> sign * np.exp(logdet)
-2.0
Computing log-determinants for a stack of matrices:
>>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ])
>>> a.shape
(3, 2, 2)
>>> sign, logdet = np.linalg.slogdet(a)
>>> (sign, logdet)
(array([-1., -1., -1.]), array([ 0.69314718, 1.09861229, 2.07944154]))
>>> sign * np.exp(logdet)
array([-2., -3., -8.])
This routine succeeds where ordinary `det` does not:
>>> np.linalg.det(np.eye(500) * 0.1)
0.0
>>> np.linalg.slogdet(np.eye(500) * 0.1)
(1, -1151.2925464970228)
"""
a = asarray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
real_t = _realType(result_t)
signature = 'D->Dd' if isComplexType(t) else 'd->dd'
sign, logdet = _umath_linalg.slogdet(a, signature=signature)
if isscalar(sign):
sign = sign.astype(result_t)
else:
sign = sign.astype(result_t, copy=False)
if isscalar(logdet):
logdet = logdet.astype(real_t)
else:
logdet = logdet.astype(real_t, copy=False)
return sign, logdet
def det(a):
"""
Compute the determinant of an array.
Parameters
----------
a : (..., M, M) array_like
Input array to compute determinants for.
Returns
-------
det : (...) array_like
Determinant of `a`.
See Also
--------
    slogdet : Another way of representing the determinant, more suitable
for large matrices where underflow/overflow may occur.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The determinant is computed via LU factorization using the LAPACK
routine z/dgetrf.
Examples
--------
The determinant of a 2-D array [[a, b], [c, d]] is ad - bc:
>>> a = np.array([[1, 2], [3, 4]])
>>> np.linalg.det(a)
-2.0
Computing determinants for a stack of matrices:
>>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ])
>>> a.shape
(3, 2, 2)
>>> np.linalg.det(a)
array([-2., -3., -8.])
"""
a = asarray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
signature = 'D->D' if isComplexType(t) else 'd->d'
r = _umath_linalg.det(a, signature=signature)
if isscalar(r):
r = r.astype(result_t)
else:
r = r.astype(result_t, copy=False)
return r
# Linear Least Squares
def lstsq(a, b, rcond=-1):
"""
Return the least-squares solution to a linear matrix equation.
Solves the equation `a x = b` by computing a vector `x` that
minimizes the Euclidean 2-norm `|| b - a x ||^2`. The equation may
be under-, well-, or over- determined (i.e., the number of
linearly independent rows of `a` can be less than, equal to, or
greater than its number of linearly independent columns). If `a`
is square and of full rank, then `x` (but for round-off error) is
the "exact" solution of the equation.
Parameters
----------
a : (M, N) array_like
"Coefficient" matrix.
b : {(M,), (M, K)} array_like
Ordinate or "dependent variable" values. If `b` is two-dimensional,
the least-squares solution is calculated for each of the `K` columns
of `b`.
rcond : float, optional
Cut-off ratio for small singular values of `a`.
For the purposes of rank determination, singular values are treated
as zero if they are smaller than `rcond` times the largest singular
value of `a`.
Returns
-------
x : {(N,), (N, K)} ndarray
Least-squares solution. If `b` is two-dimensional,
the solutions are in the `K` columns of `x`.
residuals : {(), (1,), (K,)} ndarray
Sums of residuals; squared Euclidean 2-norm for each column in
``b - a*x``.
If the rank of `a` is < N or M <= N, this is an empty array.
If `b` is 1-dimensional, this is a (1,) shape array.
Otherwise the shape is (K,).
rank : int
Rank of matrix `a`.
s : (min(M, N),) ndarray
Singular values of `a`.
Raises
------
LinAlgError
If computation does not converge.
Notes
-----
If `b` is a matrix, then all array results are returned as matrices.
Examples
--------
Fit a line, ``y = mx + c``, through some noisy data-points:
>>> x = np.array([0, 1, 2, 3])
>>> y = np.array([-1, 0.2, 0.9, 2.1])
By examining the coefficients, we see that the line should have a
gradient of roughly 1 and cut the y-axis at, more or less, -1.
We can rewrite the line equation as ``y = Ap``, where ``A = [[x 1]]``
and ``p = [[m], [c]]``. Now use `lstsq` to solve for `p`:
>>> A = np.vstack([x, np.ones(len(x))]).T
>>> A
array([[ 0., 1.],
[ 1., 1.],
[ 2., 1.],
[ 3., 1.]])
>>> m, c = np.linalg.lstsq(A, y)[0]
>>> print(m, c)
1.0 -0.95
Plot the data along with the fitted line:
>>> import matplotlib.pyplot as plt
>>> plt.plot(x, y, 'o', label='Original data', markersize=10)
>>> plt.plot(x, m*x + c, 'r', label='Fitted line')
>>> plt.legend()
>>> plt.show()
"""
import math
a, _ = _makearray(a)
b, wrap = _makearray(b)
is_1d = b.ndim == 1
if is_1d:
b = b[:, newaxis]
_assertRank2(a, b)
_assertNoEmpty2d(a, b) # TODO: relax this constraint
m = a.shape[0]
n = a.shape[1]
n_rhs = b.shape[1]
ldb = max(n, m)
if m != b.shape[0]:
raise LinAlgError('Incompatible dimensions')
t, result_t = _commonType(a, b)
result_real_t = _realType(result_t)
real_t = _linalgRealType(t)
bstar = zeros((ldb, n_rhs), t)
bstar[:b.shape[0], :n_rhs] = b.copy()
a, bstar = _fastCopyAndTranspose(t, a, bstar)
a, bstar = _to_native_byte_order(a, bstar)
s = zeros((min(m, n),), real_t)
# This line:
# * is incorrect, according to the LAPACK documentation
# * raises a ValueError if min(m,n) == 0
# * should not be calculated here anyway, as LAPACK should calculate
# `liwork` for us. But that only works if our version of lapack does
# not have this bug:
# http://icl.cs.utk.edu/lapack-forum/archives/lapack/msg00899.html
# Lapack_lite does have that bug...
nlvl = max( 0, int( math.log( float(min(m, n))/2. ) ) + 1 )
iwork = zeros((3*min(m, n)*nlvl+11*min(m, n),), fortran_int)
if isComplexType(t):
lapack_routine = lapack_lite.zgelsd
lwork = 1
rwork = zeros((lwork,), real_t)
work = zeros((lwork,), t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, -1, rwork, iwork, 0)
lwork = int(abs(work[0]))
rwork = zeros((lwork,), real_t)
a_real = zeros((m, n), real_t)
bstar_real = zeros((ldb, n_rhs,), real_t)
results = lapack_lite.dgelsd(m, n, n_rhs, a_real, m,
bstar_real, ldb, s, rcond,
0, rwork, -1, iwork, 0)
lrwork = int(rwork[0])
work = zeros((lwork,), t)
rwork = zeros((lrwork,), real_t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, lwork, rwork, iwork, 0)
else:
lapack_routine = lapack_lite.dgelsd
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, -1, iwork, 0)
lwork = int(work[0])
work = zeros((lwork,), t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, lwork, iwork, 0)
if results['info'] > 0:
raise LinAlgError('SVD did not converge in Linear Least Squares')
resids = array([], result_real_t)
if is_1d:
x = array(ravel(bstar)[:n], dtype=result_t, copy=True)
if results['rank'] == n and m > n:
if isComplexType(t):
resids = array([sum(abs(ravel(bstar)[n:])**2)],
dtype=result_real_t)
else:
resids = array([sum((ravel(bstar)[n:])**2)],
dtype=result_real_t)
else:
x = array(transpose(bstar)[:n,:], dtype=result_t, copy=True)
if results['rank'] == n and m > n:
if isComplexType(t):
resids = sum(abs(transpose(bstar)[n:,:])**2, axis=0).astype(
result_real_t, copy=False)
else:
resids = sum((transpose(bstar)[n:,:])**2, axis=0).astype(
result_real_t, copy=False)
st = s[:min(n, m)].astype(result_real_t, copy=True)
return wrap(x), wrap(resids), results['rank'], st
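# Illustrative sketch, not part of the original source: for an over-determined,
# full-rank system the `residuals` return value equals the squared 2-norm of
# b - a.dot(x). The helper name _demo_lstsq_residuals is made up; it assumes
# NumPy is importable as `np` and reuses the line-fit data from the docstring.
def _demo_lstsq_residuals():
    import numpy as np
    x_data = np.array([0., 1., 2., 3.])
    y_data = np.array([-1., 0.2, 0.9, 2.1])
    A = np.vstack([x_data, np.ones(len(x_data))]).T
    coeffs, residuals, rank, sv = np.linalg.lstsq(A, y_data)
    manual = np.sum((y_data - A.dot(coeffs)) ** 2)
    assert rank == 2 and np.allclose(residuals, manual)
    return coeffs, residuals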
def _multi_svd_norm(x, row_axis, col_axis, op):
"""Compute a function of the singular values of the 2-D matrices in `x`.
This is a private utility function used by numpy.linalg.norm().
Parameters
----------
x : ndarray
row_axis, col_axis : int
The axes of `x` that hold the 2-D matrices.
op : callable
This should be either numpy.amin or numpy.amax or numpy.sum.
Returns
-------
result : float or ndarray
If `x` is 2-D, the return values is a float.
Otherwise, it is an array with ``x.ndim - 2`` dimensions.
The return values are either the minimum or maximum or sum of the
singular values of the matrices, depending on whether `op`
is `numpy.amin` or `numpy.amax` or `numpy.sum`.
"""
if row_axis > col_axis:
row_axis -= 1
y = rollaxis(rollaxis(x, col_axis, x.ndim), row_axis, -1)
result = op(svd(y, compute_uv=0), axis=-1)
return result
def norm(x, ord=None, axis=None, keepdims=False):
"""
Matrix or vector norm.
This function is able to return one of eight different matrix norms,
or one of an infinite number of vector norms (described below), depending
on the value of the ``ord`` parameter.
Parameters
----------
x : array_like
Input array. If `axis` is None, `x` must be 1-D or 2-D.
ord : {non-zero int, inf, -inf, 'fro', 'nuc'}, optional
Order of the norm (see table under ``Notes``). inf means numpy's
`inf` object.
axis : {int, 2-tuple of ints, None}, optional
If `axis` is an integer, it specifies the axis of `x` along which to
compute the vector norms. If `axis` is a 2-tuple, it specifies the
axes that hold 2-D matrices, and the matrix norms of these matrices
are computed. If `axis` is None then either a vector norm (when `x`
is 1-D) or a matrix norm (when `x` is 2-D) is returned.
keepdims : bool, optional
If this is set to True, the axes which are normed over are left in the
result as dimensions with size one. With this option the result will
broadcast correctly against the original `x`.
.. versionadded:: 1.10.0
Returns
-------
n : float or ndarray
Norm of the matrix or vector(s).
Notes
-----
For values of ``ord <= 0``, the result is, strictly speaking, not a
mathematical 'norm', but it may still be useful for various numerical
purposes.
The following norms can be calculated:
===== ============================ ==========================
ord norm for matrices norm for vectors
===== ============================ ==========================
None Frobenius norm 2-norm
'fro' Frobenius norm --
'nuc' nuclear norm --
inf max(sum(abs(x), axis=1)) max(abs(x))
-inf min(sum(abs(x), axis=1)) min(abs(x))
0 -- sum(x != 0)
1 max(sum(abs(x), axis=0)) as below
-1 min(sum(abs(x), axis=0)) as below
2 2-norm (largest sing. value) as below
-2 smallest singular value as below
other -- sum(abs(x)**ord)**(1./ord)
===== ============================ ==========================
The Frobenius norm is given by [1]_:
:math:`||A||_F = [\\sum_{i,j} abs(a_{i,j})^2]^{1/2}`
The nuclear norm is the sum of the singular values.
References
----------
.. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*,
Baltimore, MD, Johns Hopkins University Press, 1985, pg. 15
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.arange(9) - 4
>>> a
array([-4, -3, -2, -1, 0, 1, 2, 3, 4])
>>> b = a.reshape((3, 3))
>>> b
array([[-4, -3, -2],
[-1, 0, 1],
[ 2, 3, 4]])
>>> LA.norm(a)
7.745966692414834
>>> LA.norm(b)
7.745966692414834
>>> LA.norm(b, 'fro')
7.745966692414834
>>> LA.norm(a, np.inf)
4.0
>>> LA.norm(b, np.inf)
9.0
>>> LA.norm(a, -np.inf)
0.0
>>> LA.norm(b, -np.inf)
2.0
>>> LA.norm(a, 1)
20.0
>>> LA.norm(b, 1)
7.0
>>> LA.norm(a, -1)
-4.6566128774142013e-010
>>> LA.norm(b, -1)
6.0
>>> LA.norm(a, 2)
7.745966692414834
>>> LA.norm(b, 2)
7.3484692283495345
>>> LA.norm(a, -2)
nan
>>> LA.norm(b, -2)
1.8570331885190563e-016
>>> LA.norm(a, 3)
5.8480354764257312
>>> LA.norm(a, -3)
nan
Using the `axis` argument to compute vector norms:
>>> c = np.array([[ 1, 2, 3],
... [-1, 1, 4]])
>>> LA.norm(c, axis=0)
array([ 1.41421356, 2.23606798, 5. ])
>>> LA.norm(c, axis=1)
array([ 3.74165739, 4.24264069])
>>> LA.norm(c, ord=1, axis=1)
array([ 6., 6.])
Using the `axis` argument to compute matrix norms:
>>> m = np.arange(8).reshape(2,2,2)
>>> LA.norm(m, axis=(1,2))
array([ 3.74165739, 11.22497216])
>>> LA.norm(m[0, :, :]), LA.norm(m[1, :, :])
(3.7416573867739413, 11.224972160321824)
"""
x = asarray(x)
if not issubclass(x.dtype.type, (inexact, object_)):
x = x.astype(float)
# Immediately handle some default, simple, fast, and common cases.
if axis is None:
ndim = x.ndim
if ((ord is None) or
(ord in ('f', 'fro') and ndim == 2) or
(ord == 2 and ndim == 1)):
x = x.ravel(order='K')
if isComplexType(x.dtype.type):
sqnorm = dot(x.real, x.real) + dot(x.imag, x.imag)
else:
sqnorm = dot(x, x)
ret = sqrt(sqnorm)
if keepdims:
ret = ret.reshape(ndim*[1])
return ret
# Normalize the `axis` argument to a tuple.
nd = x.ndim
if axis is None:
axis = tuple(range(nd))
elif not isinstance(axis, tuple):
try:
axis = int(axis)
except:
raise TypeError("'axis' must be None, an integer or a tuple of integers")
axis = (axis,)
if len(axis) == 1:
if ord == Inf:
return abs(x).max(axis=axis, keepdims=keepdims)
elif ord == -Inf:
return abs(x).min(axis=axis, keepdims=keepdims)
elif ord == 0:
# Zero norm
return (x != 0).astype(float).sum(axis=axis, keepdims=keepdims)
elif ord == 1:
# special case for speedup
return add.reduce(abs(x), axis=axis, keepdims=keepdims)
elif ord is None or ord == 2:
# special case for speedup
s = (x.conj() * x).real
return sqrt(add.reduce(s, axis=axis, keepdims=keepdims))
else:
try:
ord + 1
except TypeError:
raise ValueError("Invalid norm order for vectors.")
if x.dtype.type is longdouble:
# Convert to a float type, so integer arrays give
# float results. Don't apply asfarray to longdouble arrays,
# because it will downcast to float64.
absx = abs(x)
else:
absx = x if isComplexType(x.dtype.type) else asfarray(x)
if absx.dtype is x.dtype:
absx = abs(absx)
else:
# if the type changed, we can safely overwrite absx
abs(absx, out=absx)
absx **= ord
return add.reduce(absx, axis=axis, keepdims=keepdims) ** (1.0 / ord)
elif len(axis) == 2:
row_axis, col_axis = axis
row_axis = normalize_axis_index(row_axis, nd)
col_axis = normalize_axis_index(col_axis, nd)
if row_axis == col_axis:
raise ValueError('Duplicate axes given.')
if ord == 2:
ret = _multi_svd_norm(x, row_axis, col_axis, amax)
elif ord == -2:
ret = _multi_svd_norm(x, row_axis, col_axis, amin)
elif ord == 1:
if col_axis > row_axis:
col_axis -= 1
ret = add.reduce(abs(x), axis=row_axis).max(axis=col_axis)
elif ord == Inf:
if row_axis > col_axis:
row_axis -= 1
ret = add.reduce(abs(x), axis=col_axis).max(axis=row_axis)
elif ord == -1:
if col_axis > row_axis:
col_axis -= 1
ret = add.reduce(abs(x), axis=row_axis).min(axis=col_axis)
elif ord == -Inf:
if row_axis > col_axis:
row_axis -= 1
ret = add.reduce(abs(x), axis=col_axis).min(axis=row_axis)
elif ord in [None, 'fro', 'f']:
ret = sqrt(add.reduce((x.conj() * x).real, axis=axis))
elif ord == 'nuc':
ret = _multi_svd_norm(x, row_axis, col_axis, sum)
else:
raise ValueError("Invalid norm order for matrices.")
if keepdims:
ret_shape = list(x.shape)
ret_shape[axis[0]] = 1
ret_shape[axis[1]] = 1
ret = ret.reshape(ret_shape)
return ret
else:
raise ValueError("Improper number of dimensions to norm.")
# multi_dot
def multi_dot(arrays):
"""
Compute the dot product of two or more arrays in a single function call,
while automatically selecting the fastest evaluation order.
`multi_dot` chains `numpy.dot` and uses optimal parenthesization
of the matrices [1]_ [2]_. Depending on the shapes of the matrices,
this can speed up the multiplication a lot.
If the first argument is 1-D it is treated as a row vector.
If the last argument is 1-D it is treated as a column vector.
The other arguments must be 2-D.
Think of `multi_dot` as::
def multi_dot(arrays): return functools.reduce(np.dot, arrays)
Parameters
----------
arrays : sequence of array_like
If the first argument is 1-D it is treated as row vector.
If the last argument is 1-D it is treated as column vector.
The other arguments must be 2-D.
Returns
-------
output : ndarray
Returns the dot product of the supplied arrays.
See Also
--------
dot : dot multiplication with two arguments.
References
----------
.. [1] Cormen, "Introduction to Algorithms", Chapter 15.2, p. 370-378
.. [2] http://en.wikipedia.org/wiki/Matrix_chain_multiplication
Examples
--------
`multi_dot` allows you to write::
>>> from numpy.linalg import multi_dot
>>> # Prepare some data
    >>> A = np.random.random((10000, 100))
    >>> B = np.random.random((100, 1000))
    >>> C = np.random.random((1000, 5))
    >>> D = np.random.random((5, 333))
>>> # the actual dot multiplication
>>> multi_dot([A, B, C, D])
instead of::
>>> np.dot(np.dot(np.dot(A, B), C), D)
>>> # or
>>> A.dot(B).dot(C).dot(D)
Notes
-----
The cost for a matrix multiplication can be calculated with the
following function::
def cost(A, B):
return A.shape[0] * A.shape[1] * B.shape[1]
Let's assume we have three matrices
:math:`A_{10x100}, B_{100x5}, C_{5x50}`.
The costs for the two different parenthesizations are as follows::
cost((AB)C) = 10*100*5 + 10*5*50 = 5000 + 2500 = 7500
cost(A(BC)) = 10*100*50 + 100*5*50 = 50000 + 25000 = 75000
"""
n = len(arrays)
# optimization only makes sense for len(arrays) > 2
if n < 2:
raise ValueError("Expecting at least two arrays.")
elif n == 2:
return dot(arrays[0], arrays[1])
arrays = [asanyarray(a) for a in arrays]
# save original ndim to reshape the result array into the proper form later
ndim_first, ndim_last = arrays[0].ndim, arrays[-1].ndim
# Explicitly convert vectors to 2D arrays to keep the logic of the internal
# _multi_dot_* functions as simple as possible.
if arrays[0].ndim == 1:
arrays[0] = atleast_2d(arrays[0])
if arrays[-1].ndim == 1:
arrays[-1] = atleast_2d(arrays[-1]).T
_assertRank2(*arrays)
# _multi_dot_three is much faster than _multi_dot_matrix_chain_order
if n == 3:
result = _multi_dot_three(arrays[0], arrays[1], arrays[2])
else:
order = _multi_dot_matrix_chain_order(arrays)
result = _multi_dot(arrays, order, 0, n - 1)
# return proper shape
if ndim_first == 1 and ndim_last == 1:
return result[0, 0] # scalar
elif ndim_first == 1 or ndim_last == 1:
return result.ravel() # 1-D
else:
return result
def _multi_dot_three(A, B, C):
"""
Find the best order for three arrays and do the multiplication.
For three arguments `_multi_dot_three` is approximately 15 times faster
than `_multi_dot_matrix_chain_order`
"""
a0, a1b0 = A.shape
b1c0, c1 = C.shape
# cost1 = cost((AB)C) = a0*a1b0*b1c0 + a0*b1c0*c1
cost1 = a0 * b1c0 * (a1b0 + c1)
# cost2 = cost(A(BC)) = a1b0*b1c0*c1 + a0*a1b0*c1
cost2 = a1b0 * c1 * (a0 + b1c0)
if cost1 < cost2:
return dot(dot(A, B), C)
else:
return dot(A, dot(B, C))
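# Worked check added for illustration (shapes taken from the Notes in
# `multi_dot` above): for A (10x100), B (100x5), C (5x50) this helper computes
#   cost1 = a0*b1c0*(a1b0 + c1) = 10*5*(100 + 50)  = 7500
#   cost2 = a1b0*c1*(a0 + b1c0) = 100*50*(10 + 5)  = 75000
# so cost1 < cost2 and dot(dot(A, B), C) is returned.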
def _multi_dot_matrix_chain_order(arrays, return_costs=False):
"""
    Return a np.array that encodes the optimal order of multiplications.
    The optimal order array is then used by `_multi_dot()` to do the
    multiplication.
    Also return the cost matrix if `return_costs` is `True`.
The implementation CLOSELY follows Cormen, "Introduction to Algorithms",
Chapter 15.2, p. 370-378. Note that Cormen uses 1-based indices.
cost[i, j] = min([
cost[prefix] + cost[suffix] + cost_mult(prefix, suffix)
for k in range(i, j)])
"""
n = len(arrays)
# p stores the dimensions of the matrices
# Example for p: A_{10x100}, B_{100x5}, C_{5x50} --> p = [10, 100, 5, 50]
p = [a.shape[0] for a in arrays] + [arrays[-1].shape[1]]
# m is a matrix of costs of the subproblems
# m[i,j]: min number of scalar multiplications needed to compute A_{i..j}
m = zeros((n, n), dtype=double)
# s is the actual ordering
# s[i, j] is the value of k at which we split the product A_i..A_j
s = empty((n, n), dtype=intp)
for l in range(1, n):
for i in range(n - l):
j = i + l
m[i, j] = Inf
for k in range(i, j):
q = m[i, k] + m[k+1, j] + p[i]*p[k+1]*p[j+1]
if q < m[i, j]:
m[i, j] = q
s[i, j] = k # Note that Cormen uses 1-based index
return (s, m) if return_costs else s
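# Illustrative note (added, not part of the original module): for the same
# example, arrays with shapes (10, 100), (100, 5), (5, 50) give
# p = [10, 100, 5, 50] and the returned order matrix satisfies s[0, 2] == 1,
# i.e. the split falls between B and C, so _multi_dot evaluates (A B) first and
# then multiplies by C, in agreement with the 7500-vs-75000 comparison above.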
def _multi_dot(arrays, order, i, j):
"""Actually do the multiplication with the given order."""
if i == j:
return arrays[i]
else:
return dot(_multi_dot(arrays, order, i, order[i, j]),
_multi_dot(arrays, order, order[i, j] + 1, j))
| apache-2.0 |
RomainBrault/scikit-learn | sklearn/neighbors/tests/test_nearest_centroid.py | 305 | 4121 | """
Testing for the nearest centroid module.
"""
import numpy as np
from scipy import sparse as sp
from numpy.testing import assert_array_equal
from numpy.testing import assert_equal
from sklearn.neighbors import NearestCentroid
from sklearn import datasets
from sklearn.metrics.pairwise import pairwise_distances
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
X_csr = sp.csr_matrix(X) # Sparse matrix
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
T_csr = sp.csr_matrix(T)
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = np.random.RandomState(1)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def test_classification_toy():
# Check classification on a toy dataset, including sparse versions.
clf = NearestCentroid()
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# Same test, but with a sparse matrix to fit and test.
clf = NearestCentroid()
clf.fit(X_csr, y)
assert_array_equal(clf.predict(T_csr), true_result)
# Fit with sparse, test with non-sparse
clf = NearestCentroid()
clf.fit(X_csr, y)
assert_array_equal(clf.predict(T), true_result)
# Fit with non-sparse, test with sparse
clf = NearestCentroid()
clf.fit(X, y)
assert_array_equal(clf.predict(T_csr), true_result)
# Fit and predict with non-CSR sparse matrices
clf = NearestCentroid()
clf.fit(X_csr.tocoo(), y)
assert_array_equal(clf.predict(T_csr.tolil()), true_result)
def test_precomputed():
clf = NearestCentroid(metric="precomputed")
clf.fit(X, y)
S = pairwise_distances(T, clf.centroids_)
assert_array_equal(clf.predict(S), true_result)
def test_iris():
# Check consistency on dataset iris.
for metric in ('euclidean', 'cosine'):
clf = NearestCentroid(metric=metric).fit(iris.data, iris.target)
score = np.mean(clf.predict(iris.data) == iris.target)
assert score > 0.9, "Failed with score = " + str(score)
def test_iris_shrinkage():
# Check consistency on dataset iris, when using shrinkage.
for metric in ('euclidean', 'cosine'):
for shrink_threshold in [None, 0.1, 0.5]:
clf = NearestCentroid(metric=metric,
shrink_threshold=shrink_threshold)
clf = clf.fit(iris.data, iris.target)
score = np.mean(clf.predict(iris.data) == iris.target)
assert score > 0.8, "Failed with score = " + str(score)
def test_pickle():
import pickle
# classification
obj = NearestCentroid()
obj.fit(iris.data, iris.target)
score = obj.score(iris.data, iris.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(iris.data, iris.target)
assert_array_equal(score, score2,
"Failed to generate same score"
" after pickling (classification).")
def test_shrinkage_threshold_decoded_y():
clf = NearestCentroid(shrink_threshold=0.01)
y_ind = np.asarray(y)
y_ind[y_ind == -1] = 0
clf.fit(X, y_ind)
centroid_encoded = clf.centroids_
clf.fit(X, y)
assert_array_equal(centroid_encoded, clf.centroids_)
def test_predict_translated_data():
# Test that NearestCentroid gives same results on translated data
rng = np.random.RandomState(0)
X = rng.rand(50, 50)
y = rng.randint(0, 3, 50)
noise = rng.rand(50)
clf = NearestCentroid(shrink_threshold=0.1)
clf.fit(X, y)
y_init = clf.predict(X)
clf = NearestCentroid(shrink_threshold=0.1)
X_noise = X + noise
clf.fit(X_noise, y)
y_translate = clf.predict(X_noise)
assert_array_equal(y_init, y_translate)
def test_manhattan_metric():
# Test the manhattan metric.
clf = NearestCentroid(metric='manhattan')
clf.fit(X, y)
dense_centroid = clf.centroids_
clf.fit(X_csr, y)
assert_array_equal(clf.centroids_, dense_centroid)
assert_array_equal(dense_centroid, [[-1, -1], [1, 1]])
| bsd-3-clause |
juhi24/baecc | scripts/scr_velfitgroup.py | 1 | 3594 | # -*- coding: utf-8 -*-
"""
@author: Jussi Tiira
"""
from snowfall import *
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
from os import path
def rangecolor(rho, rholimits=(150, 300), colors=('r', 'b', 'c', 'g', 'm')):
for i, rhomax in enumerate(rholimits):
if rho < rhomax:
return colors[i]
return colors[i+1]
def rho_range_str(rho, rholimits=(150, 300)):
rhomax_old = ''
for rhomax in rholimits:
rhomin = rhomax_old
rhomax_old = str(rhomax) + '<'
if rho < rhomax:
return '%srho<%s' % (rhomin, rhomax)
return 'rho>%s' % rhomax
def segment_index(val, limits=(150, 300)):
for i, maxval in enumerate(limits):
if val < maxval:
return i
return i+1
def normalize(val, maxval=500):
if val>maxval:
return 1
return val/maxval
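# Quick illustration (values chosen for clarity, not taken from the data): with
# the default rholimits=(150, 300), rangecolor(100) -> 'r', rangecolor(200) -> 'b'
# and rangecolor(400) -> 'c'; segment_index returns the matching 0/1/2 column
# index used further below to pick the subplot axis.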
plt.ioff()
plt.close('all')
dtformat_default = '%d.%m. %H:%M'
dtformat_snex = '%Y %d %B %H UTC'
e = EventsCollection('cases/pip2015.csv', dtformat_snex)
e.autoimport_data(autoshift=False, autobias=False, rule='6min', varinterval=True)
#ax = plt.gca()
#cmap = mpl.cm.gnuplot
#norm = mpl.colors.Normalize(vmin=0,vmax=500)
#cb = mpl.colorbar.ColorbarBase(ax, cmap=cmap, norm=norm)
rholimits = (150, 300)
combined = False
subplots = True
outpath = '../results/pip2015/velfitgroups2/'
extra = ''
subpath = ''
if subplots:
subpath = 'subplots/'
combined = False
elif combined:
subpath = 'combined/'
for c in np.append(e.events.pluvio200.values, e.events.pluvio400.values):
c.instr['pluvio'].shift_periods = -6
if combined:
fig = plt.figure()
colorstr = c.density().apply(rangecolor, rholimits=rholimits)
colorval = c.density().apply(normalize)
colorstr.name = 'colorstr'
colorval.name = 'colorval'
merged = read.merge_multiseries(c.pipv.fits, colorstr, colorval, c.density())
#merged.apply(lambda row: row.polfit.plot(color=row.colorstr, linewidth=1, xmax=10), axis=1)
groups = merged.groupby(colorstr)
if subplots:
fig, axarr = plt.subplots(ncols=len(rholimits)+1, figsize=(22,6),
sharex='all', sharey='all')
for name, group in groups:
rho = group.density.mean()
alpha = 1
i = segment_index(rho, limits=rholimits)
rhorange = rho_range_str(rho, rholimits=rholimits)
if not subplots:
if combined:
ax = plt.gca()
alpha = 0.2
else:
fig, ax = plt.subplots()
extra = '_' + rhorange
else:
ax = axarr[i]
group.apply(lambda row: row.polfit.plot(ax=ax, linewidth=0.5, color=name, alpha=alpha), axis=1)
ax.set_xlim([0, 10])
dtstr = str(c.dt_start_end()[0].date())
ax.set_ylim((0,3))
ax.set_title('%s, %s (%s)' % (dtstr, rhorange, c.instr['pluvio'].name))
if not (subplots and i>0):
ax.set_ylabel('fall velocity')
ax.set_xlabel('equivalent diameter')
ax.grid(axis='y')
if subplots:
fig.subplots_adjust(hspace=0)
plt.setp([a.get_yticklabels() for a in fig.axes[1:]], visible=False)
elif not combined:
plt.savefig(read.ensure_dir(outpath + subpath) + c.instr['pluvio'].name + '_' + dtstr + extra + '.eps')
if subplots:
fig.subplots_adjust(wspace=0)
plt.setp([a.get_yticklabels() for a in fig.axes[1:]], visible=False)
plt.savefig(read.ensure_dir(outpath + subpath) + c.instr['pluvio'].name + '_' + dtstr + extra + '.eps') | gpl-3.0 |
MarcusJones/ExergyUtilities | Migration/Custom/.jupyter/jupyter_notebook_config.py | 2 | 27763 | # Configuration file for jupyter-notebook.
#------------------------------------------------------------------------------
# Application(SingletonConfigurable) configuration
#------------------------------------------------------------------------------
## This is an application.
## The date format used by logging formatters for %(asctime)s
#c.Application.log_datefmt = '%Y-%m-%d %H:%M:%S'
## The Logging format template
#c.Application.log_format = '[%(name)s]%(highlevel)s %(message)s'
## Set the log level by value or name.
#c.Application.log_level = 30
#------------------------------------------------------------------------------
# JupyterApp(Application) configuration
#------------------------------------------------------------------------------
## Base class for Jupyter applications
## Answer yes to any prompts.
#c.JupyterApp.answer_yes = False
## Full path of a config file.
#c.JupyterApp.config_file = ''
## Specify a config file to load.
#c.JupyterApp.config_file_name = ''
## Generate default config file.
#c.JupyterApp.generate_config = False
#------------------------------------------------------------------------------
# NotebookApp(JupyterApp) configuration
#------------------------------------------------------------------------------
## Set the Access-Control-Allow-Credentials: true header
#c.NotebookApp.allow_credentials = False
## Set the Access-Control-Allow-Origin header
#
# Use '*' to allow any origin to access your server.
#
# Takes precedence over allow_origin_pat.
#c.NotebookApp.allow_origin = ''
## Use a regular expression for the Access-Control-Allow-Origin header
#
# Requests from an origin matching the expression will get replies with:
#
# Access-Control-Allow-Origin: origin
#
# where `origin` is the origin of the request.
#
# Ignored if allow_origin is set.
#c.NotebookApp.allow_origin_pat = ''
## Allow password to be changed at login for the notebook server.
#
# While logging in with a token, the notebook server UI will give the user the
# opportunity to enter a new password at the same time, which will replace the
# token login mechanism.
#
# This can be set to false to prevent changing password from the UI/API.
#c.NotebookApp.allow_password_change = True
## Whether to allow the user to run the notebook as root.
#c.NotebookApp.allow_root = False
## DEPRECATED use base_url
#c.NotebookApp.base_project_url = '/'
## The base URL for the notebook server.
#
# Leading and trailing slashes can be omitted, and will automatically be added.
#c.NotebookApp.base_url = '/'
## Specify what command to use to invoke a web browser when opening the notebook.
# If not specified, the default browser will be determined by the `webbrowser`
# standard library module, which allows setting of the BROWSER environment
# variable to override it.
#c.NotebookApp.browser = ''
## The full path to an SSL/TLS certificate file.
#c.NotebookApp.certfile = ''
## The full path to a certificate authority certificate for SSL/TLS client
# authentication.
#c.NotebookApp.client_ca = ''
## The config manager class to use
#c.NotebookApp.config_manager_class = 'notebook.services.config.manager.ConfigManager'
## The notebook manager class to use.
#c.NotebookApp.contents_manager_class = 'notebook.services.contents.largefilemanager.LargeFileManager'
## Extra keyword arguments to pass to `set_secure_cookie`. See tornado's
# set_secure_cookie docs for details.
#c.NotebookApp.cookie_options = {}
## The random bytes used to secure cookies. By default this is a new random
# number every time you start the Notebook. Set it to a value in a config file
# to enable logins to persist across server sessions.
#
# Note: Cookie secrets should be kept private, do not share config files with
# cookie_secret stored in plaintext (you can read the value from a file).
#c.NotebookApp.cookie_secret = b''
## The file where the cookie secret is stored.
#c.NotebookApp.cookie_secret_file = ''
## The default URL to redirect to from `/`
#c.NotebookApp.default_url = '/tree'
## Disable cross-site-request-forgery protection
#
# Jupyter notebook 4.3.1 introduces protection from cross-site request
# forgeries, requiring API requests to either:
#
# - originate from pages served by this server (validated with XSRF cookie and
# token), or - authenticate with a token
#
# Some anonymous compute resources still desire the ability to run code,
# completely without authentication. These services can disable all
# authentication and security checks, with the full knowledge of what that
# implies.
#c.NotebookApp.disable_check_xsrf = False
## Whether to enable MathJax for typesetting math/TeX
#
# MathJax is the javascript library Jupyter uses to render math/LaTeX. It is
# very large, so you may want to disable it if you have a slow internet
# connection, or for offline use of the notebook.
#
# When disabled, equations etc. will appear as their untransformed TeX source.
#c.NotebookApp.enable_mathjax = True
## extra paths to look for Javascript notebook extensions
#c.NotebookApp.extra_nbextensions_path = []
## handlers that should be loaded at higher priority than the default services
#c.NotebookApp.extra_services = []
## Extra paths to search for serving static files.
#
# This allows adding javascript/css to be available from the notebook server
# machine, or overriding individual files in the IPython
#c.NotebookApp.extra_static_paths = []
## Extra paths to search for serving jinja templates.
#
# Can be used to override templates from notebook.templates.
#c.NotebookApp.extra_template_paths = []
##
#c.NotebookApp.file_to_run = ''
## Deprecated: Use minified JS file or not, mainly used during dev to avoid JS
# recompilation
#c.NotebookApp.ignore_minified_js = False
## (bytes/sec) Maximum rate at which stream output can be sent on iopub before
# they are limited.
#c.NotebookApp.iopub_data_rate_limit = 1000000
## (msgs/sec) Maximum rate at which messages can be sent on iopub before they are
# limited.
#c.NotebookApp.iopub_msg_rate_limit = 1000
## The IP address the notebook server will listen on.
#c.NotebookApp.ip = 'localhost'
## Supply extra arguments that will be passed to Jinja environment.
#c.NotebookApp.jinja_environment_options = {}
## Extra variables to supply to jinja templates when rendering.
#c.NotebookApp.jinja_template_vars = {}
## The kernel manager class to use.
#c.NotebookApp.kernel_manager_class = 'notebook.services.kernels.kernelmanager.MappingKernelManager'
## The kernel spec manager class to use. Should be a subclass of
# `jupyter_client.kernelspec.KernelSpecManager`.
#
# The Api of KernelSpecManager is provisional and might change without warning
# between this version of Jupyter and the next stable one.
#c.NotebookApp.kernel_spec_manager_class = 'jupyter_client.kernelspec.KernelSpecManager'
## The full path to a private key file for usage with SSL/TLS.
#c.NotebookApp.keyfile = ''
## The login handler class to use.
#c.NotebookApp.login_handler_class = 'notebook.auth.login.LoginHandler'
## The logout handler class to use.
#c.NotebookApp.logout_handler_class = 'notebook.auth.logout.LogoutHandler'
## The MathJax.js configuration file that is to be used.
#c.NotebookApp.mathjax_config = 'TeX-AMS-MML_HTMLorMML-full,Safe'
## A custom url for MathJax.js. Should be in the form of a case-sensitive url to
# MathJax, for example: /static/components/MathJax/MathJax.js
#c.NotebookApp.mathjax_url = ''
## Dict of Python modules to load as notebook server extensions. Entry values can
# be used to enable and disable the loading of the extensions. The extensions
# will be loaded in alphabetical order.
#c.NotebookApp.nbserver_extensions = {}
## The directory to use for notebooks and kernels.
#c.NotebookApp.notebook_dir = ''
## Whether to open in a browser after starting. The specific browser used is
# platform dependent and determined by the python standard library `webbrowser`
# module, unless it is overridden using the --browser (NotebookApp.browser)
# configuration option.
#c.NotebookApp.open_browser = True
## Hashed password to use for web authentication.
#
# To generate, type in a python/IPython shell:
#
# from notebook.auth import passwd; passwd()
#
# The string should be of the form type:salt:hashed-password.
#c.NotebookApp.password = ''
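# For example (hypothetical hash shown only to illustrate the expected format;
# generate your own with the snippet above):
#
#c.NotebookApp.password = 'sha1:67c9e60bb8b6:9ffede0825894254b2e042ea597d771089e11aed'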
## Forces users to use a password for the Notebook server. This is useful in a
# multi user environment, for instance when everybody in the LAN can access each
# other's machine through ssh.
#
# In such a case, serving the notebook server on localhost is not secure since
# any user can connect to the notebook server via ssh.
#c.NotebookApp.password_required = False
## The port the notebook server will listen on.
#c.NotebookApp.port = 8888
## The number of additional ports to try if the specified port is not available.
#c.NotebookApp.port_retries = 50
## DISABLED: use %pylab or %matplotlib in the notebook to enable matplotlib.
#c.NotebookApp.pylab = 'disabled'
## (sec) Time window used to check the message and data rate limits.
#c.NotebookApp.rate_limit_window = 3
## Reraise exceptions encountered loading server extensions?
#c.NotebookApp.reraise_server_extension_failures = False
## DEPRECATED use the nbserver_extensions dict instead
#c.NotebookApp.server_extensions = []
## The session manager class to use.
#c.NotebookApp.session_manager_class = 'notebook.services.sessions.sessionmanager.SessionManager'
## Shut down the server after N seconds with no kernels or terminals running and
# no activity. This can be used together with culling idle kernels
# (MappingKernelManager.cull_idle_timeout) to shutdown the notebook server when
# it's not in use. This is not precisely timed: it may shut down up to a minute
# later. 0 (the default) disables this automatic shutdown.
#c.NotebookApp.shutdown_no_activity_timeout = 0
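# For example, to have the server exit after roughly an hour with no running
# kernels or terminals (an illustrative value, not a recommendation), one could
# set this together with the kernel-culling option mentioned above:
#
#c.NotebookApp.shutdown_no_activity_timeout = 3600
#c.MappingKernelManager.cull_idle_timeout = 3600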
## Supply SSL options for the tornado HTTPServer. See the tornado docs for
# details.
#c.NotebookApp.ssl_options = {}
## Supply overrides for terminado. Currently only supports "shell_command".
#c.NotebookApp.terminado_settings = {}
## Token used for authenticating first-time connections to the server.
#
# When no password is enabled, the default is to generate a new, random token.
#
# Setting to an empty string disables authentication altogether, which is NOT
# RECOMMENDED.
#c.NotebookApp.token = '<generated>'
## Supply overrides for the tornado.web.Application that the Jupyter notebook
# uses.
#c.NotebookApp.tornado_settings = {}
## Whether to trust or not X-Scheme/X-Forwarded-Proto and X-Real-Ip/X-Forwarded-
# For headers sent by the upstream reverse proxy. Necessary if the proxy handles
# SSL
#c.NotebookApp.trust_xheaders = False
## DEPRECATED, use tornado_settings
#c.NotebookApp.webapp_settings = {}
## Specify Where to open the notebook on startup. This is the
# `new` argument passed to the standard library method `webbrowser.open`.
# The behaviour is not guaranteed, but depends on browser support. Valid
# values are:
# 2 opens a new tab,
# 1 opens a new window,
# 0 opens in an existing window.
# See the `webbrowser.open` documentation for details.
#c.NotebookApp.webbrowser_open_new = 2
## Set the tornado compression options for websocket connections.
#
# This value will be returned from
# :meth:`WebSocketHandler.get_compression_options`. None (default) will disable
# compression. A dict (even an empty one) will enable compression.
#
# See the tornado docs for WebSocketHandler.get_compression_options for details.
#c.NotebookApp.websocket_compression_options = None
## The base URL for websockets, if it differs from the HTTP server (hint: it
# almost certainly doesn't).
#
# Should be in the form of an HTTP origin: ws[s]://hostname[:port]
#c.NotebookApp.websocket_url = ''
#------------------------------------------------------------------------------
# LabApp(NotebookApp) configuration
#------------------------------------------------------------------------------
## The app directory to launch JupyterLab from.
#c.LabApp.app_dir = '/home/batman/miniconda3/share/jupyter/lab'
## Whether to start the app in core mode. In this mode, JupyterLab will run using
# the JavaScript assets that are within the installed JupyterLab Python package.
# In core mode, third party extensions are disabled. The `--dev-mode` flag is an
# alias to this to be used when the Python package itself is installed in
# development mode (`pip install -e .`).
#c.LabApp.core_mode = False
## The default URL to redirect to from `/`
#c.LabApp.default_url = '/lab'
## Whether to start the app in dev mode. Uses the unpublished local JavaScript
# packages in the `dev_mode` folder. In this case JupyterLab will show a red
# stripe at the top of the page. It can only be used if JupyterLab is installed
# as `pip install -e .`.
#c.LabApp.dev_mode = False
## The directory for user settings.
#c.LabApp.user_settings_dir = '/home/batman/.jupyter/lab/user-settings'
## Whether to serve the app in watch mode
#c.LabApp.watch = False
## The directory for workspaces
#c.LabApp.workspaces_dir = '/home/batman/.jupyter/lab/workspaces'
#------------------------------------------------------------------------------
# ConnectionFileMixin(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## Mixin for configurable classes that work with connection files
## JSON file in which to store connection info [default: kernel-<pid>.json]
#
# This file will contain the IP, ports, and authentication key needed to connect
# clients to this kernel. By default, this file will be created in the security
# dir of the current profile, but can be specified by absolute path.
#c.ConnectionFileMixin.connection_file = ''
## set the control (ROUTER) port [default: random]
#c.ConnectionFileMixin.control_port = 0
## set the heartbeat port [default: random]
#c.ConnectionFileMixin.hb_port = 0
## set the iopub (PUB) port [default: random]
#c.ConnectionFileMixin.iopub_port = 0
## Set the kernel's IP address [default localhost]. If the IP address is
# something other than localhost, then Consoles on other machines will be able
# to connect to the Kernel, so be careful!
#c.ConnectionFileMixin.ip = ''
## set the shell (ROUTER) port [default: random]
#c.ConnectionFileMixin.shell_port = 0
## set the stdin (ROUTER) port [default: random]
#c.ConnectionFileMixin.stdin_port = 0
##
#c.ConnectionFileMixin.transport = 'tcp'
#------------------------------------------------------------------------------
# KernelManager(ConnectionFileMixin) configuration
#------------------------------------------------------------------------------
## Manages a single kernel in a subprocess on this host.
#
# This version starts kernels with Popen.
## Should we autorestart the kernel if it dies.
#c.KernelManager.autorestart = True
## DEPRECATED: Use kernel_name instead.
#
# The Popen Command to launch the kernel. Override this if you have a custom
# kernel. If kernel_cmd is specified in a configuration file, Jupyter does not
# pass any arguments to the kernel, because it cannot make any assumptions about
# the arguments that the kernel understands. In particular, this means that the
# kernel does not receive the option --debug if it given on the Jupyter command
# line.
#c.KernelManager.kernel_cmd = []
## Time to wait for a kernel to terminate before killing it, in seconds.
#c.KernelManager.shutdown_wait_time = 5.0
#------------------------------------------------------------------------------
# Session(Configurable) configuration
#------------------------------------------------------------------------------
## Object for handling serialization and sending of messages.
#
# The Session object handles building messages and sending them with ZMQ sockets
# or ZMQStream objects. Objects can communicate with each other over the
# network via Session objects, and only need to work with the dict-based IPython
# message spec. The Session will handle serialization/deserialization, security,
# and metadata.
#
# Sessions support configurable serialization via packer/unpacker traits, and
# signing with HMAC digests via the key/keyfile traits.
#
# Parameters ----------
#
# debug : bool
# whether to trigger extra debugging statements
# packer/unpacker : str : 'json', 'pickle' or import_string
# importstrings for methods to serialize message parts. If just
# 'json' or 'pickle', predefined JSON and pickle packers will be used.
# Otherwise, the entire importstring must be used.
#
# The functions must accept at least valid JSON input, and output *bytes*.
#
# For example, to use msgpack:
# packer = 'msgpack.packb', unpacker='msgpack.unpackb'
# pack/unpack : callables
# You can also set the pack/unpack callables for serialization directly.
# session : bytes
# the ID of this Session object. The default is to generate a new UUID.
# username : unicode
# username added to message headers. The default is to ask the OS.
# key : bytes
# The key used to initialize an HMAC signature. If unset, messages
# will not be signed or checked.
# keyfile : filepath
# The file containing a key. If this is set, `key` will be initialized
# to the contents of the file.
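# For example (assuming the msgpack package mentioned above is installed), the
# serializer could be switched from this config file with:
#
#c.Session.packer = 'msgpack.packb'
#c.Session.unpacker = 'msgpack.unpackb'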
## Threshold (in bytes) beyond which an object's buffer should be extracted to
# avoid pickling.
#c.Session.buffer_threshold = 1024
## Whether to check PID to protect against calls after fork.
#
# This check can be disabled if fork-safety is handled elsewhere.
#c.Session.check_pid = True
## Threshold (in bytes) beyond which a buffer should be sent without copying.
#c.Session.copy_threshold = 65536
## Debug output in the Session
#c.Session.debug = False
## The maximum number of digests to remember.
#
# The digest history will be culled when it exceeds this value.
#c.Session.digest_history_size = 65536
## The maximum number of items for a container to be introspected for custom
# serialization. Containers larger than this are pickled outright.
#c.Session.item_threshold = 64
## execution key, for signing messages.
#c.Session.key = b''
## path to file containing execution key.
#c.Session.keyfile = ''
## Metadata dictionary, which serves as the default top-level metadata dict for
# each message.
#c.Session.metadata = {}
## The name of the packer for serializing messages. Should be one of 'json',
# 'pickle', or an import name for a custom callable serializer.
#c.Session.packer = 'json'
## The UUID identifying this session.
#c.Session.session = ''
## The digest scheme used to construct the message signatures. Must have the form
# 'hmac-HASH'.
#c.Session.signature_scheme = 'hmac-sha256'
## The name of the unpacker for unserializing messages. Only used with custom
# functions for `packer`.
#c.Session.unpacker = 'json'
## Username for the Session. Default is your system username.
#c.Session.username = 'batman'
#------------------------------------------------------------------------------
# MultiKernelManager(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## A class for managing multiple kernels.
## The name of the default kernel to start
#c.MultiKernelManager.default_kernel_name = 'python3'
## The kernel manager class. This is configurable to allow subclassing of the
# KernelManager for customized behavior.
#c.MultiKernelManager.kernel_manager_class = 'jupyter_client.ioloop.IOLoopKernelManager'
#------------------------------------------------------------------------------
# MappingKernelManager(MultiKernelManager) configuration
#------------------------------------------------------------------------------
## A KernelManager that handles notebook mapping and HTTP error handling
## Whether messages from kernels whose frontends have disconnected should be
# buffered in-memory.
#
# When True (default), messages are buffered and replayed on reconnect, avoiding
# lost messages due to interrupted connectivity.
#
# Disable if long-running kernels will produce too much output while no
# frontends are connected.
#c.MappingKernelManager.buffer_offline_messages = True
## Whether to consider culling kernels which are busy. Only effective if
# cull_idle_timeout > 0.
#c.MappingKernelManager.cull_busy = False
## Whether to consider culling kernels which have one or more connections. Only
# effective if cull_idle_timeout > 0.
#c.MappingKernelManager.cull_connected = False
## Timeout (in seconds) after which a kernel is considered idle and ready to be
# culled. Values of 0 or lower disable culling. Very short timeouts may result
# in kernels being culled for users with poor network connections.
#c.MappingKernelManager.cull_idle_timeout = 0
## The interval (in seconds) on which to check for idle kernels exceeding the
# cull timeout value.
#c.MappingKernelManager.cull_interval = 300
##
#c.MappingKernelManager.root_dir = ''
#------------------------------------------------------------------------------
# ContentsManager(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## Base class for serving files and directories.
#
# This serves any text or binary file, as well as directories, with special
# handling for JSON notebook documents.
#
# Most APIs take a path argument, which is always an API-style unicode path, and
# always refers to a directory.
#
# - unicode, not url-escaped
# - '/'-separated
# - leading and trailing '/' will be stripped
# - if unspecified, path defaults to '',
# indicating the root path.
## Allow access to hidden files
#c.ContentsManager.allow_hidden = False
##
#c.ContentsManager.checkpoints = None
##
#c.ContentsManager.checkpoints_class = 'notebook.services.contents.checkpoints.Checkpoints'
##
#c.ContentsManager.checkpoints_kwargs = {}
## handler class to use when serving raw file requests.
#
# Default is a fallback that talks to the ContentsManager API, which may be
# inefficient, especially for large files.
#
# Local files-based ContentsManagers can use a StaticFileHandler subclass, which
# will be much more efficient.
#
# Access to these files should be Authenticated.
#c.ContentsManager.files_handler_class = 'notebook.files.handlers.FilesHandler'
## Extra parameters to pass to files_handler_class.
#
# For example, StaticFileHandlers generally expect a `path` argument specifying
# the root directory from which to serve files.
#c.ContentsManager.files_handler_params = {}
## Glob patterns to hide in file and directory listings.
#c.ContentsManager.hide_globs = ['__pycache__', '*.pyc', '*.pyo', '.DS_Store', '*.so', '*.dylib', '*~']
## Python callable or importstring thereof
#
# To be called on a contents model prior to save.
#
# This can be used to process the structure, such as removing notebook outputs
# or other side effects that should not be saved.
#
# It will be called as (all arguments passed by keyword)::
#
# hook(path=path, model=model, contents_manager=self)
#
# - model: the model to be saved. Includes file contents.
# Modifying this dict will affect the file that is stored.
# - path: the API path of the save destination
# - contents_manager: this ContentsManager instance
#c.ContentsManager.pre_save_hook = None
##
#c.ContentsManager.root_dir = '/'
## The base name used when creating untitled directories.
#c.ContentsManager.untitled_directory = 'Untitled Folder'
## The base name used when creating untitled files.
#c.ContentsManager.untitled_file = 'untitled'
## The base name used when creating untitled notebooks.
#c.ContentsManager.untitled_notebook = 'Untitled'
#------------------------------------------------------------------------------
# FileManagerMixin(Configurable) configuration
#------------------------------------------------------------------------------
## Mixin for ContentsAPI classes that interact with the filesystem.
#
# Provides facilities for reading, writing, and copying both notebooks and
# generic files.
#
# Shared by FileContentsManager and FileCheckpoints.
#
# Note ---- Classes using this mixin must provide the following attributes:
#
# root_dir : unicode
# A directory against against which API-style paths are to be resolved.
#
# log : logging.Logger
## By default notebooks are saved on disk to a temporary file which, if
# successfully written, then replaces the old one. This procedure, namely
# 'atomic_writing', causes some bugs on file systems without operation order
# enforcement (like some networked fs). If set to False, the new notebook is
# written directly over the old one, which could fail (e.g. full filesystem or
# quota exceeded).
#c.FileManagerMixin.use_atomic_writing = True
#------------------------------------------------------------------------------
# FileContentsManager(FileManagerMixin,ContentsManager) configuration
#------------------------------------------------------------------------------
## If True (default), deleting files will send them to the platform's
# trash/recycle bin, where they can be recovered. If False, deleting files
# really deletes them.
#c.FileContentsManager.delete_to_trash = True
## Python callable or importstring thereof
#
# to be called on the path of a file just saved.
#
# This can be used to process the file on disk, such as converting the notebook
# to a script or HTML via nbconvert.
#
# It will be called as (all arguments passed by keyword)::
#
# hook(os_path=os_path, model=model, contents_manager=instance)
#
# - path: the filesystem path to the file just written - model: the model
# representing the file - contents_manager: this ContentsManager instance
#c.FileContentsManager.post_save_hook = None
##
#c.FileContentsManager.root_dir = ''
## DEPRECATED, use post_save_hook. Will be removed in Notebook 5.0
#c.FileContentsManager.save_script = False
#------------------------------------------------------------------------------
# NotebookNotary(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## A class for computing and verifying notebook signatures.
## The hashing algorithm used to sign notebooks.
#c.NotebookNotary.algorithm = 'sha256'
## The sqlite file in which to store notebook signatures. By default, this will
# be in your Jupyter data directory. You can set it to ':memory:' to disable
# sqlite writing to the filesystem.
#c.NotebookNotary.db_file = ''
## The secret key with which notebooks are signed.
#c.NotebookNotary.secret = b''
## The file where the secret key is stored.
#c.NotebookNotary.secret_file = ''
## A callable returning the storage backend for notebook signatures. The default
# uses an SQLite database.
#c.NotebookNotary.store_factory = traitlets.Undefined
#------------------------------------------------------------------------------
# KernelSpecManager(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## If there is no Python kernelspec registered and the IPython kernel is
# available, ensure it is added to the spec list.
#c.KernelSpecManager.ensure_native_kernel = True
## The kernel spec class. This is configurable to allow subclassing of the
# KernelSpecManager for customized behavior.
#c.KernelSpecManager.kernel_spec_class = 'jupyter_client.kernelspec.KernelSpec'
## Whitelist of allowed kernel names.
#
# By default, all installed kernels are allowed.
#c.KernelSpecManager.whitelist = set()
| lgpl-3.0 |
rbdavid/Distance_matrix | Test_Case1/plotting_functions.py | 4 | 13432 | #!/Library/Frameworks/Python.framework/Versions/2.7/bin/python
# USAGE:
# from fn_plotting.py import *
# PREAMBLE:
import numpy as np
import sys
import os
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
import matplotlib as mpl
from matplotlib.ticker import NullFormatter
stdev = np.std
sqrt = np.sqrt
nullfmt = NullFormatter()
# ----------------------------------------
# PLOTTING SUBROUTINES
def make_colormap(seq):
"""Return a LinearSegmentedColormap
seq: a sequence of floats and RGB-tuples. The floats should be increasing
and in the interval (0,1).
"""
seq = [(None,) * 3, 0.0] + list(seq) + [1.0, (None,) * 3]
cdict = {'red': [], 'green': [], 'blue': []}
for i, item in enumerate(seq):
if isinstance(item, float):
r1, g1, b1 = seq[i - 1]
r2, g2, b2 = seq[i + 1]
cdict['red'].append([item, r1, r2])
cdict['green'].append([item, g1, g2])
cdict['blue'].append([item, b1, b2])
return mcolors.LinearSegmentedColormap('CustomMap', cdict)
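# Usage sketch (added for illustration; the colors are arbitrary), mirroring the
# commented-out examples in matrix2d further below:
#
#   c = mcolors.ColorConverter().to_rgb
#   bgr = make_colormap([c('blue'), c('lime'), 0.50, c('lime'), c('red'), 1.00, c('red')])
#   bgr.set_under('k')  # values below vmin drawn in black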
def plot_1d(xdata, ydata, color, x_axis, y_axis, system, analysis, average = False, t0 = 0, **kwargs):
""" Creates a 1D scatter/line plot:
Usage: plot_1d(xdata, ydata, color, x_axis, y_axis, system, analysis, average = [False|True], t0 = 0)
Arguments:
xdata, ydata: self-explanatory
color: color to be used to plot data
x_axis, y_axis: strings to be used for the axis label
system: descriptor for the system that produced the data
analysis: descriptor for the analysis that produced the data
average: [False|True]; Default is False; if set to True, the function will calc the average, standard dev, and standard dev of mean of the y-data # THERE IS A BUG IF average=True; must read in yunits for this function to work at the moment.
t0: index to begin averaging from; Default is 0
kwargs:
xunits, yunits: string with correct math text describing the units for the x/y data
x_lim, y_lim: list w/ two elements, setting the limits of the x/y ranges of plot
plt_title: string to be added as the plot title
draw_line: int value that determines the line style to be drawn; giving myself space to add more line styles if I decide I need them
"""
# INITIATING THE PLOT...
plt.plot(xdata, ydata, '%s' %(color))
# READING IN KWARG DICTIONARY INTO SPECIFIC VARIABLES
for name, value in kwargs.items():
if name == 'xunits':
x_units = value
x_axis = '%s (%s)' %(x_axis, value)
elif name == 'yunits':
y_units = value
y_axis = '%s (%s)' %(y_axis, value)
elif name == 'x_lim':
plt.xlim(value)
elif name == 'y_lim':
plt.ylim(value)
elif name == 'plt_title':
plt.title(r'%s' %(value), size='14')
elif name == 'draw_line':
draw_line = value
if draw_line == 1:
plt.plot([0,max(ydata)],[0,max(ydata)],'r-',linewidth=2)
else:
                print 'draw_line = %s has not been defined in plotting_functions script' %(draw_line)
plt.grid(b=True, which='major', axis='both', color='#808080', linestyle='--')
plt.xlabel(r'%s' %(x_axis), size=12)
plt.ylabel(r'%s' %(y_axis), size=12)
# CALCULATING THE AVERAGE/SD/SDOM OF THE Y-DATA
if average != False:
avg = np.sum(ydata[t0:])/len(ydata[t0:])
SD = stdev(ydata[t0:])
SDOM = SD/sqrt(len(ydata[t0:]))
plt.axhline(avg, xmin=0.0, xmax=1.0, c='r')
plt.figtext(0.680, 0.780, '%s\n%6.4f $\\pm$ %6.4f %s \nSD = %4.3f %s' %(analysis, avg, SDOM, y_units, SD, y_units), bbox=dict(boxstyle='square', ec='r', fc='w'), fontsize=12)
plt.savefig('%s.%s.plot1d.png' %(system,analysis),dpi=300)
plt.close()
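# Usage sketch for plot_1d (hypothetical data, labels and units -- not taken
# from any particular analysis):
#
#   t = np.arange(1000) * 0.002          # time axis, ns
#   r = 10 + np.random.randn(1000)       # noisy distance trace
#   plot_1d(t, r, 'k', 'Time', 'Distance', 'test_system', 'com_dist',
#           average=True, xunits='ns', yunits='$\AA$')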
def hist1d(data, x_axis, system, analysis, num_b = 100, norm = False, average = False, t0 = 0, **kwargs):
""" Creates a 1D histogram:
    Usage: hist1d(data, x_axis, system, analysis, num_b=100, norm=False)
Arguments:
data: self-explanatory
x_axis: string to be used for the axis label
system: descriptor for the system analyzed
analysis: descriptor for the analysis performed and plotted
num_b: number of bins to be used when binning the data; Default is 100
    norm: [False|True]; Default is False; if False, plot a frequency of data; if True, plot a probability density
average: [False|True]; Default is False; if set to True, the function will calc the average, standard dev, and standard dev of mean of the y-data
t0: index to begin averaging from; Default is 0
kwargs:
xunits: string with correct math text describing the units for the x data
x_lim, y_lim: list w/ two elements, setting the limits of the x/y ranges of plot
plt_title: string to be added as the plot title
"""
# INITIATING THE PLOT...
events, edges, patches = plt.hist(data, bins=num_b, histtype = 'bar', normed=norm)
# READING IN KWARG DICTIONARY INTO SPECIFIC VARIABLES
for name, value in kwargs.items():
if name == 'xunits':
x_units = value
x_axis = '%s (%s)' %(x_axis, value)
elif name == 'x_lim':
plt.xlim(value)
elif name == 'y_lim':
plt.ylim(value)
elif name == 'plt_title':
plt.title(r'%s' %(value), size='14')
plt.grid(b=True, which='major', axis='both', color='#808080', linestyle='--')
plt.xlabel(r'%s' %(x_axis), size=12)
# CALCULATING THE AVERAGE/SD/SDOM OF THE Y-DATA
if average != False:
avg = np.sum(data[t0:])/len(data[t0:])
SD = stdev(data[t0:])
SDOM = SD/sqrt(len(data[t0:]))
plt.axvline(avg, ymin=0.0, ymax=1.0, c='r')
plt.figtext(0.680, 0.780, '%s\n%6.4f $\\pm$ %6.4f %s \nSD = %4.3f %s' %(analysis, avg, SDOM, x_units, SD, x_units), bbox=dict(boxstyle='square', ec='r', fc='w'), fontsize=12)
if norm == True:
plt.ylabel('Probability Density')
plt.savefig('%s.%s.prob1d.png' %(system,analysis),dpi=300)
nf = open('%s.%s.prob1d.dat' %(system,analysis),'w')
else:
plt.ylabel('Frequency', size=12)
plt.savefig('%s.%s.hist1d.png' %(system,analysis),dpi=300)
nf = open('%s.%s.hist1d.dat' %(system,analysis), 'w')
for i in range(len(events)):
nf.write('%10.1f %10.4f\n' %(events[i], edges[i]))
plt.close()
nf.close()
events = []
edges = []
patches = []
def scat_hist(xdata, ydata, color, x_axis, y_axis, system, analysis, num_b = 100, average = False, t0 = 0, **kwargs):
""" Creates 1D scatter plot w/ a 1D histogram
Usage: scat_hist(xdata, ydata, color, x_axis, y_axis, system, analysis, num_b)
Arguments:
xdata, ydata: self-explanatory
color: color to be used to plot data
    x_axis, y_axis: strings to be printed on the axis labels
system: descriptor for the system analyzed
analysis: descriptor for the analysis performed and plotted
num_b: number of bins to be used when binning the data; Default is 100
average: [False|True]; Default is False; if set to True, the function will calc the average, standard dev, and standard dev of mean of the y-data # THERE IS A BUG; if average = True, need to read in xunits for this function to work...
t0: index to begin averaging from; Default is 0
kwargs:
xunits, yunits: string with correct math text describing the units for the x/y data
x_lim, y_lim: list w/ two elements, setting the limits of the x/y ranges of plot
plt_title: string to be added as the plot title
"""
# INITIATING THE PLOT SIZES
left, width = 0.1, 0.65
bottom, height = 0.1, 0.8
bottom_h = left_h = left+width+0.01
rect_scatter = [left, bottom, width, height]
rect_histy = [left_h, bottom, 0.2, height]
# INITIATING THE PLOT...
plt.figure(1, figsize=(10,8))
axScatter =plt.axes(rect_scatter)
axScatter.plot(xdata, ydata, '%s.' %(color))
# READING IN KWARG DICTIONARY INTO SPECIFIC VARIABLES
for name, value in kwargs.items():
if name == 'xunits':
x_units = value
x_axis = '%s (%s)' %(x_axis, value)
elif name == 'yunits':
y_units = value
y_axis = '%s (%s)' %(y_axis, value)
elif name == 'x_lim':
plt.xlim(value)
elif name == 'y_lim':
plt.ylim(value)
elif name == 'plt_title':
plt.title(r'%s' %(value), size='14')
plt.grid(b=True, which='major', axis='both', color='#808080', linestyle='--')
# plt.xlim((0,500))
plt.ylabel(r'%s' %(y_axis),size=12)
plt.xlabel(r'%s' %(x_axis),size=12)
if average != False:
avg = np.sum(ydata[t0:])/len(ydata[t0:])
SD = stdev(ydata[t0:])
SDOM = SD/sqrt(len(ydata[t0:]))
plt.axhline(avg, xmin=0.0, xmax=1.0, c='r')
axHisty = plt.axes(rect_histy)
axHisty.yaxis.set_major_formatter(nullfmt)
axHisty.xaxis.set_major_formatter(nullfmt)
plt.grid(b=True, which='major', axis='both', color='#808080', linestyle='--')
axHisty.hist(ydata, bins=num_b, orientation='horizontal', color = ['gray'])
axHisty.set_ylim(axScatter.get_ylim())
# CALCULATING THE AVERAGE/SD/SDOM OF THE Y-DATA
if average != False:
plt.axhline(avg, xmin=0.0, xmax=1.0, c='r')
plt.figtext(0.775, 0.810, '%s\n%6.4f $\\pm$ %6.4f %s \nSD = %4.3f %s' %(analysis, avg, SDOM, y_units, SD, y_units), bbox=dict(boxstyle='square', ec='r', fc='w'), fontsize=12)
plt.savefig('%s.%s.scat_hist.png' %(system, analysis),dpi=300)
plt.close()
def bar(xdata, ydata, x_axis, y_axis, system, analysis, **kwargs):
""" Creates a bar graph
    Usage: bar(xdata, ydata, x_axis, y_axis, system, analysis, **kwargs)
Arguments:
xdata, ydata: self-explanatory
    x_axis, y_axis: strings to be printed on the axis labels
system: descriptor for the system analyzed
analysis: descriptor for the analysis performed and plotted
kwargs:
xunits, yunits: string with correct math text describing the units for the x/y data
x_lim, y_lim: list (or tuple) w/ two elements, setting the limits of the x/y ranges of plot
plt_title: string to be added as the plot title
"""
# INITIATING THE PLOT...
plt.bar(xdata,ydata)
# READING IN KWARG DICTIONARY INTO SPECIFIC VARIABLES
for name, value in kwargs.items():
if name == 'xunits':
x_units = value
x_axis = '%s (%s)' %(x_axis, value)
elif name == 'yunits':
y_units = value
y_axis = '%s (%s)' %(y_axis, value)
elif name == 'x_lim':
plt.xlim(value)
elif name == 'y_lim':
plt.ylim(value)
elif name == 'plt_title':
plt.title(r'%s' %(value), size='16')
plt.grid(b=True, which='major', axis='both', color='#808080', linestyle='--')
plt.ylabel(r'%s' %(y_axis),size=12)
plt.xlabel(r'%s' %(x_axis),size=12)
plt.savefig('%s.%s.bar.png' %(system,analysis),dpi=300)
plt.close()
def hist2d(xdata, ydata, x_axis, y_axis, num_b, system, analysis, norm):
""" Creates a 2D histogram (heat map)
Usage: hist2d(xdata, ydata, x_axis, y_axis, num_b, system, analysis, norm)
Arguments:
xdata, ydata: self-explanatory
    x_axis, y_axis: strings to be printed on the axis labels
    num_b: number of bins to be used when binning the data
    system: descriptor for the system analyzed
    analysis: descriptor for the analysis performed and plotted
    norm: [False|True]; if False, plotting a frequency of data; if True, plotting a probability density
"""
my_cmap = plt.cm.get_cmap('jet')
my_cmap.set_under('w')
counts, xedges, yedges, image = plt.hist2d(xdata, ydata, bins=num_b, normed=norm, cmap=my_cmap, vmin=0.001)#, cmap=plt.get_cmap('jet')) # cmap: jet (blue to red), blues (white to blue), ...
cb1 = plt.colorbar()
if norm == True:
cb1.set_label('Prob. Density', size=12)
else:
cb1.set_label('Frequency')
# plt.title('Distribution of Base Pair interactions - %s-%s' %(base_a, base_b))
# plt.xlim((0,8))
# plt.ylim((0,8))
plt.xlabel(r'%s' %(x_axis), size=12)
plt.ylabel(r'%s' %(y_axis), size=12)
plt.savefig('%s.%s.hist2d.png' %(system, analysis),dpi=300)
plt.close()
counts = []
xedges = []
yedges = []
image = []
def matrix2d(matrix, x_axis, y_axis, cb_axis, system, analysis, **kwargs):
""" Creates a 2D matrix image
Usage: matrix2d(matrix,x_axis,y_axis,system,analysis)
Arguments:
matrix: the data matrix to be plotted (should have shape of MxN, but can have MxNx3 or MxNx4)
    x_axis, y_axis: strings to be printed on the axis labels
system: descriptor for the system analyzed
analysis: descriptor for the analysis performed and plotted
kwargs:
vmin, vmax: floats that define the limits for the color bar; if below vmin, data will be colored white; if above vmax, data will be colored red (might want to change this for aesthetics)
plt_title: string to be added as the plot title
cb_units: sting to be added to the color bar label to indicate the units of the color bar
xlim, ylim: list (or tuple) w/ two elements, setting the limits of the x/y ranges of plot
"""
vmin =0.001
vmax = None
# c = mcolors.ColorConverter().to_rgb
# bgr = make_colormap([c('blue'),c('lime'),0.50,c('lime'),c('red'),1.00,c('red')])
# bgr = make_colormap([c('red'),c('lime'),0.50,c('lime'),c('blue'),1.00,c('blue')])
# bgr.set_under('k')
# bgr.set_over('r')
# bgr.set_over('w')
# my_cmap = bgr
my_cmap = plt.cm.get_cmap('jet')
# my_cmap = plt.cm.get_cmap('gray')
# READING IN KWARG DICTIONARY INTO SPECIFIC VARIABLES
for name, value in kwargs.items():
if name == 'vmin':
vmin = value
elif name == 'vmax':
vmax = value
elif name == 'cb_units':
cb_units = value
cb_axis = '%s (%s)' %(cb_axis, value)
elif name == 'plt_title':
plt.title(r'%s' %(value), size='14')
elif name == 'xlim':
plt.xlim(value)
elif name == 'ylim':
plt.ylim(value)
plt.pcolor(matrix,cmap=my_cmap,vmin=vmin,vmax=vmax) # ,interpolation='none',origin='lower'
cb1 = plt.colorbar(extend='max',cmap=my_cmap)
cb1.set_label(r'%s' %(cb_axis), size=14)
plt.grid(b=True, which='major', axis='both', color='#808080', linestyle='--')
plt.xlabel(r'%s' %(x_axis), size=14)
plt.ylabel(r'%s' %(y_axis), size=14)
plt.savefig('%s.%s.heatmap.png' %(system, analysis),dpi=300)
plt.close()
| gpl-3.0 |
pjryan126/solid-start-careers | store/api/zillow/venv/lib/python2.7/site-packages/pandas/tseries/tests/test_converter.py | 1 | 5709 | from datetime import datetime, date
import nose
import numpy as np
from numpy.testing import assert_almost_equal as np_assert_almost_equal
from pandas import Timestamp, Period
from pandas.compat import u
import pandas.util.testing as tm
from pandas.tseries.offsets import Second, Milli, Micro
try:
import pandas.tseries.converter as converter
except ImportError:
raise nose.SkipTest("no pandas.tseries.converter, skipping")
def test_timtetonum_accepts_unicode():
assert (converter.time2num("00:01") == converter.time2num(u("00:01")))
class TestDateTimeConverter(tm.TestCase):
def setUp(self):
self.dtc = converter.DatetimeConverter()
self.tc = converter.TimeFormatter(None)
def test_convert_accepts_unicode(self):
r1 = self.dtc.convert("12:22", None, None)
r2 = self.dtc.convert(u("12:22"), None, None)
assert (r1 == r2), "DatetimeConverter.convert should accept unicode"
def test_conversion(self):
rs = self.dtc.convert(['2012-1-1'], None, None)[0]
xp = datetime(2012, 1, 1).toordinal()
self.assertEqual(rs, xp)
rs = self.dtc.convert('2012-1-1', None, None)
self.assertEqual(rs, xp)
rs = self.dtc.convert(date(2012, 1, 1), None, None)
self.assertEqual(rs, xp)
rs = self.dtc.convert(datetime(2012, 1, 1).toordinal(), None, None)
self.assertEqual(rs, xp)
rs = self.dtc.convert('2012-1-1', None, None)
self.assertEqual(rs, xp)
rs = self.dtc.convert(Timestamp('2012-1-1'), None, None)
self.assertEqual(rs, xp)
# also testing datetime64 dtype (GH8614)
rs = self.dtc.convert(np.datetime64('2012-01-01'), None, None)
self.assertEqual(rs, xp)
rs = self.dtc.convert(np.datetime64(
'2012-01-01 00:00:00+00:00'), None, None)
self.assertEqual(rs, xp)
rs = self.dtc.convert(np.array([
np.datetime64('2012-01-01 00:00:00+00:00'),
np.datetime64('2012-01-02 00:00:00+00:00')]), None, None)
self.assertEqual(rs[0], xp)
def test_conversion_float(self):
decimals = 9
rs = self.dtc.convert(
Timestamp('2012-1-1 01:02:03', tz='UTC'), None, None)
xp = converter.dates.date2num(Timestamp('2012-1-1 01:02:03', tz='UTC'))
np_assert_almost_equal(rs, xp, decimals)
rs = self.dtc.convert(
Timestamp('2012-1-1 09:02:03', tz='Asia/Hong_Kong'), None, None)
np_assert_almost_equal(rs, xp, decimals)
rs = self.dtc.convert(datetime(2012, 1, 1, 1, 2, 3), None, None)
np_assert_almost_equal(rs, xp, decimals)
def test_time_formatter(self):
self.tc(90000)
def test_dateindex_conversion(self):
decimals = 9
for freq in ('B', 'L', 'S'):
dateindex = tm.makeDateIndex(k=10, freq=freq)
rs = self.dtc.convert(dateindex, None, None)
xp = converter.dates.date2num(dateindex._mpl_repr())
np_assert_almost_equal(rs, xp, decimals)
def test_resolution(self):
def _assert_less(ts1, ts2):
val1 = self.dtc.convert(ts1, None, None)
val2 = self.dtc.convert(ts2, None, None)
if not val1 < val2:
raise AssertionError('{0} is not less than {1}.'.format(val1,
val2))
# Matplotlib's time representation using floats cannot distinguish
# intervals smaller than ~10 microsecond in the common range of years.
ts = Timestamp('2012-1-1')
_assert_less(ts, ts + Second())
_assert_less(ts, ts + Milli())
_assert_less(ts, ts + Micro(50))
class TestPeriodConverter(tm.TestCase):
def setUp(self):
self.pc = converter.PeriodConverter()
class Axis(object):
pass
self.axis = Axis()
self.axis.freq = 'D'
def test_convert_accepts_unicode(self):
r1 = self.pc.convert("2012-1-1", None, self.axis)
r2 = self.pc.convert(u("2012-1-1"), None, self.axis)
self.assert_equal(r1, r2,
"PeriodConverter.convert should accept unicode")
def test_conversion(self):
rs = self.pc.convert(['2012-1-1'], None, self.axis)[0]
xp = Period('2012-1-1').ordinal
self.assertEqual(rs, xp)
rs = self.pc.convert('2012-1-1', None, self.axis)
self.assertEqual(rs, xp)
rs = self.pc.convert([date(2012, 1, 1)], None, self.axis)[0]
self.assertEqual(rs, xp)
rs = self.pc.convert(date(2012, 1, 1), None, self.axis)
self.assertEqual(rs, xp)
rs = self.pc.convert([Timestamp('2012-1-1')], None, self.axis)[0]
self.assertEqual(rs, xp)
rs = self.pc.convert(Timestamp('2012-1-1'), None, self.axis)
self.assertEqual(rs, xp)
# FIXME
# rs = self.pc.convert(np.datetime64('2012-01-01'), None, self.axis)
# self.assertEqual(rs, xp)
#
# rs = self.pc.convert(np.datetime64('2012-01-01 00:00:00+00:00'),
# None, self.axis)
# self.assertEqual(rs, xp)
#
# rs = self.pc.convert(np.array([
# np.datetime64('2012-01-01 00:00:00+00:00'),
# np.datetime64('2012-01-02 00:00:00+00:00')]), None, self.axis)
# self.assertEqual(rs[0], xp)
def test_integer_passthrough(self):
# GH9012
rs = self.pc.convert([0, 1], None, self.axis)
xp = [0, 1]
self.assertEqual(rs, xp)
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| gpl-2.0 |
ooovector/qtlab_replacement | interleaved_benchmarking.py | 1 | 6934 | from . import sweep
import numpy as np
import matplotlib.pyplot as plt
class interleaved_benchmarking:
def __init__(self, measurer, set_seq, interleavers = None, random_sequence_num=8):
self.measurer = measurer
self.set_seq = set_seq
self.interleavers = {}
if interleavers is not None:
for name, gate in interleavers.items():
self.add_interleaver(name, gate['pulses'], gate['unitary'])
#self.initial_state_vector = np.asarray([1, 0]).T
self.random_sequence_num = random_sequence_num
self.sequence_length = 20
self.target_gate = []
#self.target_gate_unitary = np.asarray([[1,0],[0,1]], dtype=np.complex)
self.target_gate_name = 'Identity (benchmarking)'
self.reference_benchmark_result = None
self.interleaving_sequences = None
self.final_ground_state_rotation = True
self.prepare_random_sequence_before_measure = True
    # used to transform any of the |0>, |1>, |+>, |->, |i+>, |i-> states into the |0> state
    # low-budget function only appropriate for Clifford benchmarking
    # higher budget functions require an arbitrary rotation pulse generator
# which we unfortunately don't have (yet)
# maybe Chernogolovka control experiment will do this
def state_to_zero_transformer(self, psi):
good_interleavers_length = {}
# try each of the interleavers available
for name, interleaver in self.interleavers.items():
result = np.dot(interleaver['unitary'], psi)
if (1-np.abs(np.dot(result, self.initial_state_vector)))<1e-6:
                # if the gate does what we want then we append it to our pulse list
good_interleavers_length[name] = len(interleaver['pulses'])
# check our pulse list for the shortest pulse
# return the name of the best interleaver
# the whole 'pulse name' logic is stupid
# if gates are better parameterized by numbers rather than names (which is true for non-Cliffords)
name = min(good_interleavers_length, key=good_interleavers_length.get)
return name, self.interleavers[name]
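    # Hedged illustration (hypothetical pulse objects and gate name): entries in
    # self.interleavers searched above are registered via add_interleaver, e.g.
    #   rb.add_interleaver('X/2', pulse_seq=[x_half_pulse],
    #                      unitary=np.sqrt(0.5)*np.asarray([[1., -1.j], [-1.j, 1.]]))
    # so the transformer can pick the registered gate with the fewest pulses
    # that maps psi back onto |0>.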
def set_sequence_length(self, sequence_length):
self.sequence_length = sequence_length
def set_sequence_length_and_regenerate(self, sequence_length):
self.sequence_length = sequence_length
self.prepare_random_interleaving_sequences()
def set_target_pulse(self, x):
self.target_gate = x['pulses']
self.target_gate_unitary = x['unitary']
def prepare_random_interleaving_sequences(self):
self.interleaving_sequences = [self.generate_random_interleaver_sequence(self.sequence_length) for i in range(self.random_sequence_num)]
def add_interleaver(self, name, pulse_seq, unitary):
self.d = unitary.shape[0]
self.initial_state_vector = np.zeros(self.d)
self.initial_state_vector[0] = 1.
self.target_gate_unitary = np.identity(self.d, dtype=np.complex)
self.interleavers[name] = {'pulses': pulse_seq, 'unitary': unitary}
def generate_interleaver_sequence_from_names(self, names):
sequence_pulses = [self.interleavers[k]['pulses'] for k in names]
sequence_unitaries = [self.interleavers[k]['unitary'] for k in names]
psi = self.initial_state_vector.copy()
for U in sequence_unitaries:
psi = np.dot(U, psi)
rho = np.einsum('i,j->ij', np.conj(psi), psi)
return {'Gate names':names,
'Gate unitaries': sequence_unitaries,
'Pulse sequence': sequence_pulses,
'Final state vector': psi,
'Final state matrix': rho}
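# generate_random_interleaver_sequence(): draws n interleavers uniformly at random
# from the supplied gate set, tracks the resulting state vector and density matrix,
# and returns the gate names, unitaries, concatenated pulse sequence and final state.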
def generate_random_interleaver_sequence(self, n):
ilk = [k for k in self.interleavers.keys()]
ilv = [self.interleavers[k] for k in ilk]
sequence = np.random.randint (len(ilv), size = n).tolist()
sequence_pulses = [j for i in [ilv[i]['pulses'] for i in sequence] for j in i]
sequence_unitaries = [ilv[i]['unitary'] for i in sequence]
sequence_gate_names = [ilk[i] for i in sequence]
psi = self.initial_state_vector.copy()
for U in sequence_unitaries:
psi = np.dot(U, psi)
rho = np.einsum('i,j->ij', np.conj(psi), psi)
return {'Gate names':sequence_gate_names,
'Gate unitaries': sequence_unitaries,
'Pulse sequence': sequence_pulses,
'Final state vector': psi,
'Final state matrix': rho}
# TODO: density matrix evolution operator for non-Hamiltonian mechanics
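# interleave(): inserts the target gate after every randomly chosen Clifford,
# accumulates the corresponding pulses, and (optionally) appends a final
# rotation that maps the ideal state back to |0> before measurement.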
def interleave(self, sequence_gate_names, pulse, unitary, gate_name):
sequence_unitaries = [self.interleavers[i]['unitary'] for i in sequence_gate_names]
sequence_pulses = []
interleaved_sequence_gate_names = []
psi = self.initial_state_vector.copy()
for i in sequence_gate_names:
psi = np.dot(unitary, np.dot(self.interleavers[i]['unitary'], psi))
sequence_pulses.extend(self.interleavers[i]['pulses'])
sequence_pulses.extend(pulse)
interleaved_sequence_gate_names.append(i)
interleaved_sequence_gate_names.append(gate_name)
if self.final_ground_state_rotation:
final_rotation_name, final_rotation = self.state_to_zero_transformer(psi)
# we need something separate from the interleavers for the final gate
# the logic is in principle OK but the final gate should be a function, n
psi = np.dot(final_rotation['unitary'], psi)
sequence_pulses.extend(final_rotation['pulses'])
interleaved_sequence_gate_names.append(final_rotation_name)
rho = np.einsum('i,j->ij', np.conj(psi), psi)
return {'Gate names':interleaved_sequence_gate_names,
'Gate unitaries': sequence_unitaries,
'Pulse sequence': sequence_pulses,
'Final state vector': psi,
'Final state matrix': rho}
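# reference_benchmark(): runs the same measurement with the identity as the
# target gate, so the interleaved result can later be compared against the
# decay of the bare random-Clifford sequences.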
def reference_benchmark(self):
old_target_gate = self.target_gate
old_target_gate_unitary = self.target_gate_unitary
old_target_gate_name = self.target_gate_name
self.target_gate = []
self.target_gate_unitary = np.asarray([[1,0],[0,1]], dtype=np.complex)
self.target_gate_name = 'Identity (benchmarking)'
self.reference_benchmark_result = self.measure()
self.target_gate = old_target_gate
self.target_gate_unitary = old_target_gate_unitary
self.target_gate_name = old_target_gate_name
return self.reference_benchmark_result
def get_dtype(self):
dtypes = self.measurer.get_dtype()
#dtypes['Sequence'] = object
return dtypes
def get_opts(self):
opts = self.measurer.get_opts()
#opts['Sequence'] = {}
return opts
def get_points(self):
#points = tuple([])
_points = {keys: values for keys, values in self.measurer.get_points().items()}
#_points['Sequence'] = points
return _points
def set_interleaved_sequence(self, seq_id):
seq = self.interleave(self.interleaving_sequences[seq_id]['Gate names'], self.target_gate, self.target_gate_unitary, self.target_gate_name)
self.set_seq(seq['Pulse sequence'])
self.current_seq = seq
def measure(self):
if self.prepare_random_sequence_before_measure:
self.prepare_random_interleaving_sequences()
self.set_interleaved_sequence(0)
measurement = self.measurer.measure()
#measurement['Pulse sequence'] = np.array([object()])
#measurement['Sequence'] = self.current_seq['Gate names']
return measurement | gpl-3.0 |
chrsrds/scikit-learn | sklearn/linear_model/logistic.py | 1 | 91847 | """
Logistic Regression
"""
# Author: Gael Varoquaux <[email protected]>
# Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Manoj Kumar <[email protected]>
# Lars Buitinck
# Simon Wu <[email protected]>
# Arthur Mensch <[email protected]>
import numbers
import warnings
import numpy as np
from scipy import optimize, sparse
from scipy.special import expit
from joblib import Parallel, delayed, effective_n_jobs
from .base import LinearClassifierMixin, SparseCoefMixin, BaseEstimator
from .sag import sag_solver
from ..preprocessing import LabelEncoder, LabelBinarizer
from ..svm.base import _fit_liblinear
from ..utils import check_array, check_consistent_length, compute_class_weight
from ..utils import check_random_state
from ..utils.extmath import (log_logistic, safe_sparse_dot, softmax,
squared_norm)
from ..utils.extmath import row_norms
from ..utils.fixes import logsumexp
from ..utils.optimize import newton_cg, _check_optimize_result
from ..utils.validation import check_X_y
from ..utils.validation import check_is_fitted
from ..utils import deprecated
from ..exceptions import ChangedBehaviorWarning
from ..utils.multiclass import check_classification_targets
from ..utils.fixes import _joblib_parallel_args
from ..model_selection import check_cv
from ..metrics import get_scorer
# .. some helper functions for logistic_regression_path ..
def _intercept_dot(w, X, y):
"""Computes y * np.dot(X, w).
It takes into consideration if the intercept should be fit or not.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
Returns
-------
w : ndarray, shape (n_features,)
Coefficient vector without the intercept weight (w[-1]) if the
intercept should be fit. Unchanged otherwise.
c : float
The intercept.
yz : float
y * np.dot(X, w).
"""
c = 0.
if w.size == X.shape[1] + 1:
c = w[-1]
w = w[:-1]
z = safe_sparse_dot(X, w) + c
yz = y * z
return w, c, yz
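# Illustrative sketch (added for exposition, not part of scikit-learn): a
# minimal check of what _intercept_dot returns when the coefficient vector
# carries a trailing intercept entry. The _example_* name is hypothetical.
def _example_intercept_dot():
    X = np.array([[1., 2.], [3., 4.]])
    y = np.array([1., -1.])
    w_with_intercept = np.array([0.5, -0.25, 0.1])  # last entry is the intercept
    w, c, yz = _intercept_dot(w_with_intercept, X, y)
    # w comes back without the intercept, c holds it, and yz = y * (X @ w + c)
    assert w.shape == (2,) and c == 0.1
    return yz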
def _logistic_loss_and_grad(w, X, y, alpha, sample_weight=None):
"""Computes the logistic loss and gradient.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
out : float
Logistic loss.
grad : ndarray, shape (n_features,) or (n_features + 1,)
Logistic gradient.
"""
n_samples, n_features = X.shape
grad = np.empty_like(w)
w, c, yz = _intercept_dot(w, X, y)
if sample_weight is None:
sample_weight = np.ones(n_samples)
# Logistic loss is the negative of the log of the logistic function.
out = -np.sum(sample_weight * log_logistic(yz)) + .5 * alpha * np.dot(w, w)
z = expit(yz)
z0 = sample_weight * (z - 1) * y
grad[:n_features] = safe_sparse_dot(X.T, z0) + alpha * w
# Case where we fit the intercept.
if grad.shape[0] > n_features:
grad[-1] = z0.sum()
return out, grad
def _logistic_loss(w, X, y, alpha, sample_weight=None):
"""Computes the logistic loss.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
out : float
Logistic loss.
"""
w, c, yz = _intercept_dot(w, X, y)
if sample_weight is None:
sample_weight = np.ones(y.shape[0])
# Logistic loss is the negative of the log of the logistic function.
out = -np.sum(sample_weight * log_logistic(yz)) + .5 * alpha * np.dot(w, w)
return out
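# Illustrative sketch (added for exposition, not part of scikit-learn): a
# hypothetical finite-difference check that the analytic gradient returned by
# _logistic_loss_and_grad is consistent with _logistic_loss.
def _example_logistic_gradient_check(eps=1e-6):
    rng = np.random.RandomState(0)
    X = rng.randn(5, 3)
    y = np.sign(rng.randn(5))
    w = rng.randn(3)
    alpha = 1.0
    loss, grad = _logistic_loss_and_grad(w, X, y, alpha)
    num_grad = np.empty_like(w)
    for i in range(w.size):
        w_eps = w.copy()
        w_eps[i] += eps
        num_grad[i] = (_logistic_loss(w_eps, X, y, alpha) - loss) / eps
    # forward differences should agree with the analytic gradient to ~1e-5
    return np.max(np.abs(grad - num_grad))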
def _logistic_grad_hess(w, X, y, alpha, sample_weight=None):
"""Computes the gradient and the Hessian, in the case of a logistic loss.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
grad : ndarray, shape (n_features,) or (n_features + 1,)
Logistic gradient.
Hs : callable
Function that takes the gradient as a parameter and returns the
matrix product of the Hessian and gradient.
"""
n_samples, n_features = X.shape
grad = np.empty_like(w)
fit_intercept = grad.shape[0] > n_features
w, c, yz = _intercept_dot(w, X, y)
if sample_weight is None:
sample_weight = np.ones(y.shape[0])
z = expit(yz)
z0 = sample_weight * (z - 1) * y
grad[:n_features] = safe_sparse_dot(X.T, z0) + alpha * w
# Case where we fit the intercept.
if fit_intercept:
grad[-1] = z0.sum()
# The mat-vec product of the Hessian
d = sample_weight * z * (1 - z)
if sparse.issparse(X):
dX = safe_sparse_dot(sparse.dia_matrix((d, 0),
shape=(n_samples, n_samples)), X)
else:
# Precompute as much as possible
dX = d[:, np.newaxis] * X
if fit_intercept:
# Calculate the double derivative with respect to intercept
# In the case of sparse matrices this returns a matrix object.
dd_intercept = np.squeeze(np.array(dX.sum(axis=0)))
def Hs(s):
ret = np.empty_like(s)
ret[:n_features] = X.T.dot(dX.dot(s[:n_features]))
ret[:n_features] += alpha * s[:n_features]
# For the fit intercept case.
if fit_intercept:
ret[:n_features] += s[-1] * dd_intercept
ret[-1] = dd_intercept.dot(s[:n_features])
ret[-1] += d.sum() * s[-1]
return ret
return grad, Hs
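# Illustrative note (added for exposition): the Hs callable returned above
# computes Hessian-vector products without forming the Hessian explicitly, e.g.
#     grad, Hs = _logistic_grad_hess(w, X, y, alpha)
#     hv = Hs(np.ones_like(w))   # product of the Hessian with the all-ones vector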
def _multinomial_loss(w, X, Y, alpha, sample_weight):
"""Computes multinomial loss and class probabilities.
Parameters
----------
w : ndarray, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Y : ndarray, shape (n_samples, n_classes)
Transformed labels according to the output of LabelBinarizer.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,)
Array of weights that are assigned to individual samples.
Returns
-------
loss : float
Multinomial loss.
p : ndarray, shape (n_samples, n_classes)
Estimated class probabilities.
w : ndarray, shape (n_classes, n_features)
Reshaped param vector excluding intercept terms.
Reference
---------
Bishop, C. M. (2006). Pattern recognition and machine learning.
Springer. (Chapter 4.3.4)
"""
n_classes = Y.shape[1]
n_features = X.shape[1]
fit_intercept = w.size == (n_classes * (n_features + 1))
w = w.reshape(n_classes, -1)
sample_weight = sample_weight[:, np.newaxis]
if fit_intercept:
intercept = w[:, -1]
w = w[:, :-1]
else:
intercept = 0
p = safe_sparse_dot(X, w.T)
p += intercept
p -= logsumexp(p, axis=1)[:, np.newaxis]
loss = -(sample_weight * Y * p).sum()
loss += 0.5 * alpha * squared_norm(w)
p = np.exp(p, p)
return loss, p, w
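# Illustrative sketch (added for exposition, not part of scikit-learn): the
# probabilities returned by _multinomial_loss are row-normalized. Names below
# are hypothetical and only demonstrate the expected shapes.
def _example_multinomial_loss_shapes():
    rng = np.random.RandomState(0)
    X = rng.randn(4, 3)
    Y = np.eye(3)[rng.randint(3, size=4)]   # one-hot labels, shape (4, 3)
    w = np.zeros(3 * 3)                     # 3 classes x 3 features, no intercept
    loss, p, w_2d = _multinomial_loss(w, X, Y, 1.0, np.ones(4))
    assert p.shape == (4, 3) and np.allclose(p.sum(axis=1), 1.0)
    return loss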
def _multinomial_loss_grad(w, X, Y, alpha, sample_weight):
"""Computes the multinomial loss, gradient and class probabilities.
Parameters
----------
w : ndarray, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Y : ndarray, shape (n_samples, n_classes)
Transformed labels according to the output of LabelBinarizer.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,)
Array of weights that are assigned to individual samples.
Returns
-------
loss : float
Multinomial loss.
grad : ndarray, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Ravelled gradient of the multinomial loss.
p : ndarray, shape (n_samples, n_classes)
Estimated class probabilities
Reference
---------
Bishop, C. M. (2006). Pattern recognition and machine learning.
Springer. (Chapter 4.3.4)
"""
n_classes = Y.shape[1]
n_features = X.shape[1]
fit_intercept = (w.size == n_classes * (n_features + 1))
grad = np.zeros((n_classes, n_features + bool(fit_intercept)),
dtype=X.dtype)
loss, p, w = _multinomial_loss(w, X, Y, alpha, sample_weight)
sample_weight = sample_weight[:, np.newaxis]
diff = sample_weight * (p - Y)
grad[:, :n_features] = safe_sparse_dot(diff.T, X)
grad[:, :n_features] += alpha * w
if fit_intercept:
grad[:, -1] = diff.sum(axis=0)
return loss, grad.ravel(), p
def _multinomial_grad_hess(w, X, Y, alpha, sample_weight):
"""
Computes the gradient and the Hessian, in the case of a multinomial loss.
Parameters
----------
w : ndarray, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Y : ndarray, shape (n_samples, n_classes)
Transformed labels according to the output of LabelBinarizer.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,)
Array of weights that are assigned to individual samples.
Returns
-------
grad : array, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Ravelled gradient of the multinomial loss.
hessp : callable
Function that takes in a vector input of shape (n_classes * n_features)
or (n_classes * (n_features + 1)) and returns matrix-vector product
with hessian.
References
----------
Barak A. Pearlmutter (1993). Fast Exact Multiplication by the Hessian.
http://www.bcl.hamilton.ie/~barak/papers/nc-hessian.pdf
"""
n_features = X.shape[1]
n_classes = Y.shape[1]
fit_intercept = w.size == (n_classes * (n_features + 1))
# `loss` is unused. Refactoring to avoid computing it does not
# significantly speed up the computation and decreases readability
loss, grad, p = _multinomial_loss_grad(w, X, Y, alpha, sample_weight)
sample_weight = sample_weight[:, np.newaxis]
# Hessian-vector product derived by applying the R-operator on the gradient
# of the multinomial loss function.
def hessp(v):
v = v.reshape(n_classes, -1)
if fit_intercept:
inter_terms = v[:, -1]
v = v[:, :-1]
else:
inter_terms = 0
# r_yhat holds the result of applying the R-operator on the multinomial
# estimator.
r_yhat = safe_sparse_dot(X, v.T)
r_yhat += inter_terms
r_yhat += (-p * r_yhat).sum(axis=1)[:, np.newaxis]
r_yhat *= p
r_yhat *= sample_weight
hessProd = np.zeros((n_classes, n_features + bool(fit_intercept)))
hessProd[:, :n_features] = safe_sparse_dot(r_yhat.T, X)
hessProd[:, :n_features] += v * alpha
if fit_intercept:
hessProd[:, -1] = r_yhat.sum(axis=0)
return hessProd.ravel()
return grad, hessp
def _check_solver(solver, penalty, dual):
all_solvers = ['liblinear', 'newton-cg', 'lbfgs', 'sag', 'saga']
if solver not in all_solvers:
raise ValueError("Logistic Regression supports only solvers in %s, got"
" %s." % (all_solvers, solver))
all_penalties = ['l1', 'l2', 'elasticnet', 'none']
if penalty not in all_penalties:
raise ValueError("Logistic Regression supports only penalties in %s,"
" got %s." % (all_penalties, penalty))
if solver not in ['liblinear', 'saga'] and penalty not in ('l2', 'none'):
raise ValueError("Solver %s supports only 'l2' or 'none' penalties, "
"got %s penalty." % (solver, penalty))
if solver != 'liblinear' and dual:
raise ValueError("Solver %s supports only "
"dual=False, got dual=%s" % (solver, dual))
if penalty == 'elasticnet' and solver != 'saga':
raise ValueError("Only 'saga' solver supports elasticnet penalty,"
" got solver={}.".format(solver))
if solver == 'liblinear' and penalty == 'none':
raise ValueError(
"penalty='none' is not supported for the liblinear solver"
)
return solver
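# Illustrative note (added for exposition): _check_solver enforces the
# solver/penalty compatibility rules spelled out above, e.g.
#     _check_solver('lbfgs', 'l2', dual=False)         # returns 'lbfgs'
#     _check_solver('saga', 'elasticnet', dual=False)  # returns 'saga'
#     _check_solver('lbfgs', 'l1', dual=False)         # raises ValueError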
def _check_multi_class(multi_class, solver, n_classes):
if multi_class == 'auto':
if solver == 'liblinear':
multi_class = 'ovr'
elif n_classes > 2:
multi_class = 'multinomial'
else:
multi_class = 'ovr'
if multi_class not in ('multinomial', 'ovr'):
raise ValueError("multi_class should be 'multinomial', 'ovr' or "
"'auto'. Got %s." % multi_class)
if multi_class == 'multinomial' and solver == 'liblinear':
raise ValueError("Solver %s does not support "
"a multinomial backend." % solver)
return multi_class
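# Illustrative note (added for exposition): with multi_class='auto',
# _check_multi_class('auto', 'lbfgs', 3) resolves to 'multinomial', while
# _check_multi_class('auto', 'liblinear', 3) resolves to 'ovr'.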
@deprecated('logistic_regression_path was deprecated in version 0.21 and '
'will be removed in version 0.23.0')
def logistic_regression_path(X, y, pos_class=None, Cs=10, fit_intercept=True,
max_iter=100, tol=1e-4, verbose=0,
solver='lbfgs', coef=None,
class_weight=None, dual=False, penalty='l2',
intercept_scaling=1., multi_class='auto',
random_state=None, check_input=True,
max_squared_sum=None, sample_weight=None,
l1_ratio=None):
"""Compute a Logistic Regression model for a list of regularization
parameters.
This is an implementation that uses the result of the previous model
to speed up computations along the set of solutions, making it faster
than sequentially calling LogisticRegression for the different parameters.
Note that there will be no speedup with liblinear solver, since it does
not handle warm-starting.
.. deprecated:: 0.21
``logistic_regression_path`` was deprecated in version 0.21 and will
be removed in 0.23.
Read more in the :ref:`User Guide <logistic_regression>`.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Input data, target values.
pos_class : int, None
The class with respect to which we perform a one-vs-all fit.
If None, then it is assumed that the given problem is binary.
Cs : int | array-like, shape (n_cs,)
List of values for the regularization parameter or integer specifying
the number of regularization parameters that should be used. In this
case, the parameters will be chosen in a logarithmic scale between
1e-4 and 1e4.
fit_intercept : bool
Whether to fit an intercept for the model. In this case the shape of
the returned array is (n_cs, n_features + 1).
max_iter : int
Maximum number of iterations for the solver.
tol : float
Stopping criterion. For the newton-cg and lbfgs solvers, the iteration
will stop when ``max{|g_i | i = 1, ..., n} <= tol``
where ``g_i`` is the i-th component of the gradient.
verbose : int
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
solver : {'lbfgs', 'newton-cg', 'liblinear', 'sag', 'saga'}
Numerical solver to use.
coef : array-like, shape (n_features,), default None
Initialization value for coefficients of logistic regression.
Useless for liblinear solver.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
penalty : str, 'l1', 'l2', or 'elasticnet'
Used to specify the norm used in the penalization. The 'newton-cg',
'sag' and 'lbfgs' solvers support only l2 penalties. 'elasticnet' is
only supported by the 'saga' solver.
intercept_scaling : float, default 1.
Useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equal to
intercept_scaling is appended to the instance vector.
The intercept becomes ``intercept_scaling * synthetic_feature_weight``.
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
multi_class : {'ovr', 'multinomial', 'auto'}, default='auto'
If the option chosen is 'ovr', then a binary problem is fit for each
label. For 'multinomial' the loss minimised is the multinomial loss fit
across the entire probability distribution, *even when the data is
binary*. 'multinomial' is unavailable when solver='liblinear'.
'auto' selects 'ovr' if the data is binary, or if solver='liblinear',
and otherwise selects 'multinomial'.
.. versionadded:: 0.18
Stochastic Average Gradient descent solver for 'multinomial' case.
.. versionchanged:: 0.22
Default changed from 'ovr' to 'auto' in 0.22.
random_state : int, RandomState instance or None, optional, default None
The seed of the pseudo random number generator to use when shuffling
the data. If int, random_state is the seed used by the random number
generator; If RandomState instance, random_state is the random number
generator; If None, the random number generator is the RandomState
instance used by `np.random`. Used when ``solver`` == 'sag' or
'liblinear'.
check_input : bool, default True
If False, the input arrays X and y will not be checked.
max_squared_sum : float, default None
Maximum squared sum of X over samples. Used only in SAG solver.
If None, it will be computed, going through all the samples.
The value should be precomputed to speed up cross validation.
sample_weight : array-like, shape(n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
l1_ratio : float or None, optional (default=None)
The Elastic-Net mixing parameter, with ``0 <= l1_ratio <= 1``. Only
used if ``penalty='elasticnet'``. Setting ``l1_ratio=0`` is equivalent
to using ``penalty='l2'``, while setting ``l1_ratio=1`` is equivalent
to using ``penalty='l1'``. For ``0 < l1_ratio <1``, the penalty is a
combination of L1 and L2.
Returns
-------
coefs : ndarray, shape (n_cs, n_features) or (n_cs, n_features + 1)
List of coefficients for the Logistic Regression model. If
fit_intercept is set to True then the second dimension will be
n_features + 1, where the last item represents the intercept. For
``multiclass='multinomial'``, the shape is (n_classes, n_cs,
n_features) or (n_classes, n_cs, n_features + 1).
Cs : ndarray
Grid of Cs used for cross-validation.
n_iter : array, shape (n_cs,)
Actual number of iteration for each Cs.
Notes
-----
You might get slightly different results with the solver liblinear than
with the others since this uses LIBLINEAR which penalizes the intercept.
.. versionchanged:: 0.19
The "copy" parameter was removed.
"""
return _logistic_regression_path(
X, y, pos_class=None, Cs=10, fit_intercept=True, max_iter=100,
tol=1e-4, verbose=0, solver='lbfgs', coef=None, class_weight=None,
dual=False, penalty='l2', intercept_scaling=1., multi_class='auto',
random_state=None, check_input=True, max_squared_sum=None,
sample_weight=None, l1_ratio=None)
def _logistic_regression_path(X, y, pos_class=None, Cs=10, fit_intercept=True,
max_iter=100, tol=1e-4, verbose=0,
solver='lbfgs', coef=None,
class_weight=None, dual=False, penalty='l2',
intercept_scaling=1., multi_class='auto',
random_state=None, check_input=True,
max_squared_sum=None, sample_weight=None,
l1_ratio=None):
"""Compute a Logistic Regression model for a list of regularization
parameters.
This is an implementation that uses the result of the previous model
to speed up computations along the set of solutions, making it faster
than sequentially calling LogisticRegression for the different parameters.
Note that there will be no speedup with liblinear solver, since it does
not handle warm-starting.
Read more in the :ref:`User Guide <logistic_regression>`.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Input data, target values.
pos_class : int, None
The class with respect to which we perform a one-vs-all fit.
If None, then it is assumed that the given problem is binary.
Cs : int | array-like, shape (n_cs,)
List of values for the regularization parameter or integer specifying
the number of regularization parameters that should be used. In this
case, the parameters will be chosen in a logarithmic scale between
1e-4 and 1e4.
fit_intercept : bool
Whether to fit an intercept for the model. In this case the shape of
the returned array is (n_cs, n_features + 1).
max_iter : int
Maximum number of iterations for the solver.
tol : float
Stopping criterion. For the newton-cg and lbfgs solvers, the iteration
will stop when ``max{|g_i | i = 1, ..., n} <= tol``
where ``g_i`` is the i-th component of the gradient.
verbose : int
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
solver : {'lbfgs', 'newton-cg', 'liblinear', 'sag', 'saga'}
Numerical solver to use.
coef : array-like, shape (n_features,), default None
Initialization value for coefficients of logistic regression.
Useless for liblinear solver.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
penalty : str, 'l1', 'l2', or 'elasticnet'
Used to specify the norm used in the penalization. The 'newton-cg',
'sag' and 'lbfgs' solvers support only l2 penalties. 'elasticnet' is
only supported by the 'saga' solver.
intercept_scaling : float, default 1.
Useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equal to
intercept_scaling is appended to the instance vector.
The intercept becomes ``intercept_scaling * synthetic_feature_weight``.
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
multi_class : {'ovr', 'multinomial', 'auto'}, default='auto'
If the option chosen is 'ovr', then a binary problem is fit for each
label. For 'multinomial' the loss minimised is the multinomial loss fit
across the entire probability distribution, *even when the data is
binary*. 'multinomial' is unavailable when solver='liblinear'.
'auto' selects 'ovr' if the data is binary, or if solver='liblinear',
and otherwise selects 'multinomial'.
.. versionadded:: 0.18
Stochastic Average Gradient descent solver for 'multinomial' case.
.. versionchanged:: 0.22
Default changed from 'ovr' to 'auto' in 0.22.
random_state : int, RandomState instance or None, optional, default None
The seed of the pseudo random number generator to use when shuffling
the data. If int, random_state is the seed used by the random number
generator; If RandomState instance, random_state is the random number
generator; If None, the random number generator is the RandomState
instance used by `np.random`. Used when ``solver`` == 'sag' or
'liblinear'.
check_input : bool, default True
If False, the input arrays X and y will not be checked.
max_squared_sum : float, default None
Maximum squared sum of X over samples. Used only in SAG solver.
If None, it will be computed, going through all the samples.
The value should be precomputed to speed up cross validation.
sample_weight : array-like, shape(n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
l1_ratio : float or None, optional (default=None)
The Elastic-Net mixing parameter, with ``0 <= l1_ratio <= 1``. Only
used if ``penalty='elasticnet'``. Setting ``l1_ratio=0`` is equivalent
to using ``penalty='l2'``, while setting ``l1_ratio=1`` is equivalent
to using ``penalty='l1'``. For ``0 < l1_ratio <1``, the penalty is a
combination of L1 and L2.
Returns
-------
coefs : ndarray, shape (n_cs, n_features) or (n_cs, n_features + 1)
List of coefficients for the Logistic Regression model. If
fit_intercept is set to True then the second dimension will be
n_features + 1, where the last item represents the intercept. For
``multiclass='multinomial'``, the shape is (n_classes, n_cs,
n_features) or (n_classes, n_cs, n_features + 1).
Cs : ndarray
Grid of Cs used for cross-validation.
n_iter : array, shape (n_cs,)
Actual number of iteration for each Cs.
Notes
-----
You might get slightly different results with the solver liblinear than
with the others since this uses LIBLINEAR which penalizes the intercept.
.. versionchanged:: 0.19
The "copy" parameter was removed.
"""
if isinstance(Cs, numbers.Integral):
Cs = np.logspace(-4, 4, Cs)
solver = _check_solver(solver, penalty, dual)
# Preprocessing.
if check_input:
X = check_array(X, accept_sparse='csr', dtype=np.float64,
accept_large_sparse=solver != 'liblinear')
y = check_array(y, ensure_2d=False, dtype=None)
check_consistent_length(X, y)
_, n_features = X.shape
classes = np.unique(y)
random_state = check_random_state(random_state)
multi_class = _check_multi_class(multi_class, solver, len(classes))
if pos_class is None and multi_class != 'multinomial':
if (classes.size > 2):
raise ValueError('To fit OvR, use the pos_class argument')
# np.unique(y) gives labels in sorted order.
pos_class = classes[1]
# If sample weights exist, convert them to array (support for lists)
# and check length
# Otherwise set them to 1 for all examples
if sample_weight is not None:
sample_weight = np.array(sample_weight, dtype=X.dtype, order='C')
check_consistent_length(y, sample_weight)
else:
sample_weight = np.ones(X.shape[0], dtype=X.dtype)
# If class_weights is a dict (provided by the user), the weights
# are assigned to the original labels. If it is "balanced", then
# the class_weights are assigned after masking the labels with a OvR.
le = LabelEncoder()
if isinstance(class_weight, dict) or multi_class == 'multinomial':
class_weight_ = compute_class_weight(class_weight, classes, y)
sample_weight *= class_weight_[le.fit_transform(y)]
# For doing a ovr, we need to mask the labels first. for the
# multinomial case this is not necessary.
if multi_class == 'ovr':
w0 = np.zeros(n_features + int(fit_intercept), dtype=X.dtype)
mask_classes = np.array([-1, 1])
mask = (y == pos_class)
y_bin = np.ones(y.shape, dtype=X.dtype)
y_bin[~mask] = -1.
# for compute_class_weight
if class_weight == "balanced":
class_weight_ = compute_class_weight(class_weight, mask_classes,
y_bin)
sample_weight *= class_weight_[le.fit_transform(y_bin)]
else:
if solver not in ['sag', 'saga']:
lbin = LabelBinarizer()
Y_multi = lbin.fit_transform(y)
if Y_multi.shape[1] == 1:
Y_multi = np.hstack([1 - Y_multi, Y_multi])
else:
# SAG multinomial solver needs LabelEncoder, not LabelBinarizer
le = LabelEncoder()
Y_multi = le.fit_transform(y).astype(X.dtype, copy=False)
w0 = np.zeros((classes.size, n_features + int(fit_intercept)),
order='F', dtype=X.dtype)
if coef is not None:
# it must work both giving the bias term and not
if multi_class == 'ovr':
if coef.size not in (n_features, w0.size):
raise ValueError(
'Initialization coef is of shape %d, expected shape '
'%d or %d' % (coef.size, n_features, w0.size))
w0[:coef.size] = coef
else:
# For binary problems coef.shape[0] should be 1, otherwise it
# should be classes.size.
n_classes = classes.size
if n_classes == 2:
n_classes = 1
if (coef.shape[0] != n_classes or
coef.shape[1] not in (n_features, n_features + 1)):
raise ValueError(
'Initialization coef is of shape (%d, %d), expected '
'shape (%d, %d) or (%d, %d)' % (
coef.shape[0], coef.shape[1], classes.size,
n_features, classes.size, n_features + 1))
if n_classes == 1:
w0[0, :coef.shape[1]] = -coef
w0[1, :coef.shape[1]] = coef
else:
w0[:, :coef.shape[1]] = coef
if multi_class == 'multinomial':
# scipy.optimize.minimize and newton-cg accepts only
# ravelled parameters.
if solver in ['lbfgs', 'newton-cg']:
w0 = w0.ravel()
target = Y_multi
if solver == 'lbfgs':
func = lambda x, *args: _multinomial_loss_grad(x, *args)[0:2]
elif solver == 'newton-cg':
func = lambda x, *args: _multinomial_loss(x, *args)[0]
grad = lambda x, *args: _multinomial_loss_grad(x, *args)[1]
hess = _multinomial_grad_hess
warm_start_sag = {'coef': w0.T}
else:
target = y_bin
if solver == 'lbfgs':
func = _logistic_loss_and_grad
elif solver == 'newton-cg':
func = _logistic_loss
grad = lambda x, *args: _logistic_loss_and_grad(x, *args)[1]
hess = _logistic_grad_hess
warm_start_sag = {'coef': np.expand_dims(w0, axis=1)}
coefs = list()
n_iter = np.zeros(len(Cs), dtype=np.int32)
for i, C in enumerate(Cs):
if solver == 'lbfgs':
iprint = [-1, 50, 1, 100, 101][
np.searchsorted(np.array([0, 1, 2, 3]), verbose)]
opt_res = optimize.minimize(
func, w0, method="L-BFGS-B", jac=True,
args=(X, target, 1. / C, sample_weight),
options={"iprint": iprint, "gtol": tol, "maxiter": max_iter}
)
n_iter_i = _check_optimize_result(solver, opt_res, max_iter)
w0, loss = opt_res.x, opt_res.fun
elif solver == 'newton-cg':
args = (X, target, 1. / C, sample_weight)
w0, n_iter_i = newton_cg(hess, func, grad, w0, args=args,
maxiter=max_iter, tol=tol)
elif solver == 'liblinear':
coef_, intercept_, n_iter_i, = _fit_liblinear(
X, target, C, fit_intercept, intercept_scaling, None,
penalty, dual, verbose, max_iter, tol, random_state,
sample_weight=sample_weight)
if fit_intercept:
w0 = np.concatenate([coef_.ravel(), intercept_])
else:
w0 = coef_.ravel()
elif solver in ['sag', 'saga']:
if multi_class == 'multinomial':
target = target.astype(X.dtype, copy=False)
loss = 'multinomial'
else:
loss = 'log'
# alpha is for L2-norm, beta is for L1-norm
if penalty == 'l1':
alpha = 0.
beta = 1. / C
elif penalty == 'l2':
alpha = 1. / C
beta = 0.
else: # Elastic-Net penalty
alpha = (1. / C) * (1 - l1_ratio)
beta = (1. / C) * l1_ratio
w0, n_iter_i, warm_start_sag = sag_solver(
X, target, sample_weight, loss, alpha,
beta, max_iter, tol,
verbose, random_state, False, max_squared_sum, warm_start_sag,
is_saga=(solver == 'saga'))
else:
raise ValueError("solver must be one of {'liblinear', 'lbfgs', "
"'newton-cg', 'sag'}, got '%s' instead" % solver)
if multi_class == 'multinomial':
n_classes = max(2, classes.size)
multi_w0 = np.reshape(w0, (n_classes, -1))
if n_classes == 2:
multi_w0 = multi_w0[1][np.newaxis, :]
coefs.append(multi_w0.copy())
else:
coefs.append(w0.copy())
n_iter[i] = n_iter_i
return np.array(coefs), np.array(Cs), n_iter
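# Illustrative sketch (an assumption added for exposition; this private helper
# is normally driven by LogisticRegression and LogisticRegressionCV): tracing
# coefficients along a small regularization path on synthetic binary data.
def _example_regression_path():
    from sklearn.datasets import make_classification
    X, y = make_classification(n_samples=50, n_features=4, random_state=0)
    coefs, Cs, n_iter = _logistic_regression_path(X, y, Cs=3, solver='lbfgs')
    # one coefficient vector (plus intercept) per value of C
    assert coefs.shape == (3, X.shape[1] + 1)
    return Cs, n_iter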
# helper function for LogisticCV
def _log_reg_scoring_path(X, y, train, test, pos_class=None, Cs=10,
scoring=None, fit_intercept=False,
max_iter=100, tol=1e-4, class_weight=None,
verbose=0, solver='lbfgs', penalty='l2',
dual=False, intercept_scaling=1.,
multi_class='auto', random_state=None,
max_squared_sum=None, sample_weight=None,
l1_ratio=None):
"""Computes scores across logistic_regression_path
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target labels.
train : list of indices
The indices of the train set.
test : list of indices
The indices of the test set.
pos_class : int, None
The class with respect to which we perform a one-vs-all fit.
If None, then it is assumed that the given problem is binary.
Cs : list of floats | int
Each of the values in Cs describes the inverse of
regularization strength. If Cs is an int, then a grid of Cs
values is chosen in a logarithmic scale between 1e-4 and 1e4.
If not provided, then a fixed set of values for Cs are used.
scoring : callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``. For a list of scoring functions
that can be used, look at :mod:`sklearn.metrics`. The
default scoring option used is accuracy_score.
fit_intercept : bool
If False, then the bias term is set to zero. Else the last
term of each coef_ gives us the intercept.
max_iter : int
Maximum number of iterations for the solver.
tol : float
Tolerance for stopping criteria.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
verbose : int
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
solver : {'lbfgs', 'newton-cg', 'liblinear', 'sag', 'saga'}
Decides which solver to use.
penalty : str, 'l1', 'l2', or 'elasticnet'
Used to specify the norm used in the penalization. The 'newton-cg',
'sag' and 'lbfgs' solvers support only l2 penalties. 'elasticnet' is
only supported by the 'saga' solver.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
intercept_scaling : float, default 1.
Useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
multi_class : {'ovr', 'multinomial'}
If the option chosen is 'ovr', then a binary problem is fit for each
label. For 'multinomial' the loss minimised is the multinomial loss fit
across the entire probability distribution, *even when the data is
binary*. 'multinomial' is unavailable when solver='liblinear'.
random_state : int, RandomState instance or None, optional, default None
The seed of the pseudo random number generator to use when shuffling
the data. If int, random_state is the seed used by the random number
generator; If RandomState instance, random_state is the random number
generator; If None, the random number generator is the RandomState
instance used by `np.random`. Used when ``solver`` == 'sag' and
'liblinear'.
max_squared_sum : float, default None
Maximum squared sum of X over samples. Used only in SAG solver.
If None, it will be computed, going through all the samples.
The value should be precomputed to speed up cross validation.
sample_weight : array-like, shape(n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
l1_ratio : float or None, optional (default=None)
The Elastic-Net mixing parameter, with ``0 <= l1_ratio <= 1``. Only
used if ``penalty='elasticnet'``. Setting ``l1_ratio=0`` is equivalent
to using ``penalty='l2'``, while setting ``l1_ratio=1`` is equivalent
to using ``penalty='l1'``. For ``0 < l1_ratio <1``, the penalty is a
combination of L1 and L2.
Returns
-------
coefs : ndarray, shape (n_cs, n_features) or (n_cs, n_features + 1)
List of coefficients for the Logistic Regression model. If
fit_intercept is set to True then the second dimension will be
n_features + 1, where the last item represents the intercept.
Cs : ndarray
Grid of Cs used for cross-validation.
scores : ndarray, shape (n_cs,)
Scores obtained for each Cs.
n_iter : array, shape(n_cs,)
Actual number of iteration for each Cs.
"""
X_train = X[train]
X_test = X[test]
y_train = y[train]
y_test = y[test]
if sample_weight is not None:
sample_weight = check_array(sample_weight, ensure_2d=False)
check_consistent_length(y, sample_weight)
sample_weight = sample_weight[train]
coefs, Cs, n_iter = _logistic_regression_path(
X_train, y_train, Cs=Cs, l1_ratio=l1_ratio,
fit_intercept=fit_intercept, solver=solver, max_iter=max_iter,
class_weight=class_weight, pos_class=pos_class,
multi_class=multi_class, tol=tol, verbose=verbose, dual=dual,
penalty=penalty, intercept_scaling=intercept_scaling,
random_state=random_state, check_input=False,
max_squared_sum=max_squared_sum, sample_weight=sample_weight)
log_reg = LogisticRegression(solver=solver, multi_class=multi_class)
# The score method of Logistic Regression has a classes_ attribute.
if multi_class == 'ovr':
log_reg.classes_ = np.array([-1, 1])
elif multi_class == 'multinomial':
log_reg.classes_ = np.unique(y_train)
else:
raise ValueError("multi_class should be either multinomial or ovr, "
"got %d" % multi_class)
if pos_class is not None:
mask = (y_test == pos_class)
y_test = np.ones(y_test.shape, dtype=np.float64)
y_test[~mask] = -1.
scores = list()
if isinstance(scoring, str):
scoring = get_scorer(scoring)
for w in coefs:
if multi_class == 'ovr':
w = w[np.newaxis, :]
if fit_intercept:
log_reg.coef_ = w[:, :-1]
log_reg.intercept_ = w[:, -1]
else:
log_reg.coef_ = w
log_reg.intercept_ = 0.
if scoring is None:
scores.append(log_reg.score(X_test, y_test))
else:
scores.append(scoring(log_reg, X_test, y_test))
return coefs, Cs, np.array(scores), n_iter
class LogisticRegression(BaseEstimator, LinearClassifierMixin,
SparseCoefMixin):
"""Logistic Regression (aka logit, MaxEnt) classifier.
In the multiclass case, the training algorithm uses the one-vs-rest (OvR)
scheme if the 'multi_class' option is set to 'ovr', and uses the
cross-entropy loss if the 'multi_class' option is set to 'multinomial'.
(Currently the 'multinomial' option is supported only by the 'lbfgs',
'sag', 'saga' and 'newton-cg' solvers.)
This class implements regularized logistic regression using the
'liblinear' library, 'newton-cg', 'sag', 'saga' and 'lbfgs' solvers. **Note
that regularization is applied by default**. It can handle both dense
and sparse input. Use C-ordered arrays or CSR matrices containing 64-bit
floats for optimal performance; any other input format will be converted
(and copied).
The 'newton-cg', 'sag', and 'lbfgs' solvers support only L2 regularization
with primal formulation, or no regularization. The 'liblinear' solver
supports both L1 and L2 regularization, with a dual formulation only for
the L2 penalty. The Elastic-Net regularization is only supported by the
'saga' solver.
Read more in the :ref:`User Guide <logistic_regression>`.
Parameters
----------
penalty : str, 'l1', 'l2', 'elasticnet' or 'none', optional (default='l2')
Used to specify the norm used in the penalization. The 'newton-cg',
'sag' and 'lbfgs' solvers support only l2 penalties. 'elasticnet' is
only supported by the 'saga' solver. If 'none' (not supported by the
liblinear solver), no regularization is applied.
.. versionadded:: 0.19
l1 penalty with SAGA solver (allowing 'multinomial' + L1)
dual : bool, optional (default=False)
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
tol : float, optional (default=1e-4)
Tolerance for stopping criteria.
C : float, optional (default=1.0)
Inverse of regularization strength; must be a positive float.
Like in support vector machines, smaller values specify stronger
regularization.
fit_intercept : bool, optional (default=True)
Specifies if a constant (a.k.a. bias or intercept) should be
added to the decision function.
intercept_scaling : float, optional (default=1)
Useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equal to
intercept_scaling is appended to the instance vector.
The intercept becomes ``intercept_scaling * synthetic_feature_weight``.
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
class_weight : dict or 'balanced', optional (default=None)
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
.. versionadded:: 0.17
*class_weight='balanced'*
random_state : int, RandomState instance or None, optional (default=None)
The seed of the pseudo random number generator to use when shuffling
the data. If int, random_state is the seed used by the random number
generator; If RandomState instance, random_state is the random number
generator; If None, the random number generator is the RandomState
instance used by `np.random`. Used when ``solver`` == 'sag' or
'liblinear'.
solver : str, {'newton-cg', 'lbfgs', 'liblinear', 'sag', 'saga'}, \
optional (default='lbfgs').
Algorithm to use in the optimization problem.
- For small datasets, 'liblinear' is a good choice, whereas 'sag' and
'saga' are faster for large ones.
- For multiclass problems, only 'newton-cg', 'sag', 'saga' and 'lbfgs'
handle multinomial loss; 'liblinear' is limited to one-versus-rest
schemes.
- 'newton-cg', 'lbfgs', 'sag' and 'saga' handle L2 or no penalty
- 'liblinear' and 'saga' also handle L1 penalty
- 'saga' also supports 'elasticnet' penalty
- 'liblinear' does not support setting ``penalty='none'``
Note that 'sag' and 'saga' fast convergence is only guaranteed on
features with approximately the same scale. You can
preprocess the data with a scaler from sklearn.preprocessing.
.. versionadded:: 0.17
Stochastic Average Gradient descent solver.
.. versionadded:: 0.19
SAGA solver.
.. versionchanged:: 0.22
The default solver changed from 'liblinear' to 'lbfgs' in 0.22.
max_iter : int, optional (default=100)
Maximum number of iterations taken for the solvers to converge.
multi_class : {'ovr', 'multinomial', 'auto'}, default='auto'
If the option chosen is 'ovr', then a binary problem is fit for each
label. For 'multinomial' the loss minimised is the multinomial loss fit
across the entire probability distribution, *even when the data is
binary*. 'multinomial' is unavailable when solver='liblinear'.
'auto' selects 'ovr' if the data is binary, or if solver='liblinear',
and otherwise selects 'multinomial'.
.. versionadded:: 0.18
Stochastic Average Gradient descent solver for 'multinomial' case.
.. versionchanged:: 0.22
Default changed from 'ovr' to 'auto' in 0.22.
verbose : int, optional (default=0)
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
warm_start : bool, optional (default=False)
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
Useless for liblinear solver. See :term:`the Glossary <warm_start>`.
.. versionadded:: 0.17
*warm_start* to support *lbfgs*, *newton-cg*, *sag*, *saga* solvers.
n_jobs : int or None, optional (default=None)
Number of CPU cores used when parallelizing over classes if
multi_class='ovr'". This parameter is ignored when the ``solver`` is
set to 'liblinear' regardless of whether 'multi_class' is specified or
not. ``None`` means 1 unless in a :obj:`joblib.parallel_backend`
context. ``-1`` means using all processors.
See :term:`Glossary <n_jobs>` for more details.
l1_ratio : float or None, optional (default=None)
The Elastic-Net mixing parameter, with ``0 <= l1_ratio <= 1``. Only
used if ``penalty='elasticnet'``. Setting ``l1_ratio=0`` is equivalent
to using ``penalty='l2'``, while setting ``l1_ratio=1`` is equivalent
to using ``penalty='l1'``. For ``0 < l1_ratio <1``, the penalty is a
combination of L1 and L2.
Attributes
----------
classes_ : array, shape (n_classes, )
A list of class labels known to the classifier.
coef_ : array, shape (1, n_features) or (n_classes, n_features)
Coefficient of the features in the decision function.
`coef_` is of shape (1, n_features) when the given problem is binary.
In particular, when `multi_class='multinomial'`, `coef_` corresponds
to outcome 1 (True) and `-coef_` corresponds to outcome 0 (False).
intercept_ : array, shape (1,) or (n_classes,)
Intercept (a.k.a. bias) added to the decision function.
If `fit_intercept` is set to False, the intercept is set to zero.
`intercept_` is of shape (1,) when the given problem is binary.
In particular, when `multi_class='multinomial'`, `intercept_`
corresponds to outcome 1 (True) and `-intercept_` corresponds to
outcome 0 (False).
n_iter_ : array, shape (n_classes,) or (1, )
Actual number of iterations for all classes. If binary or multinomial,
it returns only 1 element. For liblinear solver, only the maximum
number of iteration across all classes is given.
.. versionchanged:: 0.20
In SciPy <= 1.0.0 the number of lbfgs iterations may exceed
``max_iter``. ``n_iter_`` will now report at most ``max_iter``.
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.linear_model import LogisticRegression
>>> X, y = load_iris(return_X_y=True)
>>> clf = LogisticRegression(random_state=0).fit(X, y)
>>> clf.predict(X[:2, :])
array([0, 0])
>>> clf.predict_proba(X[:2, :])
array([[9.8...e-01, 1.8...e-02, 1.4...e-08],
[9.7...e-01, 2.8...e-02, ...e-08]])
>>> clf.score(X, y)
0.97...
See also
--------
SGDClassifier : incrementally trained logistic regression (when given
the parameter ``loss="log"``).
LogisticRegressionCV : Logistic regression with built-in cross validation
Notes
-----
The underlying C implementation uses a random number generator to
select features when fitting the model. It is thus not uncommon
to have slightly different results for the same input data. If
that happens, try with a smaller tol parameter.
Predict output may not match that of standalone liblinear in certain
cases. See :ref:`differences from liblinear <liblinear_differences>`
in the narrative documentation.
References
----------
L-BFGS-B -- Software for Large-scale Bound-constrained Optimization
Ciyou Zhu, Richard Byrd, Jorge Nocedal and Jose Luis Morales.
http://users.iems.northwestern.edu/~nocedal/lbfgsb.html
LIBLINEAR -- A Library for Large Linear Classification
https://www.csie.ntu.edu.tw/~cjlin/liblinear/
SAG -- Mark Schmidt, Nicolas Le Roux, and Francis Bach
Minimizing Finite Sums with the Stochastic Average Gradient
https://hal.inria.fr/hal-00860051/document
SAGA -- Defazio, A., Bach F. & Lacoste-Julien S. (2014).
SAGA: A Fast Incremental Gradient Method With Support
for Non-Strongly Convex Composite Objectives
https://arxiv.org/abs/1407.0202
Hsiang-Fu Yu, Fang-Lan Huang, Chih-Jen Lin (2011). Dual coordinate descent
methods for logistic regression and maximum entropy models.
Machine Learning 85(1-2):41-75.
https://www.csie.ntu.edu.tw/~cjlin/papers/maxent_dual.pdf
"""
def __init__(self, penalty='l2', dual=False, tol=1e-4, C=1.0,
fit_intercept=True, intercept_scaling=1, class_weight=None,
random_state=None, solver='lbfgs', max_iter=100,
multi_class='auto', verbose=0, warm_start=False, n_jobs=None,
l1_ratio=None):
self.penalty = penalty
self.dual = dual
self.tol = tol
self.C = C
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.class_weight = class_weight
self.random_state = random_state
self.solver = solver
self.max_iter = max_iter
self.multi_class = multi_class
self.verbose = verbose
self.warm_start = warm_start
self.n_jobs = n_jobs
self.l1_ratio = l1_ratio
def fit(self, X, y, sample_weight=None):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target vector relative to X.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
.. versionadded:: 0.17
*sample_weight* support to LogisticRegression.
Returns
-------
self : object
Notes
-----
The SAGA solver supports both float64 and float32 bit arrays.
"""
solver = _check_solver(self.solver, self.penalty, self.dual)
if not isinstance(self.C, numbers.Number) or self.C < 0:
raise ValueError("Penalty term must be positive; got (C=%r)"
% self.C)
if self.penalty == 'elasticnet':
if (not isinstance(self.l1_ratio, numbers.Number) or
self.l1_ratio < 0 or self.l1_ratio > 1):
raise ValueError("l1_ratio must be between 0 and 1;"
" got (l1_ratio=%r)" % self.l1_ratio)
elif self.l1_ratio is not None:
warnings.warn("l1_ratio parameter is only used when penalty is "
"'elasticnet'. Got "
"(penalty={})".format(self.penalty))
if self.penalty == 'none':
if self.C != 1.0: # default values
warnings.warn(
"Setting penalty='none' will ignore the C and l1_ratio "
"parameters"
)
# Note that check for l1_ratio is done right above
C_ = np.inf
penalty = 'l2'
else:
C_ = self.C
penalty = self.penalty
if not isinstance(self.max_iter, numbers.Number) or self.max_iter < 0:
raise ValueError("Maximum number of iteration must be positive;"
" got (max_iter=%r)" % self.max_iter)
if not isinstance(self.tol, numbers.Number) or self.tol < 0:
raise ValueError("Tolerance for stopping criteria must be "
"positive; got (tol=%r)" % self.tol)
if solver in ['lbfgs', 'liblinear']:
_dtype = np.float64
else:
_dtype = [np.float64, np.float32]
X, y = check_X_y(X, y, accept_sparse='csr', dtype=_dtype, order="C",
accept_large_sparse=solver != 'liblinear')
check_classification_targets(y)
self.classes_ = np.unique(y)
n_samples, n_features = X.shape
multi_class = _check_multi_class(self.multi_class, solver,
len(self.classes_))
if solver == 'liblinear':
if effective_n_jobs(self.n_jobs) != 1:
warnings.warn("'n_jobs' > 1 does not have any effect when"
" 'solver' is set to 'liblinear'. Got 'n_jobs'"
" = {}.".format(effective_n_jobs(self.n_jobs)))
self.coef_, self.intercept_, n_iter_ = _fit_liblinear(
X, y, self.C, self.fit_intercept, self.intercept_scaling,
self.class_weight, self.penalty, self.dual, self.verbose,
self.max_iter, self.tol, self.random_state,
sample_weight=sample_weight)
self.n_iter_ = np.array([n_iter_])
return self
if solver in ['sag', 'saga']:
max_squared_sum = row_norms(X, squared=True).max()
else:
max_squared_sum = None
n_classes = len(self.classes_)
classes_ = self.classes_
if n_classes < 2:
raise ValueError("This solver needs samples of at least 2 classes"
" in the data, but the data contains only one"
" class: %r" % classes_[0])
if len(self.classes_) == 2:
n_classes = 1
classes_ = classes_[1:]
if self.warm_start:
warm_start_coef = getattr(self, 'coef_', None)
else:
warm_start_coef = None
if warm_start_coef is not None and self.fit_intercept:
warm_start_coef = np.append(warm_start_coef,
self.intercept_[:, np.newaxis],
axis=1)
self.coef_ = list()
self.intercept_ = np.zeros(n_classes)
# Hack so that we iterate only once for the multinomial case.
if multi_class == 'multinomial':
classes_ = [None]
warm_start_coef = [warm_start_coef]
if warm_start_coef is None:
warm_start_coef = [None] * n_classes
path_func = delayed(_logistic_regression_path)
# The SAG solver releases the GIL so it's more efficient to use
# threads for this solver.
if solver in ['sag', 'saga']:
prefer = 'threads'
else:
prefer = 'processes'
fold_coefs_ = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
**_joblib_parallel_args(prefer=prefer))(
path_func(X, y, pos_class=class_, Cs=[C_],
l1_ratio=self.l1_ratio, fit_intercept=self.fit_intercept,
tol=self.tol, verbose=self.verbose, solver=solver,
multi_class=multi_class, max_iter=self.max_iter,
class_weight=self.class_weight, check_input=False,
random_state=self.random_state, coef=warm_start_coef_,
penalty=penalty, max_squared_sum=max_squared_sum,
sample_weight=sample_weight)
for class_, warm_start_coef_ in zip(classes_, warm_start_coef))
fold_coefs_, _, n_iter_ = zip(*fold_coefs_)
self.n_iter_ = np.asarray(n_iter_, dtype=np.int32)[:, 0]
if multi_class == 'multinomial':
self.coef_ = fold_coefs_[0][0]
else:
self.coef_ = np.asarray(fold_coefs_)
self.coef_ = self.coef_.reshape(n_classes, n_features +
int(self.fit_intercept))
if self.fit_intercept:
self.intercept_ = self.coef_[:, -1]
self.coef_ = self.coef_[:, :-1]
return self
def predict_proba(self, X):
"""Probability estimates.
The returned estimates for all classes are ordered by the
label of classes.
For a multi_class problem, if multi_class is set to be "multinomial"
the softmax function is used to find the predicted probability of
each class.
        Else use a one-vs-rest approach, i.e. calculate the probability
        of each class assuming it to be positive using the logistic function
        and normalize these values across all the classes.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
T : array-like, shape = [n_samples, n_classes]
Returns the probability of the sample for each class in the model,
where classes are ordered as they are in ``self.classes_``.
"""
check_is_fitted(self, 'coef_')
ovr = (self.multi_class in ["ovr", "warn"] or
(self.multi_class == 'auto' and (self.classes_.size <= 2 or
self.solver == 'liblinear')))
if ovr:
return super()._predict_proba_lr(X)
else:
decision = self.decision_function(X)
if decision.ndim == 1:
# Workaround for multi_class="multinomial" and binary outcomes
# which requires softmax prediction with only a 1D decision.
decision_2d = np.c_[-decision, decision]
else:
decision_2d = decision
return softmax(decision_2d, copy=False)
def predict_log_proba(self, X):
"""Log of probability estimates.
The returned estimates for all classes are ordered by the
label of classes.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
T : array-like, shape = [n_samples, n_classes]
Returns the log-probability of the sample for each class in the
model, where classes are ordered as they are in ``self.classes_``.
"""
return np.log(self.predict_proba(X))
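# Editor's note: a minimal illustrative sketch (not part of scikit-learn) of how
# the two probability methods above relate; the tiny dataset and variable names
# below are made up for the example.
def _predict_proba_sketch():
    X = np.array([[0.0], [1.0], [2.0], [3.0]])
    y = np.array([0, 0, 1, 1])
    clf = LogisticRegression(solver='lbfgs').fit(X, y)
    proba = clf.predict_proba(X)          # shape (4, 2), columns ordered like clf.classes_
    log_proba = clf.predict_log_proba(X)  # element-wise np.log of proba
    assert np.allclose(proba.sum(axis=1), 1.0)
    return proba, log_proba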
class LogisticRegressionCV(LogisticRegression, BaseEstimator,
LinearClassifierMixin):
"""Logistic Regression CV (aka logit, MaxEnt) classifier.
See glossary entry for :term:`cross-validation estimator`.
    This class implements logistic regression using the liblinear, newton-cg, sag
    or lbfgs optimizers. The newton-cg, sag and lbfgs solvers support only L2
regularization with primal formulation. The liblinear solver supports both
L1 and L2 regularization, with a dual formulation only for the L2 penalty.
Elastic-Net penalty is only supported by the saga solver.
For the grid of `Cs` values and `l1_ratios` values, the best
hyperparameter is selected by the cross-validator `StratifiedKFold`, but
it can be changed using the `cv` parameter. The 'newton-cg', 'sag',
'saga' and 'lbfgs' solvers can warm-start the coefficients (see
:term:`Glossary<warm_start>`).
Read more in the :ref:`User Guide <logistic_regression>`.
Parameters
----------
Cs : list of floats or int, optional (default=10)
Each of the values in Cs describes the inverse of regularization
        strength. If Cs is an int, then a grid of Cs values is chosen
        on a logarithmic scale between 1e-4 and 1e4.
Like in support vector machines, smaller values specify stronger
regularization.
fit_intercept : bool, optional (default=True)
Specifies if a constant (a.k.a. bias or intercept) should be
added to the decision function.
cv : int or cross-validation generator, optional (default=None)
The default cross-validation generator used is Stratified K-Folds.
If an integer is provided, then it is the number of folds used.
        See the :mod:`sklearn.model_selection` module for the
        list of possible cross-validation objects.
.. versionchanged:: 0.22
``cv`` default value if None changed from 3-fold to 5-fold.
dual : bool, optional (default=False)
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
penalty : str, 'l1', 'l2', or 'elasticnet', optional (default='l2')
Used to specify the norm used in the penalization. The 'newton-cg',
'sag' and 'lbfgs' solvers support only l2 penalties. 'elasticnet' is
only supported by the 'saga' solver.
scoring : string, callable, or None, optional (default=None)
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``. For a list of scoring functions
that can be used, look at :mod:`sklearn.metrics`. The
default scoring option used is 'accuracy'.
solver : str, {'newton-cg', 'lbfgs', 'liblinear', 'sag', 'saga'}, \
optional (default='lbfgs')
Algorithm to use in the optimization problem.
- For small datasets, 'liblinear' is a good choice, whereas 'sag' and
'saga' are faster for large ones.
- For multiclass problems, only 'newton-cg', 'sag', 'saga' and 'lbfgs'
handle multinomial loss; 'liblinear' is limited to one-versus-rest
schemes.
- 'newton-cg', 'lbfgs' and 'sag' only handle L2 penalty, whereas
'liblinear' and 'saga' handle L1 penalty.
- 'liblinear' might be slower in LogisticRegressionCV because it does
not handle warm-starting.
Note that 'sag' and 'saga' fast convergence is only guaranteed on
features with approximately the same scale. You can preprocess the data
with a scaler from sklearn.preprocessing.
.. versionadded:: 0.17
Stochastic Average Gradient descent solver.
.. versionadded:: 0.19
SAGA solver.
tol : float, optional (default=1e-4)
Tolerance for stopping criteria.
max_iter : int, optional (default=100)
Maximum number of iterations of the optimization algorithm.
class_weight : dict or 'balanced', optional (default=None)
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
.. versionadded:: 0.17
class_weight == 'balanced'
n_jobs : int or None, optional (default=None)
Number of CPU cores used during the cross-validation loop.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
verbose : int, optional (default=0)
For the 'liblinear', 'sag' and 'lbfgs' solvers set verbose to any
positive number for verbosity.
refit : bool, optional (default=True)
        If set to True, the scores are averaged across all folds, and the
        coefs and the C that correspond to the best score are taken, and a
        final refit is done using these parameters.
Otherwise the coefs, intercepts and C that correspond to the
best scores across folds are averaged.
intercept_scaling : float, optional (default=1)
Useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equal to
intercept_scaling is appended to the instance vector.
The intercept becomes ``intercept_scaling * synthetic_feature_weight``.
        Note that the synthetic feature weight is subject to l1/l2 regularization
        like all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
multi_class : {'ovr', 'multinomial', 'auto'}, default='auto'
If the option chosen is 'ovr', then a binary problem is fit for each
label. For 'multinomial' the loss minimised is the multinomial loss fit
across the entire probability distribution, *even when the data is
binary*. 'multinomial' is unavailable when solver='liblinear'.
'auto' selects 'ovr' if the data is binary, or if solver='liblinear',
and otherwise selects 'multinomial'.
.. versionadded:: 0.18
Stochastic Average Gradient descent solver for 'multinomial' case.
.. versionchanged:: 0.22
Default changed from 'ovr' to 'auto' in 0.22.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
l1_ratios : list of float or None, optional (default=None)
        The list of Elastic-Net mixing parameters, with ``0 <= l1_ratio <= 1``.
Only used if ``penalty='elasticnet'``. A value of 0 is equivalent to
using ``penalty='l2'``, while 1 is equivalent to using
        ``penalty='l1'``. For ``0 < l1_ratio < 1``, the penalty is a combination
of L1 and L2.
Attributes
----------
classes_ : array, shape (n_classes, )
A list of class labels known to the classifier.
coef_ : array, shape (1, n_features) or (n_classes, n_features)
Coefficient of the features in the decision function.
`coef_` is of shape (1, n_features) when the given problem
is binary.
intercept_ : array, shape (1,) or (n_classes,)
Intercept (a.k.a. bias) added to the decision function.
If `fit_intercept` is set to False, the intercept is set to zero.
        `intercept_` is of shape (1,) when the problem is binary.
Cs_ : array, shape (n_cs)
Array of C i.e. inverse of regularization parameter values used
for cross-validation.
l1_ratios_ : array, shape (n_l1_ratios)
Array of l1_ratios used for cross-validation. If no l1_ratio is used
(i.e. penalty is not 'elasticnet'), this is set to ``[None]``
coefs_paths_ : array, shape (n_folds, n_cs, n_features) or \
(n_folds, n_cs, n_features + 1)
dict with classes as the keys, and the path of coefficients obtained
during cross-validating across each fold and then across each Cs
after doing an OvR for the corresponding class as values.
If the 'multi_class' option is set to 'multinomial', then
the coefs_paths are the coefficients corresponding to each class.
Each dict value has shape ``(n_folds, n_cs, n_features)`` or
``(n_folds, n_cs, n_features + 1)`` depending on whether the
intercept is fit or not. If ``penalty='elasticnet'``, the shape is
``(n_folds, n_cs, n_l1_ratios_, n_features)`` or
``(n_folds, n_cs, n_l1_ratios_, n_features + 1)``.
scores_ : dict
dict with classes as the keys, and the values as the
grid of scores obtained during cross-validating each fold, after doing
an OvR for the corresponding class. If the 'multi_class' option
given is 'multinomial' then the same scores are repeated across
all classes, since this is the multinomial class. Each dict value
        has shape ``(n_folds, n_cs)`` or ``(n_folds, n_cs, n_l1_ratios)`` if
``penalty='elasticnet'``.
C_ : array, shape (n_classes,) or (n_classes - 1,)
Array of C that maps to the best scores across every class. If refit is
set to False, then for each class, the best C is the average of the
C's that correspond to the best scores for each fold.
        `C_` is of shape (n_classes,) when the problem is binary.
l1_ratio_ : array, shape (n_classes,) or (n_classes - 1,)
Array of l1_ratio that maps to the best scores across every class. If
refit is set to False, then for each class, the best l1_ratio is the
average of the l1_ratio's that correspond to the best scores for each
        fold. `l1_ratio_` is of shape (n_classes,) when the problem is binary.
n_iter_ : array, shape (n_classes, n_folds, n_cs) or (1, n_folds, n_cs)
Actual number of iterations for all classes, folds and Cs.
In the binary or multinomial cases, the first dimension is equal to 1.
If ``penalty='elasticnet'``, the shape is ``(n_classes, n_folds,
n_cs, n_l1_ratios)`` or ``(1, n_folds, n_cs, n_l1_ratios)``.
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.linear_model import LogisticRegressionCV
>>> X, y = load_iris(return_X_y=True)
>>> clf = LogisticRegressionCV(cv=5, random_state=0).fit(X, y)
>>> clf.predict(X[:2, :])
array([0, 0])
>>> clf.predict_proba(X[:2, :]).shape
(2, 3)
>>> clf.score(X, y)
0.98...
See also
--------
LogisticRegression
"""
def __init__(self, Cs=10, fit_intercept=True, cv=None, dual=False,
penalty='l2', scoring=None, solver='lbfgs', tol=1e-4,
max_iter=100, class_weight=None, n_jobs=None, verbose=0,
refit=True, intercept_scaling=1., multi_class='auto',
random_state=None, l1_ratios=None):
self.Cs = Cs
self.fit_intercept = fit_intercept
self.cv = cv
self.dual = dual
self.penalty = penalty
self.scoring = scoring
self.tol = tol
self.max_iter = max_iter
self.class_weight = class_weight
self.n_jobs = n_jobs
self.verbose = verbose
self.solver = solver
self.refit = refit
self.intercept_scaling = intercept_scaling
self.multi_class = multi_class
self.random_state = random_state
self.l1_ratios = l1_ratios
def fit(self, X, y, sample_weight=None):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target vector relative to X.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
self : object
"""
solver = _check_solver(self.solver, self.penalty, self.dual)
if not isinstance(self.max_iter, numbers.Number) or self.max_iter < 0:
raise ValueError("Maximum number of iteration must be positive;"
" got (max_iter=%r)" % self.max_iter)
if not isinstance(self.tol, numbers.Number) or self.tol < 0:
raise ValueError("Tolerance for stopping criteria must be "
"positive; got (tol=%r)" % self.tol)
if self.penalty == 'elasticnet':
if self.l1_ratios is None or len(self.l1_ratios) == 0 or any(
(not isinstance(l1_ratio, numbers.Number) or l1_ratio < 0
or l1_ratio > 1) for l1_ratio in self.l1_ratios):
raise ValueError("l1_ratios must be a list of numbers between "
"0 and 1; got (l1_ratios=%r)" %
self.l1_ratios)
l1_ratios_ = self.l1_ratios
else:
if self.l1_ratios is not None:
warnings.warn("l1_ratios parameter is only used when penalty "
"is 'elasticnet'. Got (penalty={})".format(
self.penalty))
l1_ratios_ = [None]
if self.penalty == 'none':
raise ValueError(
"penalty='none' is not useful and not supported by "
"LogisticRegressionCV."
)
X, y = check_X_y(X, y, accept_sparse='csr', dtype=np.float64,
order="C",
accept_large_sparse=solver != 'liblinear')
check_classification_targets(y)
class_weight = self.class_weight
# Encode for string labels
label_encoder = LabelEncoder().fit(y)
y = label_encoder.transform(y)
if isinstance(class_weight, dict):
class_weight = {label_encoder.transform([cls])[0]: v
for cls, v in class_weight.items()}
# The original class labels
classes = self.classes_ = label_encoder.classes_
encoded_labels = label_encoder.transform(label_encoder.classes_)
multi_class = _check_multi_class(self.multi_class, solver,
len(classes))
if solver in ['sag', 'saga']:
max_squared_sum = row_norms(X, squared=True).max()
else:
max_squared_sum = None
# init cross-validation generator
cv = check_cv(self.cv, y, classifier=True)
folds = list(cv.split(X, y))
# Use the label encoded classes
n_classes = len(encoded_labels)
if n_classes < 2:
raise ValueError("This solver needs samples of at least 2 classes"
" in the data, but the data contains only one"
" class: %r" % classes[0])
if n_classes == 2:
# OvR in case of binary problems is as good as fitting
# the higher label
n_classes = 1
encoded_labels = encoded_labels[1:]
classes = classes[1:]
# We need this hack to iterate only once over labels, in the case of
# multi_class = multinomial, without changing the value of the labels.
if multi_class == 'multinomial':
iter_encoded_labels = iter_classes = [None]
else:
iter_encoded_labels = encoded_labels
iter_classes = classes
# compute the class weights for the entire dataset y
if class_weight == "balanced":
class_weight = compute_class_weight(class_weight,
np.arange(len(self.classes_)),
y)
class_weight = dict(enumerate(class_weight))
path_func = delayed(_log_reg_scoring_path)
# The SAG solver releases the GIL so it's more efficient to use
# threads for this solver.
if self.solver in ['sag', 'saga']:
prefer = 'threads'
else:
prefer = 'processes'
fold_coefs_ = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
**_joblib_parallel_args(prefer=prefer))(
path_func(X, y, train, test, pos_class=label, Cs=self.Cs,
fit_intercept=self.fit_intercept, penalty=self.penalty,
dual=self.dual, solver=solver, tol=self.tol,
max_iter=self.max_iter, verbose=self.verbose,
class_weight=class_weight, scoring=self.scoring,
multi_class=multi_class,
intercept_scaling=self.intercept_scaling,
random_state=self.random_state,
max_squared_sum=max_squared_sum,
sample_weight=sample_weight,
l1_ratio=l1_ratio
)
for label in iter_encoded_labels
for train, test in folds
for l1_ratio in l1_ratios_)
# _log_reg_scoring_path will output different shapes depending on the
# multi_class param, so we need to reshape the outputs accordingly.
# Cs is of shape (n_classes . n_folds . n_l1_ratios, n_Cs) and all the
# rows are equal, so we just take the first one.
# After reshaping,
# - scores is of shape (n_classes, n_folds, n_Cs . n_l1_ratios)
# - coefs_paths is of shape
# (n_classes, n_folds, n_Cs . n_l1_ratios, n_features)
# - n_iter is of shape
# (n_classes, n_folds, n_Cs . n_l1_ratios) or
# (1, n_folds, n_Cs . n_l1_ratios)
coefs_paths, Cs, scores, n_iter_ = zip(*fold_coefs_)
self.Cs_ = Cs[0]
if multi_class == 'multinomial':
coefs_paths = np.reshape(
coefs_paths,
(len(folds), len(l1_ratios_) * len(self.Cs_), n_classes, -1)
)
# equiv to coefs_paths = np.moveaxis(coefs_paths, (0, 1, 2, 3),
# (1, 2, 0, 3))
coefs_paths = np.swapaxes(coefs_paths, 0, 1)
coefs_paths = np.swapaxes(coefs_paths, 0, 2)
self.n_iter_ = np.reshape(
n_iter_,
(1, len(folds), len(self.Cs_) * len(l1_ratios_))
)
# repeat same scores across all classes
scores = np.tile(scores, (n_classes, 1, 1))
else:
coefs_paths = np.reshape(
coefs_paths,
(n_classes, len(folds), len(self.Cs_) * len(l1_ratios_),
-1)
)
self.n_iter_ = np.reshape(
n_iter_,
(n_classes, len(folds), len(self.Cs_) * len(l1_ratios_))
)
scores = np.reshape(scores, (n_classes, len(folds), -1))
self.scores_ = dict(zip(classes, scores))
self.coefs_paths_ = dict(zip(classes, coefs_paths))
self.C_ = list()
self.l1_ratio_ = list()
self.coef_ = np.empty((n_classes, X.shape[1]))
self.intercept_ = np.zeros(n_classes)
for index, (cls, encoded_label) in enumerate(
zip(iter_classes, iter_encoded_labels)):
if multi_class == 'ovr':
scores = self.scores_[cls]
coefs_paths = self.coefs_paths_[cls]
else:
# For multinomial, all scores are the same across classes
scores = scores[0]
# coefs_paths will keep its original shape because
# logistic_regression_path expects it this way
if self.refit:
# best_index is between 0 and (n_Cs . n_l1_ratios - 1)
# for example, with n_cs=2 and n_l1_ratios=3
# the layout of scores is
# [c1, c2, c1, c2, c1, c2]
# l1_1 , l1_2 , l1_3
best_index = scores.sum(axis=0).argmax()
best_index_C = best_index % len(self.Cs_)
C_ = self.Cs_[best_index_C]
self.C_.append(C_)
best_index_l1 = best_index // len(self.Cs_)
l1_ratio_ = l1_ratios_[best_index_l1]
self.l1_ratio_.append(l1_ratio_)
if multi_class == 'multinomial':
coef_init = np.mean(coefs_paths[:, :, best_index, :],
axis=1)
else:
coef_init = np.mean(coefs_paths[:, best_index, :], axis=0)
# Note that y is label encoded and hence pos_class must be
# the encoded label / None (for 'multinomial')
w, _, _ = _logistic_regression_path(
X, y, pos_class=encoded_label, Cs=[C_], solver=solver,
fit_intercept=self.fit_intercept, coef=coef_init,
max_iter=self.max_iter, tol=self.tol,
penalty=self.penalty,
class_weight=class_weight,
multi_class=multi_class,
verbose=max(0, self.verbose - 1),
random_state=self.random_state,
check_input=False, max_squared_sum=max_squared_sum,
sample_weight=sample_weight,
l1_ratio=l1_ratio_)
w = w[0]
else:
# Take the best scores across every fold and the average of
# all coefficients corresponding to the best scores.
best_indices = np.argmax(scores, axis=1)
if multi_class == 'ovr':
w = np.mean([coefs_paths[i, best_indices[i], :]
for i in range(len(folds))], axis=0)
else:
w = np.mean([coefs_paths[:, i, best_indices[i], :]
for i in range(len(folds))], axis=0)
best_indices_C = best_indices % len(self.Cs_)
self.C_.append(np.mean(self.Cs_[best_indices_C]))
if self.penalty == 'elasticnet':
best_indices_l1 = best_indices // len(self.Cs_)
self.l1_ratio_.append(np.mean(l1_ratios_[best_indices_l1]))
else:
self.l1_ratio_.append(None)
if multi_class == 'multinomial':
self.C_ = np.tile(self.C_, n_classes)
self.l1_ratio_ = np.tile(self.l1_ratio_, n_classes)
self.coef_ = w[:, :X.shape[1]]
if self.fit_intercept:
self.intercept_ = w[:, -1]
else:
self.coef_[index] = w[: X.shape[1]]
if self.fit_intercept:
self.intercept_[index] = w[-1]
self.C_ = np.asarray(self.C_)
self.l1_ratio_ = np.asarray(self.l1_ratio_)
self.l1_ratios_ = np.asarray(l1_ratios_)
# if elasticnet was used, add the l1_ratios dimension to some
# attributes
if self.l1_ratios is not None:
for cls, coefs_path in self.coefs_paths_.items():
self.coefs_paths_[cls] = coefs_path.reshape(
(len(folds), self.Cs_.size, self.l1_ratios_.size, -1))
for cls, score in self.scores_.items():
self.scores_[cls] = score.reshape(
(len(folds), self.Cs_.size, self.l1_ratios_.size))
self.n_iter_ = self.n_iter_.reshape(
(-1, len(folds), self.Cs_.size, self.l1_ratios_.size))
return self
def score(self, X, y, sample_weight=None):
"""Returns the score using the `scoring` option on the given
test data and labels.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Test samples.
y : array-like, shape = (n_samples,)
True labels for X.
sample_weight : array-like, shape = [n_samples], optional
Sample weights.
Returns
-------
score : float
Score of self.predict(X) wrt. y.
"""
if self.scoring is not None:
warnings.warn("The long-standing behavior to use the "
"accuracy score has changed. The scoring "
"parameter is now used. "
"This warning will disappear in version 0.22.",
ChangedBehaviorWarning)
scoring = self.scoring or 'accuracy'
if isinstance(scoring, str):
scoring = get_scorer(scoring)
return scoring(self, X, y, sample_weight=sample_weight)
| bsd-3-clause |
huazhisong/graduate_text | src/contrib_cnn/data_helper.py | 2 | 2520 | #!/usr/lib/env python
# -*- code:utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import csv
import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelBinarizer
from tensorflow.contrib import learn
import tensorflow as tf
from tensorflow.python.platform import gfile
Dataset = collections.namedtuple('Dataset', ['data', 'target'])
Datasets = collections.namedtuple('Datasets', ['train', 'validation', 'test'])
def load_data(data_file, label_file):
"""Load dataset from CSV file without a header row."""
with gfile.Open(data_file) as data:
with gfile.Open(label_file) as labels:
lines_data = csv.reader(data)
lines_labels = csv.reader(labels)
data, target = [], []
for d, l in zip(lines_data, lines_labels):
target.append(l)
data.append(d)
target = np.array(target)
data = np.array(data)
return Dataset(data=data, target=target)
def load_data_labels(data_file, dev_sample_percentage=0.2):
"""
    Loads labelled text data from a CSV file and splits it into training and dev sets.
    Returns the vectorized train/dev texts, their binarized labels and the vocabulary processor.
"""
data = pd.read_csv(data_file, encoding='latin-1')
x = data.text
y = data.fixer
# TODO: This is very crude, should use cross-validation
dev_sample_index = -1 * int(dev_sample_percentage * float(len(y)))
x_train, x_dev = x[:dev_sample_index], x[dev_sample_index:]
y_train, y_dev = y[:dev_sample_index], y[dev_sample_index:]
    # preprocess the training data
    # use the 0.8 quantile of the document lengths as the fixed document length
document_length_df = pd.DataFrame([len(xx.split(" ")) for xx in x_train])
document_length = np.int64(document_length_df.quantile(0.8))
tf.summary.scalar("document_len", document_length)
vocabulary_processor = learn.preprocessing.VocabularyProcessor(document_length)
x_train = vocabulary_processor.fit_transform(x_train)
x_dev = vocabulary_processor.transform(x_dev)
    # binarize the labels
lb = LabelBinarizer()
y_train = lb.fit_transform(y_train)
y_dev = lb.transform(y_dev)
print("Document length: %d" % document_length)
print("Vocabulary Size: {:d}".format(len(vocabulary_processor.vocabulary_)))
print("Train/Dev split: {:d}/{:d}".format(len(y_train), len(y_dev)))
return x_train, y_train, x_dev, y_dev, vocabulary_processor
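# Editor's note: an illustrative usage sketch (not part of the original file).
# The CSV path below is a placeholder; the file is assumed to contain the
# `text` and `fixer` columns that load_data_labels expects.
def _load_data_labels_example(csv_path="bug_reports.csv"):
    x_train, y_train, x_dev, y_dev, vocab = load_data_labels(
        csv_path, dev_sample_percentage=0.2)
    # x_train/x_dev yield fixed-length word-id vectors,
    # y_train/y_dev hold the binarized fixer labels.
    return x_train, y_train, x_dev, y_dev, vocab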
| agpl-3.0 |
tamasgal/km3pipe | km3pipe/io/hdf5.py | 1 | 37296 | # Filename: hdf5.py
# pylint: disable=C0103,R0903,C901
# vim:set ts=4 sts=4 sw=4 et:
"""
Read and write KM3NeT-formatted HDF5 files.
"""
from collections import OrderedDict, defaultdict, namedtuple
from functools import singledispatch
import os.path
import warnings
from uuid import uuid4
import numpy as np
import tables as tb
import km3io
from thepipe import Provenance
try:
from numba import jit
except ImportError:
jit = lambda f: f
import km3pipe as kp
from thepipe import Module, Blob
from km3pipe.dataclasses import Table, NDArray
from km3pipe.logger import get_logger
from km3pipe.tools import decamelise, camelise, split, istype
log = get_logger(__name__) # pylint: disable=C0103
__author__ = "Tamas Gal and Moritz Lotze and Michael Moser"
__copyright__ = "Copyright 2016, Tamas Gal and the KM3NeT collaboration."
__credits__ = []
__license__ = "MIT"
__maintainer__ = "Tamas Gal and Moritz Lotze"
__email__ = "[email protected]"
__status__ = "Development"
FORMAT_VERSION = np.string_("5.1")
MINIMUM_FORMAT_VERSION = np.string_("4.1")
class H5VersionError(Exception):
pass
def check_version(h5file):
try:
version = np.string_(h5file.root._v_attrs.format_version)
except AttributeError:
log.error(
"Could not determine HDF5 format version: '%s'."
"You may encounter unexpected errors! Good luck..." % h5file.filename
)
return
if split(version, int, np.string_(".")) < split(
MINIMUM_FORMAT_VERSION, int, np.string_(".")
):
raise H5VersionError(
"HDF5 format version {0} or newer required!\n"
"'{1}' has HDF5 format version {2}.".format(
MINIMUM_FORMAT_VERSION.decode("utf-8"),
h5file.filename,
version.decode("utf-8"),
)
)
class HDF5Header(object):
"""Wrapper class for the `/raw_header` table in KM3HDF5
Parameters
----------
data : dict(str, str/tuple/dict/OrderedDict)
The actual header data, consisting of a key and an entry.
    If possible, the key will be set as a property and the values will
be converted to namedtuples (fields sorted by name to ensure consistency
when dictionaries are provided).
"""
def __init__(self, data):
self._data = data
self._user_friendly_data = {} # namedtuples, if possible
self._set_attributes()
def _set_attributes(self):
"""Traverse the internal dictionary and set the getters"""
for parameter in list(self._data.keys()):
data = self._data[parameter]
if isinstance(data, dict) or isinstance(data, OrderedDict):
if not all(f.isidentifier() for f in data.keys()):
break
# Create a namedtuple for easier access
field_names, field_values = zip(*data.items())
sorted_indices = np.argsort(field_names)
clsname = "HeaderEntry" if not parameter.isidentifier() else parameter
nt = namedtuple(clsname, [field_names[i] for i in sorted_indices])
data = nt(*[field_values[i] for i in sorted_indices])
if parameter.isidentifier():
setattr(self, parameter, data)
self._user_friendly_data[parameter] = data
def __getitem__(self, key):
return self._user_friendly_data[key]
def keys(self):
return self._user_friendly_data.keys()
def values(self):
return self._user_friendly_data.values()
def items(self):
return self._user_friendly_data.items()
@classmethod
def from_table(cls, table):
data = OrderedDict()
for i in range(len(table)):
parameter = table["parameter"][i].decode()
field_names = table["field_names"][i].decode().split(" ")
field_values = table["field_values"][i].decode().split(" ")
if field_values == [""]:
log.info("No value for parameter '{}'! Skipping...".format(parameter))
continue
dtypes = table["dtype"][i].decode()
dtyped_values = []
for dtype, value in zip(dtypes.split(" "), field_values):
if dtype.startswith("a"):
dtyped_values.append(value)
else:
value = np.fromstring(value, dtype=dtype, sep=" ")[0]
dtyped_values.append(value)
data[parameter] = OrderedDict(zip(field_names, dtyped_values))
return cls(data)
@classmethod
def from_km3io(cls, header):
if not isinstance(header, km3io.offline.Header):
raise TypeError(
"The given header object is not an instance of km3io.offline.Header"
)
return cls(header._data)
@classmethod
def from_aanet(cls, table):
data = OrderedDict()
for i in range(len(table)):
parameter = table["parameter"][i].astype(str)
field_names = [n.decode() for n in table["field_names"][i].split()]
field_values = [n.decode() for n in table["field_values"][i].split()]
if field_values in [[b""], []]:
log.info("No value for parameter '{}'! Skipping...".format(parameter))
continue
dtypes = table["dtype"][i]
dtyped_values = []
for dtype, value in zip(dtypes.split(), field_values):
if dtype.startswith(b"a"):
dtyped_values.append(value)
else:
value = np.fromstring(value, dtype=dtype, sep=" ")[0]
dtyped_values.append(value)
data[parameter] = OrderedDict(zip(field_names, dtyped_values))
return cls(data)
@classmethod
def from_hdf5(cls, filename):
with tb.open_file(filename, "r") as f:
table = f.get_node("/raw_header")
return cls.from_pytable(table)
@classmethod
def from_pytable(cls, table):
data = OrderedDict()
for row in table:
parameter = row["parameter"].decode()
field_names = row["field_names"].decode().split(" ")
field_values = row["field_values"].decode().split(" ")
if field_values == [""]:
log.info("No value for parameter '{}'! Skipping...".format(parameter))
continue
dtypes = row["dtype"].decode()
dtyped_values = []
for dtype, value in zip(dtypes.split(" "), field_values):
if dtype.startswith("a"):
dtyped_values.append(value)
else:
value = np.fromstring(value, dtype=dtype, sep=" ")[0]
dtyped_values.append(value)
data[parameter] = OrderedDict(zip(field_names, dtyped_values))
return cls(data)
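# Editor's note: a small illustrative sketch (not part of km3pipe) of how the
# HDF5Header wrapper above exposes nested header data; the parameter and field
# names below are made up for the example.
def _hdf5_header_sketch():
    header = HDF5Header({"DAQ": {"livetime": 394}, "cut_nu": {"Emin": 100, "Emax": 1e8}})
    # dict entries become namedtuples, reachable as attributes or via item access
    return header.DAQ.livetime, header["cut_nu"].Emin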
class HDF5IndexTable:
def __init__(self, h5loc, start=0):
self.h5loc = h5loc
self._data = defaultdict(list)
self._index = 0
if start > 0:
self._data["indices"] = [0] * start
self._data["n_items"] = [0] * start
def append(self, n_items):
self._data["indices"].append(self._index)
self._data["n_items"].append(n_items)
self._index += n_items
@property
def data(self):
return self._data
def fillup(self, length):
missing = length - len(self)
self._data["indices"] += [self.data["indices"][-1]] * missing
self._data["n_items"] += [0] * missing
def __len__(self):
return len(self.data["indices"])
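# Editor's note: an illustrative sketch (not part of km3pipe) of the bookkeeping
# done by HDF5IndexTable: for every group it records the offset into the flat
# array and the number of items written for that group.
def _hdf5_index_table_sketch():
    idx = HDF5IndexTable("/hits/_indices")
    idx.append(3)  # group 0 wrote 3 items  -> index 0, n_items 3
    idx.append(0)  # group 1 wrote nothing  -> index 3, n_items 0
    idx.append(5)  # group 2 wrote 5 items  -> index 3, n_items 5
    return idx.data  # {'indices': [0, 3, 3], 'n_items': [3, 0, 5]}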
class HDF5Sink(Module):
"""Write KM3NeT-formatted HDF5 files, event-by-event.
The data can be a ``kp.Table``, a numpy structured array,
a pandas DataFrame, or a simple scalar.
The name of the corresponding H5 table is the decamelised
blob-key, so values which are stored in the blob under `FooBar`
will be written to `/foo_bar` in the HDF5 file.
Parameters
----------
filename: str, optional [default: 'dump.h5']
Where to store the events.
h5file: pytables.File instance, optional [default: None]
Opened file to write to. This is mutually exclusive with filename.
keys: list of strings, optional
List of Blob-keys to write, everything else is ignored.
complib : str [default: zlib]
Compression library that should be used.
'zlib', 'lzf', 'blosc' and all other PyTables filters
are available.
complevel : int [default: 5]
Compression level.
chunksize : int [optional]
Chunksize that should be used for saving along the first axis
of the input array.
flush_frequency: int, optional [default: 500]
The number of iterations to cache tables and arrays before
dumping to disk.
pytab_file_args: dict [optional]
pass more arguments to the pytables File init
    n_rows_expected : int, optional [default: 10000]
append: bool, optional [default: False]
reset_group_id: bool, optional [default: True]
Resets the group_id so that it's continuous in the output file.
Use this with care!
Notes
-----
Provides service write_table(tab, h5loc=None): tab:Table, h5loc:str
The table to write, with ".h5loc" set or to h5loc if specified.
"""
def configure(self):
self.filename = self.get("filename", default="dump.h5")
self.ext_h5file = self.get("h5file")
self.keys = self.get("keys", default=[])
self.complib = self.get("complib", default="zlib")
self.complevel = self.get("complevel", default=5)
self.chunksize = self.get("chunksize")
self.flush_frequency = self.get("flush_frequency", default=500)
self.pytab_file_args = self.get("pytab_file_args", default=dict())
self.file_mode = "a" if self.get("append") else "w"
self.keep_open = self.get("keep_open")
self._reset_group_id = self.get("reset_group_id", default=True)
self.indices = {} # to store HDF5IndexTables for each h5loc
self._singletons_written = {}
# magic 10000: this is the default of the "expectedrows" arg
# from the tables.File.create_table() function
# at least according to the docs
# might be able to set to `None`, I don't know...
self.n_rows_expected = self.get("n_rows_expected", default=10000)
self.index = 0
self._uuid = str(uuid4())
self.expose(self.write_table, "write_table")
if self.ext_h5file is not None:
self.h5file = self.ext_h5file
else:
self.h5file = tb.open_file(
self.filename,
mode=self.file_mode,
title="KM3NeT",
**self.pytab_file_args,
)
Provenance().record_output(
self.filename, uuid=self._uuid, comment="HDF5Sink output"
)
self.filters = tb.Filters(
complevel=self.complevel,
shuffle=True,
fletcher32=True,
complib=self.complib,
)
self._tables = OrderedDict()
self._ndarrays = OrderedDict()
self._ndarrays_cache = defaultdict(list)
def _to_array(self, data, name=None):
if data is None:
return
if np.isscalar(data):
self.log.debug("toarray: is a scalar")
return Table(
{name: np.asarray(data).reshape((1,))},
h5loc="/misc/{}".format(decamelise(name)),
name=name,
)
if hasattr(data, "len") and len(data) <= 0: # a bit smelly ;)
self.log.debug("toarray: data has no length")
return
# istype instead isinstance, to avoid heavy pandas import (hmmm...)
if istype(data, "DataFrame"): # noqa
self.log.debug("toarray: pandas dataframe")
data = Table.from_dataframe(data)
return data
def _cache_ndarray(self, arr):
self._ndarrays_cache[arr.h5loc].append(arr)
def _write_ndarrays_cache_to_disk(self):
"""Writes all the cached NDArrays to disk and empties the cache"""
for h5loc, arrs in self._ndarrays_cache.items():
title = arrs[0].title
chunkshape = (
(self.chunksize,) + arrs[0].shape[1:]
if self.chunksize is not None
else None
)
arr = NDArray(np.concatenate(arrs), h5loc=h5loc, title=title)
if h5loc not in self._ndarrays:
loc, tabname = os.path.split(h5loc)
ndarr = self.h5file.create_earray(
loc,
tabname,
tb.Atom.from_dtype(arr.dtype),
(0,) + arr.shape[1:],
chunkshape=chunkshape,
title=title,
filters=self.filters,
createparents=True,
)
self._ndarrays[h5loc] = ndarr
else:
ndarr = self._ndarrays[h5loc]
# for arr_length in (len(a) for a in arrs):
# self._record_index(h5loc, arr_length)
ndarr.append(arr)
self._ndarrays_cache = defaultdict(list)
def write_table(self, table, h5loc=None):
"""Write a single table to the HDF5 file, exposed as a service"""
self.log.debug("Writing table %s", table.name)
if h5loc is None:
h5loc = table.h5loc
self._write_table(h5loc, table, table.name)
def _write_table(self, h5loc, arr, title):
level = len(h5loc.split("/"))
if h5loc not in self._tables:
dtype = arr.dtype
if any("U" in str(dtype.fields[f][0]) for f in dtype.fields):
self.log.error(
"Cannot write data to '{}'. Unicode strings are not supported!".format(
h5loc
)
)
return
loc, tabname = os.path.split(h5loc)
self.log.debug(
"h5loc '{}', Loc '{}', tabname '{}'".format(h5loc, loc, tabname)
)
with warnings.catch_warnings():
warnings.simplefilter("ignore", tb.NaturalNameWarning)
tab = self.h5file.create_table(
loc,
tabname,
chunkshape=self.chunksize,
description=dtype,
title=title,
filters=self.filters,
createparents=True,
expectedrows=self.n_rows_expected,
)
tab._v_attrs.datatype = title
if level < 5:
self._tables[h5loc] = tab
else:
tab = self._tables[h5loc]
h5_colnames = set(tab.colnames)
tab_colnames = set(arr.dtype.names)
if h5_colnames != tab_colnames:
missing_cols = h5_colnames - tab_colnames
if missing_cols:
self.log.info("Missing columns in table, trying to append NaNs.")
arr = arr.append_columns(
missing_cols, np.full((len(missing_cols), len(arr)), np.nan)
)
if arr.dtype != tab.dtype:
self.log.error(
"Differing dtypes after appending "
"missing columns to the table! Skipping..."
)
return
if arr.dtype != tab.dtype:
try:
arr = Table(arr, dtype=tab.dtype)
except ValueError:
self.log.critical(
"Cannot write a table to '%s' since its dtype is "
"different compared to the previous table with the same "
"HDF5 location, which was used to fix the dtype of the "
"HDF5 compund type." % h5loc
)
raise
tab.append(arr)
if level < 4:
tab.flush()
def _write_separate_columns(self, where, obj, title):
f = self.h5file
loc, group_name = os.path.split(where)
if where not in f:
group = f.create_group(loc, group_name, title, createparents=True)
group._v_attrs.datatype = title
else:
group = f.get_node(where)
for col, (dt, _) in obj.dtype.fields.items():
data = obj.__array__()[col]
if col not in group:
a = tb.Atom.from_dtype(dt)
arr = f.create_earray(
group, col, a, (0,), col.capitalize(), filters=self.filters
)
else:
arr = getattr(group, col)
arr.append(data)
# create index table
# if where not in self.indices:
# self.indices[where] = HDF5IndexTable(where + "/_indices", start=self.index)
self._record_index(where, len(data), split=True)
def _process_entry(self, key, entry):
self.log.debug("Inspecting {}".format(key))
if (
hasattr(entry, "h5singleton")
and entry.h5singleton
and entry.h5loc in self._singletons_written
):
self.log.debug(
"Skipping '%s' since it's a singleton and already written."
% entry.h5loc
)
return
if not hasattr(entry, "h5loc"):
self.log.debug("Ignoring '%s': no h5loc attribute" % key)
return
if isinstance(entry, NDArray):
self._cache_ndarray(entry)
self._record_index(entry.h5loc, len(entry))
return entry
try:
title = entry.name
except AttributeError:
title = key
if isinstance(entry, Table) and not entry.h5singleton:
if "group_id" not in entry:
entry = entry.append_columns("group_id", self.index)
elif self._reset_group_id:
# reset group_id to the HDF5Sink's continuous counter
entry.group_id = self.index
self.log.debug("h5l: '{}', title '{}'".format(entry.h5loc, title))
if hasattr(entry, "split_h5") and entry.split_h5:
self.log.debug("Writing into separate columns...")
self._write_separate_columns(entry.h5loc, entry, title=title)
else:
self.log.debug("Writing into single Table...")
self._write_table(entry.h5loc, entry, title=title)
if hasattr(entry, "h5singleton") and entry.h5singleton:
self._singletons_written[entry.h5loc] = True
return entry
def process(self, blob):
written_blob = Blob()
for key, entry in sorted(blob.items()):
if self.keys and key not in self.keys:
self.log.info("Skipping blob, since it's not in the keys list")
continue
self.log.debug("Processing %s", key)
data = self._process_entry(key, entry)
if data is not None:
written_blob[key] = data
if "GroupInfo" not in blob:
gi = Table(
{"group_id": self.index, "blob_length": len(written_blob)},
h5loc="/group_info",
name="Group Info",
)
self._process_entry("GroupInfo", gi)
# fill up NDArray indices with 0 entries if needed
if written_blob:
ndarray_h5locs = set(self._ndarrays.keys()).union(
self._ndarrays_cache.keys()
)
written_h5locs = set(
e.h5loc for e in written_blob.values() if isinstance(e, NDArray)
)
missing_h5locs = ndarray_h5locs - written_h5locs
for h5loc in missing_h5locs:
self.log.info("Filling up %s with 0 length entry", h5loc)
self._record_index(h5loc, 0)
if not self.index % self.flush_frequency:
self.flush()
self.index += 1
return blob
def _record_index(self, h5loc, count, split=False):
"""Add an index entry (optionally create table) for an NDArray h5loc.
Parameters
----------
h5loc : str
location in HDF5
count : int
number of elements (can be 0)
split : bool
if it's a split table
"""
suffix = "/_indices" if split else "_indices"
idx_table_h5loc = h5loc + suffix
if idx_table_h5loc not in self.indices:
self.indices[idx_table_h5loc] = HDF5IndexTable(
idx_table_h5loc, start=self.index
)
idx_tab = self.indices[idx_table_h5loc]
idx_tab.append(count)
def flush(self):
"""Flush tables and arrays to disk"""
self.log.info("Flushing tables and arrays to disk...")
for tab in self._tables.values():
tab.flush()
self._write_ndarrays_cache_to_disk()
def finish(self):
self.flush()
self.h5file.root._v_attrs.km3pipe = np.string_(kp.__version__)
self.h5file.root._v_attrs.pytables = np.string_(tb.__version__)
self.h5file.root._v_attrs.kid = np.string_(self._uuid)
self.h5file.root._v_attrs.format_version = np.string_(FORMAT_VERSION)
self.log.info("Adding index tables.")
for where, idx_tab in self.indices.items():
# any skipped NDArrays or split groups will be filled with 0 entries
idx_tab.fillup(self.index)
self.log.debug("Creating index table for '%s'" % where)
h5loc = idx_tab.h5loc
self.log.info(" -> {0}".format(h5loc))
indices = Table(
{"index": idx_tab.data["indices"], "n_items": idx_tab.data["n_items"]},
h5loc=h5loc,
)
self._write_table(h5loc, indices, title="Indices")
self.log.info(
"Creating pytables index tables. " "This may take a few minutes..."
)
for tab in self._tables.values():
if "frame_id" in tab.colnames:
tab.cols.frame_id.create_index()
if "slice_id" in tab.colnames:
tab.cols.slice_id.create_index()
if "dom_id" in tab.colnames:
tab.cols.dom_id.create_index()
if "event_id" in tab.colnames:
try:
tab.cols.event_id.create_index()
except NotImplementedError:
log.warning(
"Table '{}' has an uint64 column, "
"not indexing...".format(tab._v_name)
)
if "group_id" in tab.colnames:
try:
tab.cols.group_id.create_index()
except NotImplementedError:
log.warning(
"Table '{}' has an uint64 column, "
"not indexing...".format(tab._v_name)
)
tab.flush()
if "HDF5MetaData" in self.services:
self.log.info("Writing HDF5 meta data.")
metadata = self.services["HDF5MetaData"]
for name, value in metadata.items():
self.h5file.set_node_attr("/", name, value)
if not self.keep_open:
self.h5file.close()
self.cprint("HDF5 file written to: {}".format(self.filename))
class HDF5Pump(Module):
"""Read KM3NeT-formatted HDF5 files, event-by-event.
Parameters
----------
filename: str
        From where to read events.
skip_version_check: bool [default: False]
Don't check the H5 version. Might lead to unintended consequences.
shuffle: bool, optional [default: False]
Shuffle the group_ids, so that the blobs are mixed up.
    shuffle_function: function, optional [default: np.random.shuffle]
The function to be used to shuffle the group IDs.
    reset_index: bool, optional [default: False]
        When shuffle is set to True, reset the group ID and start counting
        the group_id from 0.
Notes
-----
Provides service h5singleton(h5loc): h5loc:str -> kp.Table
Singleton tables for a given HDF5 location.
"""
def configure(self):
self.filename = self.get("filename")
self.skip_version_check = self.get("skip_version_check", default=False)
self.verbose = bool(self.get("verbose"))
self.shuffle = self.get("shuffle", default=False)
self.shuffle_function = self.get("shuffle_function", default=np.random.shuffle)
self.reset_index = self.get("reset_index", default=False)
self.h5file = None
self.cut_mask = None
self.indices = {}
self._tab_indices = {}
self._singletons = {}
self.header = None
self.group_ids = None
self._n_groups = None
self.index = 0
self.h5file = tb.open_file(self.filename, "r")
Provenance().record_input(self.filename, comment="HDF5Pump input")
if not self.skip_version_check:
check_version(self.h5file)
self._read_group_info()
self.expose(self.h5singleton, "h5singleton")
def _read_group_info(self):
h5file = self.h5file
if "/group_info" not in h5file:
self.log.critical("Missing /group_info '%s', aborting..." % h5file.filename)
raise SystemExit
self.log.info("Reading group information from '/group_info'.")
group_info = h5file.get_node("/", "group_info")
self.group_ids = group_info.cols.group_id[:]
self._n_groups = len(self.group_ids)
if "/raw_header" in h5file:
self.log.info("Reading /raw_header")
try:
self.header = HDF5Header.from_pytable(h5file.get_node("/raw_header"))
except TypeError:
self.log.error("Could not parse the raw header, skipping!")
if self.shuffle:
self.log.info("Shuffling group IDs")
self.shuffle_function(self.group_ids)
def h5singleton(self, h5loc):
"""Returns the singleton table for a given HDF5 location"""
return self._singletons[h5loc]
def process(self, blob):
self.log.info("Reading blob at index %s" % self.index)
if self.index >= self._n_groups:
self.log.info("All groups are read.")
raise StopIteration
blob = self.get_blob(self.index)
self.index += 1
return blob
def get_blob(self, index):
blob = Blob()
group_id = self.group_ids[index]
# skip groups with separate columns
# and deal with them later
# this should be solved using hdf5 attributes in near future
split_table_locs = []
ndarray_locs = []
for tab in self.h5file.walk_nodes(classname="Table"):
h5loc = tab._v_pathname
loc, tabname = os.path.split(h5loc)
if tabname in self.indices:
self.log.info("index table '%s' already read, skip..." % h5loc)
continue
if loc in split_table_locs:
self.log.info("get_blob: '%s' is noted, skip..." % h5loc)
continue
if tabname == "_indices":
self.log.debug("get_blob: found index table '%s'" % h5loc)
split_table_locs.append(loc)
self.indices[loc] = self.h5file.get_node(h5loc)
continue
if tabname.endswith("_indices"):
self.log.debug("get_blob: found index table '%s' for NDArray" % h5loc)
ndarr_loc = h5loc.replace("_indices", "")
ndarray_locs.append(ndarr_loc)
if ndarr_loc in self.indices:
self.log.info(
"index table for NDArray '%s' already read, skip..." % ndarr_loc
)
continue
_index_table = self.h5file.get_node(h5loc)
self.indices[ndarr_loc] = {
"index": _index_table.col("index")[:],
"n_items": _index_table.col("n_items")[:],
}
continue
tabname = camelise(tabname)
if "group_id" in tab.dtype.names:
try:
if h5loc not in self._tab_indices:
self._read_tab_indices(h5loc)
tab_idx_start = self._tab_indices[h5loc][0][group_id]
tab_n_items = self._tab_indices[h5loc][1][group_id]
if tab_n_items == 0:
continue
arr = tab[tab_idx_start : tab_idx_start + tab_n_items]
except IndexError:
self.log.debug("No data for h5loc '%s'" % h5loc)
continue
except NotImplementedError:
# 64-bit unsigned integer columns like ``group_id``
# are not yet supported in conditions
self.log.debug(
"get_blob: found uint64 column at '{}'...".format(h5loc)
)
arr = tab.read()
arr = arr[arr["group_id"] == group_id]
except ValueError:
# "there are no columns taking part
# in condition ``group_id == 0``"
self.log.info(
"get_blob: no `%s` column found in '%s'! "
"skipping... " % ("group_id", h5loc)
)
continue
else:
if h5loc not in self._singletons:
log.info("Caching H5 singleton: {} ({})".format(tabname, h5loc))
self._singletons[h5loc] = Table(
tab.read(),
h5loc=h5loc,
split_h5=False,
name=tabname,
h5singleton=True,
)
blob[tabname] = self._singletons[h5loc]
continue
self.log.debug("h5loc: '{}'".format(h5loc))
tab = Table(arr, h5loc=h5loc, split_h5=False, name=tabname)
if self.shuffle and self.reset_index:
tab.group_id[:] = index
blob[tabname] = tab
# skipped locs are now column wise datasets (usually hits)
# currently hardcoded, in future using hdf5 attributes
# to get the right constructor
for loc in split_table_locs:
# if some events are missing (group_id not continuous),
# this does not work as intended
# idx, n_items = self.indices[loc][group_id]
idx = self.indices[loc].col("index")[group_id]
n_items = self.indices[loc].col("n_items")[group_id]
end = idx + n_items
node = self.h5file.get_node(loc)
columns = (c for c in node._v_children if c != "_indices")
data = {}
for col in columns:
data[col] = self.h5file.get_node(loc + "/" + col)[idx:end]
tabname = camelise(loc.split("/")[-1])
s_tab = Table(data, h5loc=loc, split_h5=True, name=tabname)
if self.shuffle and self.reset_index:
s_tab.group_id[:] = index
blob[tabname] = s_tab
if self.header is not None:
blob["Header"] = self.header
for ndarr_loc in ndarray_locs:
self.log.info("Reading %s" % ndarr_loc)
try:
idx = self.indices[ndarr_loc]["index"][group_id]
n_items = self.indices[ndarr_loc]["n_items"][group_id]
except IndexError:
continue
end = idx + n_items
ndarr = self.h5file.get_node(ndarr_loc)
ndarr_name = camelise(ndarr_loc.split("/")[-1])
_ndarr = NDArray(
ndarr[idx:end], h5loc=ndarr_loc, title=ndarr.title, group_id=group_id
)
if self.shuffle and self.reset_index:
_ndarr.group_id = index
blob[ndarr_name] = _ndarr
return blob
def _read_tab_indices(self, h5loc):
self.log.info("Reading table indices for '{}'".format(h5loc))
node = self.h5file.get_node(h5loc)
group_ids = None
if "group_id" in node.dtype.names:
group_ids = self.h5file.get_node(h5loc).cols.group_id[:]
else:
self.log.error("No data found in '{}'".format(h5loc))
return
self._tab_indices[h5loc] = create_index_tuple(group_ids)
def __len__(self):
self.log.info("Opening all HDF5 files to check the number of groups")
n_groups = 0
        for filename in [self.filename]:
with tb.open_file(filename, "r") as h5file:
group_info = h5file.get_node("/", "group_info")
self.group_ids = group_info.cols.group_id[:]
n_groups += len(self.group_ids)
return n_groups
def __iter__(self):
return self
def __next__(self):
# TODO: wrap that in self._check_if_next_file_is_needed(self.index)
if self.index >= self._n_groups:
self.log.info("All groups are read")
raise StopIteration
blob = self.get_blob(self.index)
self.index += 1
return blob
def __getitem__(self, index):
if isinstance(index, int):
return self.get_blob(index)
elif isinstance(index, slice):
return self._slice_generator(index)
else:
raise TypeError("index must be int or slice")
def _slice_generator(self, index):
"""A simple slice generator for iterations"""
start, stop, step = index.indices(len(self))
for i in range(start, stop, step):
yield self.get_blob(i)
self.filename = None
def _close_h5file(self):
if self.h5file:
self.h5file.close()
def finish(self):
self._close_h5file()
@jit
def create_index_tuple(group_ids):
"""An helper function to create index tuples for fast lookup in HDF5Pump"""
max_group_id = np.max(group_ids)
start_idx_arr = np.full(max_group_id + 1, 0)
n_items_arr = np.full(max_group_id + 1, 0)
current_group_id = group_ids[0]
current_idx = 0
item_count = 0
for group_id in group_ids:
if group_id != current_group_id:
start_idx_arr[current_group_id] = current_idx
n_items_arr[current_group_id] = item_count
current_idx += item_count
item_count = 0
current_group_id = group_id
item_count += 1
else:
start_idx_arr[current_group_id] = current_idx
n_items_arr[current_group_id] = item_count
return (start_idx_arr, n_items_arr)
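# Editor's note: an illustrative example (not part of km3pipe) of what
# create_index_tuple returns for contiguous, sorted group ids.
def _create_index_tuple_example():
    starts, counts = create_index_tuple(np.array([0, 0, 0, 1, 2, 2]))
    # starts -> array([0, 3, 4])
    # counts -> array([3, 1, 2])
    # rows of group g live at table[starts[g]:starts[g] + counts[g]]
    return starts, counts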
class HDF5MetaData(Module):
"""Metadata to attach to the HDF5 file.
Parameters
----------
data: dict
"""
def configure(self):
self.data = self.require("data")
self.expose(self.data, "HDF5MetaData")
@singledispatch
def header2table(data):
"""Convert a header to an `HDF5Header` compliant `kp.Table`"""
print(f"Unsupported header data of type {type(data)}")
@header2table.register(dict)
def _(header_dict):
if not header_dict:
print("Empty header dictionary.")
return
tab_dict = defaultdict(list)
for parameter, data in header_dict.items():
fields = []
values = []
types = []
for field_name, field_value in data.items():
fields.append(field_name)
values.append(str(field_value))
try:
_ = float(field_value) # noqa
types.append("f4")
except ValueError:
types.append("a{}".format(len(field_value)))
except TypeError: # e.g. values is None
types.append("a{}".format(len(str(field_value))))
tab_dict["parameter"].append(parameter.encode())
tab_dict["field_names"].append(" ".join(fields).encode())
tab_dict["field_values"].append(" ".join(values).encode())
tab_dict["dtype"].append(" ".join(types).encode())
log.debug(
"{}: {} {} {}".format(
tab_dict["parameter"][-1],
tab_dict["field_names"][-1],
tab_dict["field_values"][-1],
tab_dict["dtype"][-1],
)
)
return Table(tab_dict, h5loc="/raw_header", name="RawHeader", h5singleton=True)
@header2table.register(km3io.offline.Header)
def _(header):
out = {}
for parameter, values in header._data.items():
try:
values = values._asdict()
except AttributeError:
# single entry without further parameter name
# in specification
values = {parameter + "_0": values}
out[parameter] = values
return header2table(out)
@header2table.register(HDF5Header)
def _(header):
return header2table(header._data)
| mit |
zfrenchee/pandas | pandas/tests/groupby/aggregate/test_other.py | 1 | 17999 | # -*- coding: utf-8 -*-
"""
test all other .agg behavior
"""
from __future__ import print_function
import pytest
from datetime import datetime, timedelta
from functools import partial
import numpy as np
import pandas as pd
from pandas import date_range, DataFrame, Index, MultiIndex, Series
from pandas.core.groupby import SpecificationError
from pandas.io.formats.printing import pprint_thing
import pandas.util.testing as tm
def test_agg_api():
# GH 6337
# http://stackoverflow.com/questions/21706030/pandas-groupby-agg-function-column-dtype-error
# different api for agg when passed custom function with mixed frame
df = DataFrame({'data1': np.random.randn(5),
'data2': np.random.randn(5),
'key1': ['a', 'a', 'b', 'b', 'a'],
'key2': ['one', 'two', 'one', 'two', 'one']})
grouped = df.groupby('key1')
def peak_to_peak(arr):
return arr.max() - arr.min()
expected = grouped.agg([peak_to_peak])
expected.columns = ['data1', 'data2']
result = grouped.agg(peak_to_peak)
tm.assert_frame_equal(result, expected)
def test_agg_datetimes_mixed():
data = [[1, '2012-01-01', 1.0],
[2, '2012-01-02', 2.0],
[3, None, 3.0]]
df1 = DataFrame({'key': [x[0] for x in data],
'date': [x[1] for x in data],
'value': [x[2] for x in data]})
data = [[row[0],
datetime.strptime(row[1], '%Y-%m-%d').date() if row[1] else None,
row[2]]
for row in data]
df2 = DataFrame({'key': [x[0] for x in data],
'date': [x[1] for x in data],
'value': [x[2] for x in data]})
df1['weights'] = df1['value'] / df1['value'].sum()
gb1 = df1.groupby('date').aggregate(np.sum)
df2['weights'] = df1['value'] / df1['value'].sum()
gb2 = df2.groupby('date').aggregate(np.sum)
assert (len(gb1) == len(gb2))
def test_agg_period_index():
from pandas import period_range, PeriodIndex
prng = period_range('2012-1-1', freq='M', periods=3)
df = DataFrame(np.random.randn(3, 2), index=prng)
rs = df.groupby(level=0).sum()
assert isinstance(rs.index, PeriodIndex)
# GH 3579
index = period_range(start='1999-01', periods=5, freq='M')
s1 = Series(np.random.rand(len(index)), index=index)
s2 = Series(np.random.rand(len(index)), index=index)
series = [('s1', s1), ('s2', s2)]
df = DataFrame.from_items(series)
grouped = df.groupby(df.index.month)
list(grouped)
def test_agg_dict_parameter_cast_result_dtypes():
# GH 12821
df = DataFrame({'class': ['A', 'A', 'B', 'B', 'C', 'C', 'D', 'D'],
'time': date_range('1/1/2011', periods=8, freq='H')})
df.loc[[0, 1, 2, 5], 'time'] = None
# test for `first` function
exp = df.loc[[0, 3, 4, 6]].set_index('class')
grouped = df.groupby('class')
tm.assert_frame_equal(grouped.first(), exp)
tm.assert_frame_equal(grouped.agg('first'), exp)
tm.assert_frame_equal(grouped.agg({'time': 'first'}), exp)
tm.assert_series_equal(grouped.time.first(), exp['time'])
tm.assert_series_equal(grouped.time.agg('first'), exp['time'])
# test for `last` function
exp = df.loc[[0, 3, 4, 7]].set_index('class')
grouped = df.groupby('class')
tm.assert_frame_equal(grouped.last(), exp)
tm.assert_frame_equal(grouped.agg('last'), exp)
tm.assert_frame_equal(grouped.agg({'time': 'last'}), exp)
tm.assert_series_equal(grouped.time.last(), exp['time'])
tm.assert_series_equal(grouped.time.agg('last'), exp['time'])
# count
exp = pd.Series([2, 2, 2, 2],
index=Index(list('ABCD'), name='class'),
name='time')
tm.assert_series_equal(grouped.time.agg(len), exp)
tm.assert_series_equal(grouped.time.size(), exp)
exp = pd.Series([0, 1, 1, 2],
index=Index(list('ABCD'), name='class'),
name='time')
tm.assert_series_equal(grouped.time.count(), exp)
def test_agg_cast_results_dtypes():
# similar to GH12821
# xref #11444
u = [datetime(2015, x + 1, 1) for x in range(12)]
v = list('aaabbbbbbccd')
df = pd.DataFrame({'X': v, 'Y': u})
result = df.groupby('X')['Y'].agg(len)
expected = df.groupby('X')['Y'].count()
tm.assert_series_equal(result, expected)
def test_aggregate_float64_no_int64():
# see gh-11199
df = DataFrame({"a": [1, 2, 3, 4, 5],
"b": [1, 2, 2, 4, 5],
"c": [1, 2, 3, 4, 5]})
expected = DataFrame({"a": [1, 2.5, 4, 5]}, index=[1, 2, 4, 5])
expected.index.name = "b"
result = df.groupby("b")[["a"]].mean()
tm.assert_frame_equal(result, expected)
expected = DataFrame({"a": [1, 2.5, 4, 5], "c": [1, 2.5, 4, 5]},
index=[1, 2, 4, 5])
expected.index.name = "b"
result = df.groupby("b")[["a", "c"]].mean()
tm.assert_frame_equal(result, expected)
def test_aggregate_api_consistency():
# GH 9052
# make sure that the aggregates via dict
# are consistent
df = DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
'foo', 'bar', 'foo', 'foo'],
'B': ['one', 'one', 'two', 'two',
'two', 'two', 'one', 'two'],
'C': np.random.randn(8) + 1.0,
'D': np.arange(8)})
grouped = df.groupby(['A', 'B'])
c_mean = grouped['C'].mean()
c_sum = grouped['C'].sum()
d_mean = grouped['D'].mean()
d_sum = grouped['D'].sum()
result = grouped['D'].agg(['sum', 'mean'])
expected = pd.concat([d_sum, d_mean], axis=1)
expected.columns = ['sum', 'mean']
tm.assert_frame_equal(result, expected, check_like=True)
result = grouped.agg([np.sum, np.mean])
expected = pd.concat([c_sum, c_mean, d_sum, d_mean], axis=1)
expected.columns = MultiIndex.from_product([['C', 'D'],
['sum', 'mean']])
tm.assert_frame_equal(result, expected, check_like=True)
result = grouped[['D', 'C']].agg([np.sum, np.mean])
expected = pd.concat([d_sum, d_mean, c_sum, c_mean], axis=1)
expected.columns = MultiIndex.from_product([['D', 'C'],
['sum', 'mean']])
tm.assert_frame_equal(result, expected, check_like=True)
result = grouped.agg({'C': 'mean', 'D': 'sum'})
expected = pd.concat([d_sum, c_mean], axis=1)
tm.assert_frame_equal(result, expected, check_like=True)
result = grouped.agg({'C': ['mean', 'sum'],
'D': ['mean', 'sum']})
expected = pd.concat([c_mean, c_sum, d_mean, d_sum], axis=1)
expected.columns = MultiIndex.from_product([['C', 'D'],
                                                ['mean', 'sum']])
    tm.assert_frame_equal(result, expected, check_like=True)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = grouped[['D', 'C']].agg({'r': np.sum,
'r2': np.mean})
expected = pd.concat([d_sum, c_sum, d_mean, c_mean], axis=1)
expected.columns = MultiIndex.from_product([['r', 'r2'],
['D', 'C']])
tm.assert_frame_equal(result, expected, check_like=True)
def test_agg_dict_renaming_deprecation():
# 15931
df = pd.DataFrame({'A': [1, 1, 1, 2, 2],
'B': range(5),
'C': range(5)})
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False) as w:
df.groupby('A').agg({'B': {'foo': ['sum', 'max']},
'C': {'bar': ['count', 'min']}})
assert "using a dict with renaming" in str(w[0].message)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
df.groupby('A')[['B', 'C']].agg({'ma': 'max'})
with tm.assert_produces_warning(FutureWarning) as w:
df.groupby('A').B.agg({'foo': 'count'})
assert "using a dict on a Series for aggregation" in str(w[0].message)
def test_agg_compat():
# GH 12334
df = DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
'foo', 'bar', 'foo', 'foo'],
'B': ['one', 'one', 'two', 'two',
'two', 'two', 'one', 'two'],
'C': np.random.randn(8) + 1.0,
'D': np.arange(8)})
g = df.groupby(['A', 'B'])
expected = pd.concat([g['D'].sum(), g['D'].std()], axis=1)
expected.columns = MultiIndex.from_tuples([('C', 'sum'),
('C', 'std')])
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = g['D'].agg({'C': ['sum', 'std']})
tm.assert_frame_equal(result, expected, check_like=True)
expected = pd.concat([g['D'].sum(), g['D'].std()], axis=1)
expected.columns = ['C', 'D']
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = g['D'].agg({'C': 'sum', 'D': 'std'})
tm.assert_frame_equal(result, expected, check_like=True)
def test_agg_nested_dicts():
# API change for disallowing these types of nested dicts
df = DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
'foo', 'bar', 'foo', 'foo'],
'B': ['one', 'one', 'two', 'two',
'two', 'two', 'one', 'two'],
'C': np.random.randn(8) + 1.0,
'D': np.arange(8)})
g = df.groupby(['A', 'B'])
msg = r'cannot perform renaming for r[1-2] with a nested dictionary'
with tm.assert_raises_regex(SpecificationError, msg):
g.aggregate({'r1': {'C': ['mean', 'sum']},
'r2': {'D': ['mean', 'sum']}})
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = g.agg({'C': {'ra': ['mean', 'std']},
'D': {'rb': ['mean', 'std']}})
expected = pd.concat([g['C'].mean(), g['C'].std(),
g['D'].mean(), g['D'].std()],
axis=1)
expected.columns = pd.MultiIndex.from_tuples(
[('ra', 'mean'), ('ra', 'std'),
('rb', 'mean'), ('rb', 'std')])
tm.assert_frame_equal(result, expected, check_like=True)
# same name as the original column
# GH9052
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
expected = g['D'].agg({'result1': np.sum, 'result2': np.mean})
expected = expected.rename(columns={'result1': 'D'})
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = g['D'].agg({'D': np.sum, 'result2': np.mean})
tm.assert_frame_equal(result, expected, check_like=True)
def test_agg_item_by_item_raise_typeerror():
from numpy.random import randint
df = DataFrame(randint(10, size=(20, 10)))
def raiseException(df):
pprint_thing('----------------------------------------')
pprint_thing(df.to_string())
raise TypeError('test')
with tm.assert_raises_regex(TypeError, 'test'):
df.groupby(0).agg(raiseException)
def test_series_agg_multikey():
ts = tm.makeTimeSeries()
grouped = ts.groupby([lambda x: x.year, lambda x: x.month])
result = grouped.agg(np.sum)
expected = grouped.sum()
tm.assert_series_equal(result, expected)
def test_series_agg_multi_pure_python():
data = DataFrame(
{'A': ['foo', 'foo', 'foo', 'foo', 'bar', 'bar', 'bar', 'bar',
'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two', 'one', 'one', 'one', 'two',
'two', 'two', 'one'],
'C': ['dull', 'dull', 'shiny', 'dull', 'dull', 'shiny', 'shiny',
'dull', 'shiny', 'shiny', 'shiny'],
'D': np.random.randn(11),
'E': np.random.randn(11),
'F': np.random.randn(11)})
def bad(x):
assert (len(x.base) > 0)
return 'foo'
result = data.groupby(['A', 'B']).agg(bad)
expected = data.groupby(['A', 'B']).agg(lambda x: 'foo')
tm.assert_frame_equal(result, expected)
def test_agg_consistency():
# agg with ([]) and () not consistent
# GH 6715
def P1(a):
try:
return np.percentile(a.dropna(), q=1)
except Exception:
return np.nan
import datetime as dt
df = DataFrame({'col1': [1, 2, 3, 4],
'col2': [10, 25, 26, 31],
'date': [dt.date(2013, 2, 10), dt.date(2013, 2, 10),
dt.date(2013, 2, 11), dt.date(2013, 2, 11)]})
g = df.groupby('date')
expected = g.agg([P1])
expected.columns = expected.columns.levels[0]
result = g.agg(P1)
tm.assert_frame_equal(result, expected)
def test_agg_callables():
# GH 7929
df = DataFrame({'foo': [1, 2], 'bar': [3, 4]}).astype(np.int64)
class fn_class(object):
def __call__(self, x):
return sum(x)
equiv_callables = [sum,
np.sum,
lambda x: sum(x),
lambda x: x.sum(),
partial(sum),
fn_class(), ]
expected = df.groupby("foo").agg(sum)
for ecall in equiv_callables:
result = df.groupby('foo').agg(ecall)
tm.assert_frame_equal(result, expected)
def test_agg_over_numpy_arrays():
# GH 3788
df = pd.DataFrame([[1, np.array([10, 20, 30])],
[1, np.array([40, 50, 60])],
[2, np.array([20, 30, 40])]],
columns=['category', 'arraydata'])
result = df.groupby('category').agg(sum)
expected_data = [[np.array([50, 70, 90])], [np.array([20, 30, 40])]]
expected_index = pd.Index([1, 2], name='category')
expected_column = ['arraydata']
expected = pd.DataFrame(expected_data,
index=expected_index,
columns=expected_column)
tm.assert_frame_equal(result, expected)
def test_agg_timezone_round_trip():
# GH 15426
ts = pd.Timestamp("2016-01-01 12:00:00", tz='US/Pacific')
df = pd.DataFrame({'a': 1,
'b': [ts + timedelta(minutes=nn) for nn in range(10)]})
result1 = df.groupby('a')['b'].agg(np.min).iloc[0]
result2 = df.groupby('a')['b'].agg(lambda x: np.min(x)).iloc[0]
result3 = df.groupby('a')['b'].min().iloc[0]
assert result1 == ts
assert result2 == ts
assert result3 == ts
dates = [pd.Timestamp("2016-01-0%d 12:00:00" % i, tz='US/Pacific')
for i in range(1, 5)]
df = pd.DataFrame({'A': ['a', 'b'] * 2, 'B': dates})
grouped = df.groupby('A')
ts = df['B'].iloc[0]
assert ts == grouped.nth(0)['B'].iloc[0]
assert ts == grouped.head(1)['B'].iloc[0]
assert ts == grouped.first()['B'].iloc[0]
assert ts == grouped.apply(lambda x: x.iloc[0])[0]
ts = df['B'].iloc[2]
assert ts == grouped.last()['B'].iloc[0]
assert ts == grouped.apply(lambda x: x.iloc[-1])[0]
def test_sum_uint64_overflow():
# see gh-14758
# Convert to uint64 and don't overflow
df = pd.DataFrame([[1, 2], [3, 4], [5, 6]], dtype=object)
df = df + 9223372036854775807
index = pd.Index([9223372036854775808,
9223372036854775810,
9223372036854775812],
dtype=np.uint64)
expected = pd.DataFrame({1: [9223372036854775809,
9223372036854775811,
9223372036854775813]},
index=index)
expected.index.name = 0
result = df.groupby(0).sum()
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("structure, expected", [
(tuple, pd.DataFrame({'C': {(1, 1): (1, 1, 1), (3, 4): (3, 4, 4)}})),
(list, pd.DataFrame({'C': {(1, 1): [1, 1, 1], (3, 4): [3, 4, 4]}})),
(lambda x: tuple(x), pd.DataFrame({'C': {(1, 1): (1, 1, 1),
(3, 4): (3, 4, 4)}})),
(lambda x: list(x), pd.DataFrame({'C': {(1, 1): [1, 1, 1],
(3, 4): [3, 4, 4]}}))
])
def test_agg_structs_dataframe(structure, expected):
df = pd.DataFrame({'A': [1, 1, 1, 3, 3, 3],
'B': [1, 1, 1, 4, 4, 4],
'C': [1, 1, 1, 3, 4, 4]})
result = df.groupby(['A', 'B']).aggregate(structure)
expected.index.names = ['A', 'B']
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("structure, expected", [
(tuple, pd.Series([(1, 1, 1), (3, 4, 4)], index=[1, 3], name='C')),
(list, pd.Series([[1, 1, 1], [3, 4, 4]], index=[1, 3], name='C')),
(lambda x: tuple(x), pd.Series([(1, 1, 1), (3, 4, 4)],
index=[1, 3], name='C')),
(lambda x: list(x), pd.Series([[1, 1, 1], [3, 4, 4]],
index=[1, 3], name='C'))
])
def test_agg_structs_series(structure, expected):
# Issue #18079
df = pd.DataFrame({'A': [1, 1, 1, 3, 3, 3],
'B': [1, 1, 1, 4, 4, 4],
'C': [1, 1, 1, 3, 4, 4]})
result = df.groupby('A')['C'].aggregate(structure)
expected.index.name = 'A'
tm.assert_series_equal(result, expected)
@pytest.mark.xfail(reason="GH-18869: agg func not called on empty groups.")
def test_agg_category_nansum():
categories = ['a', 'b', 'c']
df = pd.DataFrame({"A": pd.Categorical(['a', 'a', 'b'],
categories=categories),
'B': [1, 2, 3]})
result = df.groupby("A").B.agg(np.nansum)
expected = pd.Series([3, 3, 0],
index=pd.CategoricalIndex(['a', 'b', 'c'],
categories=categories,
name='A'),
name='B')
tm.assert_series_equal(result, expected)
| bsd-3-clause |
untom/scikit-learn | sklearn/datasets/tests/test_samples_generator.py | 67 | 14842 | from __future__ import division
from collections import defaultdict
from functools import partial
import numpy as np
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.datasets import make_classification
from sklearn.datasets import make_multilabel_classification
from sklearn.datasets import make_hastie_10_2
from sklearn.datasets import make_regression
from sklearn.datasets import make_blobs
from sklearn.datasets import make_friedman1
from sklearn.datasets import make_friedman2
from sklearn.datasets import make_friedman3
from sklearn.datasets import make_low_rank_matrix
from sklearn.datasets import make_sparse_coded_signal
from sklearn.datasets import make_sparse_uncorrelated
from sklearn.datasets import make_spd_matrix
from sklearn.datasets import make_swiss_roll
from sklearn.datasets import make_s_curve
from sklearn.datasets import make_biclusters
from sklearn.datasets import make_checkerboard
from sklearn.utils.validation import assert_all_finite
def test_make_classification():
X, y = make_classification(n_samples=100, n_features=20, n_informative=5,
n_redundant=1, n_repeated=1, n_classes=3,
n_clusters_per_class=1, hypercube=False,
shift=None, scale=None, weights=[0.1, 0.25],
random_state=0)
assert_equal(X.shape, (100, 20), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(np.unique(y).shape, (3,), "Unexpected number of classes")
assert_equal(sum(y == 0), 10, "Unexpected number of samples in class #0")
assert_equal(sum(y == 1), 25, "Unexpected number of samples in class #1")
assert_equal(sum(y == 2), 65, "Unexpected number of samples in class #2")
def test_make_classification_informative_features():
"""Test the construction of informative features in make_classification
Also tests `n_clusters_per_class`, `n_classes`, `hypercube` and
fully-specified `weights`.
"""
# Create very separate clusters; check that vertices are unique and
# correspond to classes
class_sep = 1e6
make = partial(make_classification, class_sep=class_sep, n_redundant=0,
n_repeated=0, flip_y=0, shift=0, scale=1, shuffle=False)
for n_informative, weights, n_clusters_per_class in [(2, [1], 1),
(2, [1/3] * 3, 1),
(2, [1/4] * 4, 1),
(2, [1/2] * 2, 2),
(2, [3/4, 1/4], 2),
(10, [1/3] * 3, 10)
]:
n_classes = len(weights)
n_clusters = n_classes * n_clusters_per_class
n_samples = n_clusters * 50
for hypercube in (False, True):
X, y = make(n_samples=n_samples, n_classes=n_classes,
weights=weights, n_features=n_informative,
n_informative=n_informative,
n_clusters_per_class=n_clusters_per_class,
hypercube=hypercube, random_state=0)
assert_equal(X.shape, (n_samples, n_informative))
assert_equal(y.shape, (n_samples,))
# Cluster by sign, viewed as strings to allow uniquing
signs = np.sign(X)
signs = signs.view(dtype='|S{0}'.format(signs.strides[0]))
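            # Viewing each row of +/-1 signs as a single fixed-width byte string
            # (strides[0] bytes per row) lets np.unique identify the distinct
            # sign patterns, i.e. the quadrants/vertices occupied by the clusters.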
unique_signs, cluster_index = np.unique(signs,
return_inverse=True)
assert_equal(len(unique_signs), n_clusters,
"Wrong number of clusters, or not in distinct "
"quadrants")
clusters_by_class = defaultdict(set)
for cluster, cls in zip(cluster_index, y):
clusters_by_class[cls].add(cluster)
for clusters in clusters_by_class.values():
assert_equal(len(clusters), n_clusters_per_class,
"Wrong number of clusters per class")
assert_equal(len(clusters_by_class), n_classes,
"Wrong number of classes")
assert_array_almost_equal(np.bincount(y) / len(y) // weights,
[1] * n_classes,
err_msg="Wrong number of samples "
"per class")
# Ensure on vertices of hypercube
for cluster in range(len(unique_signs)):
centroid = X[cluster_index == cluster].mean(axis=0)
if hypercube:
assert_array_almost_equal(np.abs(centroid),
[class_sep] * n_informative,
decimal=0,
err_msg="Clusters are not "
"centered on hypercube "
"vertices")
else:
assert_raises(AssertionError,
assert_array_almost_equal,
np.abs(centroid),
[class_sep] * n_informative,
decimal=0,
err_msg="Clusters should not be cenetered "
"on hypercube vertices")
assert_raises(ValueError, make, n_features=2, n_informative=2, n_classes=5,
n_clusters_per_class=1)
assert_raises(ValueError, make, n_features=2, n_informative=2, n_classes=3,
n_clusters_per_class=2)
def test_make_multilabel_classification_return_sequences():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = assert_warns(DeprecationWarning, make_multilabel_classification,
n_samples=100, n_features=20, n_classes=3,
random_state=0, allow_unlabeled=allow_unlabeled)
assert_equal(X.shape, (100, 20), "X shape mismatch")
if not allow_unlabeled:
assert_equal(max([max(y) for y in Y]), 2)
assert_equal(min([len(y) for y in Y]), min_length)
assert_true(max([len(y) for y in Y]) <= 3)
def test_make_multilabel_classification_return_indicator():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = make_multilabel_classification(n_samples=25, n_features=20,
n_classes=3, random_state=0,
return_indicator=True,
allow_unlabeled=allow_unlabeled)
assert_equal(X.shape, (25, 20), "X shape mismatch")
assert_equal(Y.shape, (25, 3), "Y shape mismatch")
assert_true(np.all(np.sum(Y, axis=0) > min_length))
# Also test return_distributions
X2, Y2, p_c, p_w_c = make_multilabel_classification(
n_samples=25, n_features=20, n_classes=3, random_state=0,
return_indicator=True, allow_unlabeled=allow_unlabeled,
return_distributions=True)
assert_array_equal(X, X2)
assert_array_equal(Y, Y2)
assert_equal(p_c.shape, (3,))
assert_almost_equal(p_c.sum(), 1)
assert_equal(p_w_c.shape, (20, 3))
assert_almost_equal(p_w_c.sum(axis=0), [1] * 3)
def test_make_hastie_10_2():
X, y = make_hastie_10_2(n_samples=100, random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(np.unique(y).shape, (2,), "Unexpected number of classes")
def test_make_regression():
X, y, c = make_regression(n_samples=100, n_features=10, n_informative=3,
effective_rank=5, coef=True, bias=0.0,
noise=1.0, random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(c.shape, (10,), "coef shape mismatch")
assert_equal(sum(c != 0.0), 3, "Unexpected number of informative features")
# Test that y ~= np.dot(X, c) + bias + N(0, 1.0).
assert_almost_equal(np.std(y - np.dot(X, c)), 1.0, decimal=1)
# Test with small number of features.
X, y = make_regression(n_samples=100, n_features=1) # n_informative=3
assert_equal(X.shape, (100, 1))
def test_make_regression_multitarget():
X, y, c = make_regression(n_samples=100, n_features=10, n_informative=3,
n_targets=3, coef=True, noise=1., random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100, 3), "y shape mismatch")
assert_equal(c.shape, (10, 3), "coef shape mismatch")
assert_array_equal(sum(c != 0.0), 3,
"Unexpected number of informative features")
# Test that y ~= np.dot(X, c) + bias + N(0, 1.0)
assert_almost_equal(np.std(y - np.dot(X, c)), 1.0, decimal=1)
def test_make_blobs():
X, y = make_blobs(n_samples=50, n_features=2,
centers=[[0.0, 0.0], [1.0, 1.0], [0.0, 1.0]],
random_state=0)
assert_equal(X.shape, (50, 2), "X shape mismatch")
assert_equal(y.shape, (50,), "y shape mismatch")
assert_equal(np.unique(y).shape, (3,), "Unexpected number of blobs")
def test_make_friedman1():
X, y = make_friedman1(n_samples=5, n_features=10, noise=0.0,
random_state=0)
assert_equal(X.shape, (5, 10), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y,
10 * np.sin(np.pi * X[:, 0] * X[:, 1])
+ 20 * (X[:, 2] - 0.5) ** 2
+ 10 * X[:, 3] + 5 * X[:, 4])
def test_make_friedman2():
X, y = make_friedman2(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 4), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y,
(X[:, 0] ** 2
+ (X[:, 1] * X[:, 2] - 1
/ (X[:, 1] * X[:, 3])) ** 2) ** 0.5)
def test_make_friedman3():
X, y = make_friedman3(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 4), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y, np.arctan((X[:, 1] * X[:, 2]
- 1 / (X[:, 1] * X[:, 3]))
/ X[:, 0]))
def test_make_low_rank_matrix():
X = make_low_rank_matrix(n_samples=50, n_features=25, effective_rank=5,
tail_strength=0.01, random_state=0)
assert_equal(X.shape, (50, 25), "X shape mismatch")
from numpy.linalg import svd
u, s, v = svd(X)
assert_less(sum(s) - 5, 0.1, "X rank is not approximately 5")
def test_make_sparse_coded_signal():
Y, D, X = make_sparse_coded_signal(n_samples=5, n_components=8,
n_features=10, n_nonzero_coefs=3,
random_state=0)
assert_equal(Y.shape, (10, 5), "Y shape mismatch")
assert_equal(D.shape, (10, 8), "D shape mismatch")
assert_equal(X.shape, (8, 5), "X shape mismatch")
for col in X.T:
assert_equal(len(np.flatnonzero(col)), 3, 'Non-zero coefs mismatch')
assert_array_almost_equal(np.dot(D, X), Y)
assert_array_almost_equal(np.sqrt((D ** 2).sum(axis=0)),
np.ones(D.shape[1]))
def test_make_sparse_uncorrelated():
X, y = make_sparse_uncorrelated(n_samples=5, n_features=10, random_state=0)
assert_equal(X.shape, (5, 10), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
def test_make_spd_matrix():
X = make_spd_matrix(n_dim=5, random_state=0)
assert_equal(X.shape, (5, 5), "X shape mismatch")
assert_array_almost_equal(X, X.T)
from numpy.linalg import eig
eigenvalues, _ = eig(X)
assert_array_equal(eigenvalues > 0, np.array([True] * 5),
"X is not positive-definite")
def test_make_swiss_roll():
X, t = make_swiss_roll(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 3), "X shape mismatch")
assert_equal(t.shape, (5,), "t shape mismatch")
assert_array_almost_equal(X[:, 0], t * np.cos(t))
assert_array_almost_equal(X[:, 2], t * np.sin(t))
def test_make_s_curve():
X, t = make_s_curve(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 3), "X shape mismatch")
assert_equal(t.shape, (5,), "t shape mismatch")
assert_array_almost_equal(X[:, 0], np.sin(t))
assert_array_almost_equal(X[:, 2], np.sign(t) * (np.cos(t) - 1))
def test_make_biclusters():
X, rows, cols = make_biclusters(
shape=(100, 100), n_clusters=4, shuffle=True, random_state=0)
assert_equal(X.shape, (100, 100), "X shape mismatch")
assert_equal(rows.shape, (4, 100), "rows shape mismatch")
assert_equal(cols.shape, (4, 100,), "columns shape mismatch")
assert_all_finite(X)
assert_all_finite(rows)
assert_all_finite(cols)
X2, _, _ = make_biclusters(shape=(100, 100), n_clusters=4,
shuffle=True, random_state=0)
assert_array_almost_equal(X, X2)
def test_make_checkerboard():
X, rows, cols = make_checkerboard(
shape=(100, 100), n_clusters=(20, 5),
shuffle=True, random_state=0)
assert_equal(X.shape, (100, 100), "X shape mismatch")
assert_equal(rows.shape, (100, 100), "rows shape mismatch")
assert_equal(cols.shape, (100, 100,), "columns shape mismatch")
X, rows, cols = make_checkerboard(
shape=(100, 100), n_clusters=2, shuffle=True, random_state=0)
assert_all_finite(X)
assert_all_finite(rows)
assert_all_finite(cols)
X1, _, _ = make_checkerboard(shape=(100, 100), n_clusters=2,
shuffle=True, random_state=0)
X2, _, _ = make_checkerboard(shape=(100, 100), n_clusters=2,
shuffle=True, random_state=0)
assert_array_equal(X1, X2)
| bsd-3-clause |
ajdawson/cartopy | lib/cartopy/tests/mpl/test_mpl_integration.py | 2 | 18162 | # (C) British Crown Copyright 2011 - 2014, Met Office
#
# This file is part of cartopy.
#
# cartopy is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# cartopy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with cartopy. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
import math
import warnings
from nose.tools import assert_equal
import numpy as np
import matplotlib.pyplot as plt
import six
import cartopy.crs as ccrs
from cartopy.tests.mpl import ImageTesting
@ImageTesting(['global_contour_wrap'])
def test_global_contour_wrap_new_transform():
ax = plt.axes(projection=ccrs.PlateCarree())
ax.coastlines()
x, y = np.meshgrid(np.linspace(0, 360), np.linspace(-90, 90))
data = np.sin(np.sqrt(x ** 2 + y ** 2))
plt.contour(x, y, data, transform=ccrs.PlateCarree())
@ImageTesting(['global_contour_wrap'])
def test_global_contour_wrap_no_transform():
ax = plt.axes(projection=ccrs.PlateCarree())
ax.coastlines()
x, y = np.meshgrid(np.linspace(0, 360), np.linspace(-90, 90))
data = np.sin(np.sqrt(x ** 2 + y ** 2))
plt.contour(x, y, data)
@ImageTesting(['global_contourf_wrap'])
def test_global_contourf_wrap_new_transform():
ax = plt.axes(projection=ccrs.PlateCarree())
ax.coastlines()
x, y = np.meshgrid(np.linspace(0, 360), np.linspace(-90, 90))
data = np.sin(np.sqrt(x ** 2 + y ** 2))
plt.contourf(x, y, data, transform=ccrs.PlateCarree())
@ImageTesting(['global_contourf_wrap'])
def test_global_contourf_wrap_no_transform():
ax = plt.axes(projection=ccrs.PlateCarree())
ax.coastlines()
x, y = np.meshgrid(np.linspace(0, 360), np.linspace(-90, 90))
data = np.sin(np.sqrt(x ** 2 + y ** 2))
plt.contourf(x, y, data)
@ImageTesting(['global_pcolor_wrap'])
def test_global_pcolor_wrap_new_transform():
ax = plt.axes(projection=ccrs.PlateCarree())
ax.coastlines()
x, y = np.meshgrid(np.linspace(0, 360), np.linspace(-90, 90))
data = np.sin(np.sqrt(x ** 2 + y ** 2))
plt.pcolor(x, y, data, transform=ccrs.PlateCarree())
@ImageTesting(['global_pcolor_wrap'])
def test_global_pcolor_wrap_no_transform():
ax = plt.axes(projection=ccrs.PlateCarree())
ax.coastlines()
x, y = np.meshgrid(np.linspace(0, 360), np.linspace(-90, 90))
data = np.sin(np.sqrt(x ** 2 + y ** 2))
plt.pcolor(x, y, data)
@ImageTesting(['global_scatter_wrap'])
def test_global_scatter_wrap_new_transform():
ax = plt.axes(projection=ccrs.PlateCarree())
# By default the coastline feature will be drawn after patches.
# By setting zorder we can ensure our scatter points are drawn
# after the coastlines.
ax.coastlines(zorder=0)
x, y = np.meshgrid(np.linspace(0, 360), np.linspace(-90, 90))
data = np.sin(np.sqrt(x ** 2 + y ** 2))
plt.scatter(x, y, c=data, transform=ccrs.PlateCarree())
@ImageTesting(['global_scatter_wrap'])
def test_global_scatter_wrap_no_transform():
ax = plt.axes(projection=ccrs.PlateCarree())
ax.coastlines(zorder=0)
x, y = np.meshgrid(np.linspace(0, 360), np.linspace(-90, 90))
data = np.sin(np.sqrt(x ** 2 + y ** 2))
plt.scatter(x, y, c=data)
@ImageTesting(['global_map'])
def test_global_map():
ax = plt.axes(projection=ccrs.Robinson())
# ax.coastlines()
# ax.gridlines(5)
plt.plot(-0.08, 51.53, 'o', transform=ccrs.PlateCarree())
plt.plot([-0.08, 132], [51.53, 43.17], color='red',
transform=ccrs.PlateCarree())
plt.plot([-0.08, 132], [51.53, 43.17], color='blue',
transform=ccrs.Geodetic())
@ImageTesting(['simple_global'])
def test_simple_global():
plt.axes(projection=ccrs.PlateCarree())
plt.gca().coastlines()
# produces a global map, despite not having needed to set the limits
@ImageTesting(['multiple_projections1'])
def test_multiple_projections():
projections = [ccrs.PlateCarree(),
ccrs.Robinson(),
ccrs.RotatedPole(pole_latitude=45, pole_longitude=180),
ccrs.OSGB(),
ccrs.TransverseMercator(),
ccrs.Mercator(
globe=ccrs.Globe(semimajor_axis=math.degrees(1)),
min_latitude=-85., max_latitude=85.),
ccrs.LambertCylindrical(),
ccrs.Miller(),
ccrs.Gnomonic(),
ccrs.Stereographic(),
ccrs.NorthPolarStereo(),
ccrs.SouthPolarStereo(),
ccrs.Orthographic(),
ccrs.Mollweide(),
ccrs.InterruptedGoodeHomolosine(),
]
fig = plt.figure(figsize=(10, 10))
for i, prj in enumerate(projections, 1):
ax = fig.add_subplot(5, 5, i, projection=prj)
ax.set_global()
ax.coastlines()
plt.plot(-0.08, 51.53, 'o', transform=ccrs.PlateCarree())
plt.plot([-0.08, 132], [51.53, 43.17], color='red',
transform=ccrs.PlateCarree())
plt.plot([-0.08, 132], [51.53, 43.17], color='blue',
transform=ccrs.Geodetic())
def test_cursor_values():
ax = plt.axes(projection=ccrs.NorthPolarStereo())
x, y = np.array([-969100.]), np.array([-4457000.])
r = ax.format_coord(x, y)
assert_equal(r.encode('ascii', 'ignore'),
six.b('-9.691e+05, -4.457e+06 (50.716617N, 12.267069W)'))
ax = plt.axes(projection=ccrs.PlateCarree())
x, y = np.array([-181.5]), np.array([50.])
r = ax.format_coord(x, y)
assert_equal(r.encode('ascii', 'ignore'),
six.b('-181.5, 50 (50.000000N, 178.500000E)'))
ax = plt.axes(projection=ccrs.Robinson())
x, y = np.array([16060595.2]), np.array([2363093.4])
r = ax.format_coord(x, y)
assert_equal(r.encode('ascii', 'ignore'),
six.b('1.606e+07, 2.363e+06 (22.095524N, 173.709136E)'))
plt.close()
@ImageTesting(['natural_earth_interface'])
def test_axes_natural_earth_interface():
rob = ccrs.Robinson()
ax = plt.axes(projection=rob)
with warnings.catch_warnings(record=True) as all_warnings:
warnings.simplefilter('always')
ax.natural_earth_shp('rivers_lake_centerlines', edgecolor='black',
facecolor='none')
ax.natural_earth_shp('lakes', facecolor='blue')
assert_equal(len(all_warnings), 2)
for warning in all_warnings:
msg = str(warning.message)
assert 'deprecated' in msg
assert 'add_feature' in msg
@ImageTesting(['pcolormesh_global_wrap1'])
def test_pcolormesh_global_with_wrap1():
# make up some realistic data with bounds (such as data from the UM)
nx, ny = 36, 18
xbnds = np.linspace(0, 360, nx, endpoint=True)
ybnds = np.linspace(-90, 90, ny, endpoint=True)
x, y = np.meshgrid(xbnds, ybnds)
data = np.exp(np.sin(np.deg2rad(x)) + np.cos(np.deg2rad(y)))
data = data[:-1, :-1]
ax = plt.subplot(211, projection=ccrs.PlateCarree())
plt.pcolormesh(xbnds, ybnds, data, transform=ccrs.PlateCarree())
ax.coastlines()
ax.set_global() # make sure everything is visible
ax = plt.subplot(212, projection=ccrs.PlateCarree(180))
plt.pcolormesh(xbnds, ybnds, data, transform=ccrs.PlateCarree())
ax.coastlines()
ax.set_global() # make sure everything is visible
@ImageTesting(['pcolormesh_global_wrap2'])
def test_pcolormesh_global_with_wrap2():
# make up some realistic data with bounds (such as data from the UM)
nx, ny = 36, 18
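    # Build cell-edge (bounds) coordinates: shift regularly spaced cell centres
    # by half a grid step and append one trailing edge, so there is one more
    # edge than cells in each direction.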
xbnds, xstep = np.linspace(0, 360, nx - 1, retstep=True, endpoint=True)
    ybnds, ystep = np.linspace(-90, 90, ny - 1, retstep=True, endpoint=True)
xbnds -= xstep / 2
ybnds -= ystep / 2
xbnds = np.append(xbnds, xbnds[-1] + xstep)
ybnds = np.append(ybnds, ybnds[-1] + ystep)
x, y = np.meshgrid(xbnds, ybnds)
data = np.exp(np.sin(np.deg2rad(x)) + np.cos(np.deg2rad(y)))
data = data[:-1, :-1]
ax = plt.subplot(211, projection=ccrs.PlateCarree())
plt.pcolormesh(xbnds, ybnds, data, transform=ccrs.PlateCarree())
ax.coastlines()
ax.set_global() # make sure everything is visible
ax = plt.subplot(212, projection=ccrs.PlateCarree(180))
plt.pcolormesh(xbnds, ybnds, data, transform=ccrs.PlateCarree())
ax.coastlines()
ax.set_global() # make sure everything is visible
@ImageTesting(['pcolormesh_global_wrap3'])
def test_pcolormesh_global_with_wrap3():
nx, ny = 33, 17
xbnds = np.linspace(-1.875, 358.125, nx, endpoint=True)
ybnds = np.linspace(91.25, -91.25, ny, endpoint=True)
xbnds, ybnds = np.meshgrid(xbnds, ybnds)
data = np.exp(np.sin(np.deg2rad(xbnds)) + np.cos(np.deg2rad(ybnds)))
# this step is not necessary, but makes the plot even harder to do (i.e.
# it really puts cartopy through its paces)
ybnds = np.append(ybnds, ybnds[:, 1:2], axis=1)
xbnds = np.append(xbnds, xbnds[:, 1:2] + 360, axis=1)
data = np.ma.concatenate([data, data[:, 0:1]], axis=1)
data = data[:-1, :-1]
data = np.ma.masked_greater(data, 2.6)
ax = plt.subplot(311, projection=ccrs.PlateCarree(-45))
c = plt.pcolormesh(xbnds, ybnds, data, transform=ccrs.PlateCarree())
assert c._wrapped_collection_fix is not None, \
'No pcolormesh wrapping was done when it should have been.'
ax.coastlines()
ax.set_global() # make sure everything is visible
ax = plt.subplot(312, projection=ccrs.PlateCarree(-1.87499952))
plt.pcolormesh(xbnds, ybnds, data, transform=ccrs.PlateCarree())
ax.coastlines()
ax.set_global() # make sure everything is visible
ax = plt.subplot(313, projection=ccrs.Robinson(-2))
plt.pcolormesh(xbnds, ybnds, data, transform=ccrs.PlateCarree())
ax.coastlines()
ax.set_global() # make sure everything is visible
@ImageTesting(['pcolormesh_limited_area_wrap'])
def test_pcolormesh_limited_area_wrap():
# make up some realistic data with bounds (such as data from the UM's North
# Atlantic Europe model)
nx, ny = 22, 36
xbnds = np.linspace(311.91998291, 391.11999512, nx, endpoint=True)
ybnds = np.linspace(-23.59000015, 24.81000137, ny, endpoint=True)
x, y = np.meshgrid(xbnds, ybnds)
data = ((np.sin(np.deg2rad(x))) / 10. + np.exp(np.cos(np.deg2rad(y))))
data = data[:-1, :-1]
rp = ccrs.RotatedPole(pole_longitude=177.5, pole_latitude=37.5)
plt.figure(figsize=(10, 6))
ax = plt.subplot(221, projection=ccrs.PlateCarree())
plt.pcolormesh(xbnds, ybnds, data, transform=rp, cmap='Set1')
ax.coastlines()
ax = plt.subplot(222, projection=ccrs.PlateCarree(180))
plt.pcolormesh(xbnds, ybnds, data, transform=rp, cmap='Set1')
ax.coastlines()
ax.set_global()
# draw the same plot, only more zoomed in, and using the 2d versions
# of the coordinates (just to test that 1d and 2d are both suitably
# being fixed)
ax = plt.subplot(223, projection=ccrs.PlateCarree(180))
plt.pcolormesh(x, y, data, transform=rp, cmap='Set1')
ax.coastlines()
ax.set_extent([-70, 0, 0, 80])
ax = plt.subplot(224, projection=rp)
plt.pcolormesh(xbnds, ybnds, data, transform=rp, cmap='Set1')
ax.coastlines()
@ImageTesting(['pcolormesh_goode_wrap'])
def test_pcolormesh_goode_wrap():
# global data on an Interrupted Goode Homolosine projection
# shouldn't spill outside projection boundary
x = np.linspace(0, 360, 73)
y = np.linspace(-87.5, 87.5, 36)
X, Y = np.meshgrid(*[np.deg2rad(c) for c in (x, y)])
Z = np.cos(Y) + 0.375 * np.sin(2. * X)
Z = Z[:-1, :-1]
ax = plt.axes(projection=ccrs.InterruptedGoodeHomolosine())
ax.coastlines()
ax.pcolormesh(x, y, Z, transform=ccrs.PlateCarree())
@ImageTesting(['pcolormesh_mercator_wrap'])
def test_pcolormesh_mercator_wrap():
x = np.linspace(0, 360, 73)
y = np.linspace(-87.5, 87.5, 36)
X, Y = np.meshgrid(*[np.deg2rad(c) for c in (x, y)])
Z = np.cos(Y) + 0.375 * np.sin(2. * X)
Z = Z[:-1, :-1]
ax = plt.axes(projection=ccrs.Mercator())
ax.coastlines()
ax.pcolormesh(x, y, Z, transform=ccrs.PlateCarree())
@ImageTesting(['quiver_plate_carree'])
def test_quiver_plate_carree():
x = np.arange(-60, 42.5, 2.5)
y = np.arange(30, 72.5, 2.5)
x2d, y2d = np.meshgrid(x, y)
u = np.cos(np.deg2rad(y2d))
v = np.cos(2. * np.deg2rad(x2d))
mag = (u**2 + v**2)**.5
plot_extent = [-60, 40, 30, 70]
plt.figure(figsize=(6, 6))
# plot on native projection
ax = plt.subplot(211, projection=ccrs.PlateCarree())
ax.set_extent(plot_extent, crs=ccrs.PlateCarree())
ax.coastlines()
ax.quiver(x, y, u, v, mag)
# plot on a different projection
ax = plt.subplot(212, projection=ccrs.NorthPolarStereo())
ax.set_extent(plot_extent, crs=ccrs.PlateCarree())
ax.coastlines()
ax.quiver(x, y, u, v, mag, transform=ccrs.PlateCarree())
@ImageTesting(['quiver_rotated_pole'])
def test_quiver_rotated_pole():
nx, ny = 22, 36
x = np.linspace(311.91998291, 391.11999512, nx, endpoint=True)
y = np.linspace(-23.59000015, 24.81000137, ny, endpoint=True)
x2d, y2d = np.meshgrid(x, y)
u = np.cos(np.deg2rad(y2d))
v = -2. * np.cos(2. * np.deg2rad(y2d)) * np.sin(np.deg2rad(x2d))
mag = (u**2 + v**2)**.5
rp = ccrs.RotatedPole(pole_longitude=177.5, pole_latitude=37.5)
plot_extent = [x[0], x[-1], y[0], y[-1]]
# plot on native projection
plt.figure(figsize=(6, 6))
ax = plt.subplot(211, projection=rp)
ax.set_extent(plot_extent, crs=rp)
ax.coastlines()
ax.quiver(x, y, u, v, mag)
# plot on different projection
ax = plt.subplot(212, projection=ccrs.PlateCarree())
ax.set_extent(plot_extent, crs=rp)
ax.coastlines()
ax.quiver(x, y, u, v, mag, transform=rp)
@ImageTesting(['quiver_regrid'])
def test_quiver_regrid():
x = np.arange(-60, 42.5, 2.5)
y = np.arange(30, 72.5, 2.5)
x2d, y2d = np.meshgrid(x, y)
u = np.cos(np.deg2rad(y2d))
v = np.cos(2. * np.deg2rad(x2d))
mag = (u**2 + v**2)**.5
plot_extent = [-60, 40, 30, 70]
plt.figure(figsize=(6, 3))
ax = plt.axes(projection=ccrs.NorthPolarStereo())
ax.set_extent(plot_extent, crs=ccrs.PlateCarree())
ax.coastlines()
ax.quiver(x, y, u, v, mag, transform=ccrs.PlateCarree(),
regrid_shape=30)
@ImageTesting(['quiver_regrid_with_extent'])
def test_quiver_regrid_with_extent():
x = np.arange(-60, 42.5, 2.5)
y = np.arange(30, 72.5, 2.5)
x2d, y2d = np.meshgrid(x, y)
u = np.cos(np.deg2rad(y2d))
v = np.cos(2. * np.deg2rad(x2d))
mag = (u**2 + v**2)**.5
plot_extent = [-60, 40, 30, 70]
target_extent = [-3e6, 2e6, -6e6, -2.5e6]
plt.figure(figsize=(6, 3))
ax = plt.axes(projection=ccrs.NorthPolarStereo())
ax.set_extent(plot_extent, crs=ccrs.PlateCarree())
ax.coastlines()
ax.quiver(x, y, u, v, mag, transform=ccrs.PlateCarree(),
regrid_shape=10, target_extent=target_extent)
@ImageTesting(['barbs_plate_carree'])
def test_barbs():
x = np.arange(-60, 45, 5)
y = np.arange(30, 75, 5)
x2d, y2d = np.meshgrid(x, y)
u = 40 * np.cos(np.deg2rad(y2d))
v = 40 * np.cos(2. * np.deg2rad(x2d))
mag = (u**2 + v**2)**.5
plot_extent = [-60, 40, 30, 70]
plt.figure(figsize=(6, 6))
# plot on native projection
ax = plt.subplot(211, projection=ccrs.PlateCarree())
ax.set_extent(plot_extent, crs=ccrs.PlateCarree())
ax.coastlines()
ax.barbs(x, y, u, v, length=4, linewidth=.25)
# plot on a different projection
ax = plt.subplot(212, projection=ccrs.NorthPolarStereo())
ax.set_extent(plot_extent, crs=ccrs.PlateCarree())
ax.coastlines()
ax.barbs(x, y, u, v, transform=ccrs.PlateCarree(), length=4, linewidth=.25)
@ImageTesting(['barbs_regrid'])
def test_barbs_regrid():
x = np.arange(-60, 42.5, 2.5)
y = np.arange(30, 72.5, 2.5)
x2d, y2d = np.meshgrid(x, y)
u = 40 * np.cos(np.deg2rad(y2d))
v = 40 * np.cos(2. * np.deg2rad(x2d))
mag = (u**2 + v**2)**.5
plot_extent = [-60, 40, 30, 70]
plt.figure(figsize=(6, 3))
ax = plt.axes(projection=ccrs.NorthPolarStereo())
ax.set_extent(plot_extent, crs=ccrs.PlateCarree())
ax.coastlines()
ax.barbs(x, y, u, v, mag, transform=ccrs.PlateCarree(),
length=4, linewidth=.4, regrid_shape=20)
@ImageTesting(['barbs_regrid_with_extent'])
def test_barbs_regrid_with_extent():
x = np.arange(-60, 42.5, 2.5)
y = np.arange(30, 72.5, 2.5)
x2d, y2d = np.meshgrid(x, y)
u = 40 * np.cos(np.deg2rad(y2d))
v = 40 * np.cos(2. * np.deg2rad(x2d))
mag = (u**2 + v**2)**.5
plot_extent = [-60, 40, 30, 70]
target_extent = [-3e6, 2e6, -6e6, -2.5e6]
plt.figure(figsize=(6, 3))
ax = plt.axes(projection=ccrs.NorthPolarStereo())
ax.set_extent(plot_extent, crs=ccrs.PlateCarree())
ax.coastlines()
ax.barbs(x, y, u, v, mag, transform=ccrs.PlateCarree(),
length=4, linewidth=.25, regrid_shape=10,
target_extent=target_extent)
@ImageTesting(['streamplot'])
def test_streamplot():
x = np.arange(-60, 42.5, 2.5)
y = np.arange(30, 72.5, 2.5)
x2d, y2d = np.meshgrid(x, y)
u = np.cos(np.deg2rad(y2d))
v = np.cos(2. * np.deg2rad(x2d))
mag = (u**2 + v**2)**.5
plot_extent = [-60, 40, 30, 70]
plt.figure(figsize=(6, 3))
ax = plt.axes(projection=ccrs.NorthPolarStereo())
ax.set_extent(plot_extent, crs=ccrs.PlateCarree())
ax.coastlines()
ax.streamplot(x, y, u, v, transform=ccrs.PlateCarree(),
density=(1.5, 2), color=mag, linewidth=2*mag)
if __name__ == '__main__':
import nose
nose.runmodule(argv=['-s', '--with-doctest'], exit=False)
| lgpl-3.0 |
anilmuthineni/tensorflow | tensorflow/contrib/factorization/python/ops/gmm.py | 11 | 12252 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of Gaussian mixture model (GMM) clustering.
This goes on top of skflow API.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
from tensorflow.contrib import framework
from tensorflow.contrib.factorization.python.ops import gmm_ops
from tensorflow.contrib.framework.python.framework import checkpoint_utils
from tensorflow.contrib.framework.python.ops import variables
from tensorflow.contrib.learn.python.learn import graph_actions
from tensorflow.contrib.learn.python.learn import monitors as monitor_lib
from tensorflow.contrib.learn.python.learn.estimators import estimator as estimator_lib
from tensorflow.contrib.learn.python.learn.estimators import model_fn as model_fn_lib
from tensorflow.contrib.learn.python.learn.estimators._sklearn import TransformerMixin
from tensorflow.contrib.learn.python.learn.learn_io import data_feeder
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed as random_seed_lib
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops.control_flow_ops import with_dependencies
from tensorflow.python.platform import tf_logging as logging
def _streaming_sum(scalar_tensor):
"""Create a sum metric and update op."""
sum_metric = framework.local_variable(constant_op.constant(0.0))
sum_update = sum_metric.assign_add(scalar_tensor)
return sum_metric, sum_update
class GMM(estimator_lib.Estimator, TransformerMixin):
"""GMM clustering."""
SCORES = 'scores'
ASSIGNMENTS = 'assignments'
ALL_SCORES = 'all_scores'
def __init__(self,
num_clusters,
model_dir=None,
random_seed=0,
params='wmc',
initial_clusters='random',
covariance_type='full',
batch_size=128,
steps=10,
continue_training=False,
config=None,
verbose=1):
"""Creates a model for running GMM training and inference.
Args:
num_clusters: number of clusters to train.
model_dir: the directory to save the model results and log files.
random_seed: Python integer. Seed for PRNG used to initialize centers.
params: Controls which parameters are updated in the training process.
Can contain any combination of "w" for weights, "m" for means,
and "c" for covars.
initial_clusters: specifies how to initialize the clusters for training.
See gmm_ops.gmm for the possible values.
covariance_type: one of "full", "diag".
batch_size: See Estimator
steps: See Estimator
continue_training: See Estimator
config: See Estimator
verbose: See Estimator
"""
super(GMM, self).__init__(model_dir=model_dir, config=config)
self.batch_size = batch_size
self.steps = steps
self.continue_training = continue_training
self.verbose = verbose
self._num_clusters = num_clusters
self._params = params
self._training_initial_clusters = initial_clusters
self._covariance_type = covariance_type
self._training_graph = None
self._random_seed = random_seed
def fit(self, x, y=None, monitors=None, logdir=None, steps=None):
"""Trains a GMM clustering on x.
Note: See Estimator for logic for continuous training and graph
construction across multiple calls to fit.
Args:
x: training input matrix of shape [n_samples, n_features].
y: labels. Should be None.
monitors: List of `Monitor` objects to print training progress and
invoke early stopping.
logdir: the directory to save the log file that can be used for optional
visualization.
steps: number of training steps. If not None, overrides the value passed
in constructor.
Returns:
Returns self.
"""
if logdir is not None:
self._model_dir = logdir
self._data_feeder = data_feeder.setup_train_data_feeder(x, None,
self._num_clusters,
self.batch_size)
_legacy_train_model( # pylint: disable=protected-access
self,
input_fn=self._data_feeder.input_builder,
feed_fn=self._data_feeder.get_feed_dict_fn(),
steps=steps or self.steps,
monitors=monitors,
init_feed_fn=self._data_feeder.get_feed_dict_fn())
return self
def predict(self, x, batch_size=None):
"""Predict cluster id for each element in x.
Args:
x: 2-D matrix or iterator.
batch_size: size to use for batching up x for querying the model.
Returns:
Array with same number of rows as x, containing cluster ids.
"""
return np.array([
prediction[GMM.ASSIGNMENTS]
for prediction in super(GMM, self).predict(
x=x, batch_size=batch_size, as_iterable=True)
])
def score(self, x, batch_size=None):
"""Predict total sum of distances to nearest clusters.
Args:
x: 2-D matrix or iterator.
batch_size: size to use for batching up x for querying the model.
Returns:
Total score.
"""
return np.sum(self.evaluate(x=x, batch_size=batch_size)[GMM.SCORES])
def transform(self, x, batch_size=None):
"""Transforms each element in x to distances to cluster centers.
Args:
x: 2-D matrix or iterator.
batch_size: size to use for batching up x for querying the model.
Returns:
Array with same number of rows as x, and num_clusters columns, containing
distances to the cluster centers.
"""
return np.array([
prediction[GMM.ALL_SCORES]
for prediction in super(GMM, self).predict(
x=x, batch_size=batch_size, as_iterable=True)
])
def clusters(self):
"""Returns cluster centers."""
clusters = checkpoint_utils.load_variable(
self.model_dir, gmm_ops.GmmAlgorithm.CLUSTERS_VARIABLE)
return np.squeeze(clusters, 1)
def covariances(self):
"""Returns the covariances."""
return checkpoint_utils.load_variable(
self.model_dir, gmm_ops.GmmAlgorithm.CLUSTERS_COVS_VARIABLE)
def _parse_tensor_or_dict(self, features):
if isinstance(features, dict):
return array_ops.concat([features[k] for k in sorted(features.keys())], 1)
return features
def _get_train_ops(self, features, _):
(_, _, losses, training_op) = gmm_ops.gmm(
self._parse_tensor_or_dict(features), self._training_initial_clusters,
self._num_clusters, self._random_seed, self._covariance_type,
self._params)
incr_step = state_ops.assign_add(variables.get_global_step(), 1)
loss = math_ops.reduce_sum(losses)
training_op = with_dependencies([training_op, incr_step], loss)
return training_op, loss
def _get_predict_ops(self, features):
(all_scores, model_predictions, _, _) = gmm_ops.gmm(
self._parse_tensor_or_dict(features), self._training_initial_clusters,
self._num_clusters, self._random_seed, self._covariance_type,
self._params)
return {
GMM.ALL_SCORES: all_scores[0],
GMM.ASSIGNMENTS: model_predictions[0][0],
}
def _get_eval_ops(self, features, _, unused_metrics):
(_,
_,
losses,
_) = gmm_ops.gmm(
self._parse_tensor_or_dict(features),
self._training_initial_clusters,
self._num_clusters,
self._random_seed,
self._covariance_type,
self._params)
return {GMM.SCORES: _streaming_sum(math_ops.reduce_sum(losses))}
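# Illustrative usage sketch (not part of the original module; assumes `points`
# is a numpy array of shape [n_samples, n_features]):
#
#   gmm = GMM(num_clusters=3, covariance_type='full', steps=100)
#   gmm.fit(points)
#   assignments = gmm.predict(points)  # cluster id per sample
#   centers = gmm.clusters()           # one row of means per cluster
#   total_score = gmm.score(points)    # summed score over the data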
# TODO(xavigonzalvo): delete this after implementing model-fn based Estimator.
def _legacy_train_model(estimator,
input_fn,
steps,
feed_fn=None,
init_op=None,
init_feed_fn=None,
init_fn=None,
device_fn=None,
monitors=None,
log_every_steps=100,
fail_on_nan_loss=True,
max_steps=None):
"""Legacy train function of Estimator."""
if hasattr(estimator.config, 'execution_mode'):
if estimator.config.execution_mode not in ('all', 'train'):
return
# Stagger startup of worker sessions based on task id.
sleep_secs = min(
estimator.config.training_worker_max_startup_secs,
estimator.config.task_id *
estimator.config.training_worker_session_startup_stagger_secs)
if sleep_secs:
logging.info('Waiting %d secs before starting task %d.', sleep_secs,
estimator.config.task_id)
time.sleep(sleep_secs)
# Device allocation
device_fn = device_fn or estimator._device_fn # pylint: disable=protected-access
with ops.Graph().as_default() as g, g.device(device_fn):
random_seed_lib.set_random_seed(estimator.config.tf_random_seed)
global_step = framework.create_global_step(g)
features, labels = input_fn()
estimator._check_inputs(features, labels) # pylint: disable=protected-access
# The default return type of _get_train_ops is ModelFnOps. But there are
# some subclasses of tf.contrib.learn.Estimator which override this
# method and use the legacy signature, namely _get_train_ops returns a
# (train_op, loss) tuple. The following else-statement code covers these
# cases, but will soon be deleted after the subclasses are updated.
# TODO(b/32664904): Update subclasses and delete the else-statement.
train_ops = estimator._get_train_ops(features, labels) # pylint: disable=protected-access
if isinstance(train_ops, model_fn_lib.ModelFnOps): # Default signature
train_op = train_ops.train_op
loss_op = train_ops.loss
if estimator.config.is_chief:
hooks = train_ops.training_chief_hooks + train_ops.training_hooks
else:
hooks = train_ops.training_hooks
else: # Legacy signature
if len(train_ops) != 2:
raise ValueError('Expected a tuple of train_op and loss, got {}'.format(
train_ops))
train_op = train_ops[0]
loss_op = train_ops[1]
hooks = []
hooks += monitor_lib.replace_monitors_with_hooks(monitors, estimator)
ops.add_to_collection(ops.GraphKeys.LOSSES, loss_op)
return graph_actions._monitored_train( # pylint: disable=protected-access
graph=g,
output_dir=estimator.model_dir,
train_op=train_op,
loss_op=loss_op,
global_step_tensor=global_step,
init_op=init_op,
init_feed_dict=init_feed_fn() if init_feed_fn is not None else None,
init_fn=init_fn,
log_every_steps=log_every_steps,
supervisor_is_chief=estimator.config.is_chief,
supervisor_master=estimator.config.master,
supervisor_save_model_secs=estimator.config.save_checkpoints_secs,
supervisor_save_model_steps=estimator.config.save_checkpoints_steps,
supervisor_save_summaries_steps=estimator.config.save_summary_steps,
keep_checkpoint_max=estimator.config.keep_checkpoint_max,
keep_checkpoint_every_n_hours=(
estimator.config.keep_checkpoint_every_n_hours),
feed_fn=feed_fn,
steps=steps,
fail_on_nan_loss=fail_on_nan_loss,
hooks=hooks,
max_steps=max_steps)
| apache-2.0 |
rohit21122012/DCASE2013 | runs/2016/baseline32/src/dataset.py | 37 | 78389 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import urllib2
import socket
import locale
import zipfile
import tarfile
from sklearn.cross_validation import StratifiedShuffleSplit, KFold
from ui import *
from general import *
from files import *
class Dataset(object):
"""Dataset base class.
The specific dataset classes are inherited from this class, and only needed methods are reimplemented.
"""
def __init__(self, data_path='data', name='dataset'):
"""__init__ method.
Parameters
----------
data_path : str
Basepath where the dataset is stored.
(Default value='data')
"""
# Folder name for dataset
self.name = name
# Path to the dataset
self.local_path = os.path.join(data_path, self.name)
# Create the dataset path if does not exist
if not os.path.isdir(self.local_path):
os.makedirs(self.local_path)
# Evaluation setup folder
self.evaluation_setup_folder = 'evaluation_setup'
# Path to the folder containing evaluation setup files
self.evaluation_setup_path = os.path.join(self.local_path, self.evaluation_setup_folder)
# Meta data file, csv-format
self.meta_filename = 'meta.txt'
# Path to meta data file
self.meta_file = os.path.join(self.local_path, self.meta_filename)
# Hash file to detect removed or added files
self.filelisthash_filename = 'filelist.hash'
# Number of evaluation folds
self.evaluation_folds = 1
# List containing dataset package items
# Define this in the inherited class.
# Format:
# {
# 'remote_package': download_url,
# 'local_package': os.path.join(self.local_path, 'name_of_downloaded_package'),
# 'local_audio_path': os.path.join(self.local_path, 'name_of_folder_containing_audio_files'),
# }
self.package_list = []
# List of audio files
self.files = None
# List of meta data dict
self.meta_data = None
# Training meta data for folds
self.evaluation_data_train = {}
# Testing meta data for folds
self.evaluation_data_test = {}
# Recognized audio extensions
self.audio_extensions = {'wav', 'flac'}
# Info fields for dataset
self.authors = ''
self.name_remote = ''
self.url = ''
self.audio_source = ''
self.audio_type = ''
self.recording_device_model = ''
self.microphone_model = ''
@property
def audio_files(self):
"""Get all audio files in the dataset
Parameters
----------
Nothing
Returns
-------
filelist : list
File list with absolute paths
"""
if self.files is None:
self.files = []
for item in self.package_list:
path = item['local_audio_path']
if path:
l = os.listdir(path)
for f in l:
file_name, file_extension = os.path.splitext(f)
if file_extension[1:] in self.audio_extensions:
self.files.append(os.path.abspath(os.path.join(path, f)))
self.files.sort()
return self.files
@property
def audio_file_count(self):
"""Get number of audio files in dataset
Parameters
----------
Nothing
Returns
-------
filecount : int
Number of audio files
"""
return len(self.audio_files)
@property
def meta(self):
"""Get meta data for dataset. If not already read from disk, data is read and returned.
Parameters
----------
Nothing
Returns
-------
meta_data : list
List containing meta data as dict.
Raises
-------
IOError
meta file not found.
"""
if self.meta_data is None:
self.meta_data = []
meta_id = 0
if os.path.isfile(self.meta_file):
f = open(self.meta_file, 'rt')
try:
reader = csv.reader(f, delimiter='\t')
for row in reader:
if len(row) == 2:
# Scene meta
self.meta_data.append({'file': row[0], 'scene_label': row[1].rstrip()})
elif len(row) == 4:
# Audio tagging meta
self.meta_data.append(
{'file': row[0], 'scene_label': row[1].rstrip(), 'tag_string': row[2].rstrip(),
'tags': row[3].split(';')})
elif len(row) == 6:
# Event meta
self.meta_data.append({'file': row[0],
'scene_label': row[1].rstrip(),
'event_onset': float(row[2]),
'event_offset': float(row[3]),
'event_label': row[4].rstrip(),
'event_type': row[5].rstrip(),
'id': meta_id
})
meta_id += 1
finally:
f.close()
else:
raise IOError("Meta file not found [%s]" % self.meta_file)
return self.meta_data
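    # Expected meta.txt layout (tab-separated), matching the parser above:
    #   2 columns: file, scene_label
    #   4 columns: file, scene_label, tag_string, tags (';'-separated)
    #   6 columns: file, scene_label, event_onset, event_offset,
    #              event_label, event_type
    # e.g. a scene row could look like "audio/scene_001.wav<TAB>street"
    # (hypothetical file name, shown for illustration only).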
@property
def meta_count(self):
"""Number of meta data items.
Parameters
----------
Nothing
Returns
-------
meta_item_count : int
Meta data item count
"""
return len(self.meta)
@property
def fold_count(self):
"""Number of fold in the evaluation setup.
Parameters
----------
Nothing
Returns
-------
fold_count : int
Number of folds
"""
return self.evaluation_folds
@property
def scene_labels(self):
"""List of unique scene labels in the meta data.
Parameters
----------
Nothing
Returns
-------
labels : list
List of scene labels in alphabetical order.
"""
labels = []
for item in self.meta:
if 'scene_label' in item and item['scene_label'] not in labels:
labels.append(item['scene_label'])
labels.sort()
return labels
@property
def scene_label_count(self):
"""Number of unique scene labels in the meta data.
Parameters
----------
Nothing
Returns
-------
scene_label_count : int
Number of unique scene labels.
"""
return len(self.scene_labels)
@property
def event_labels(self):
"""List of unique event labels in the meta data.
Parameters
----------
Nothing
Returns
-------
labels : list
List of event labels in alphabetical order.
"""
labels = []
for item in self.meta:
if 'event_label' in item and item['event_label'].rstrip() not in labels:
labels.append(item['event_label'].rstrip())
labels.sort()
return labels
@property
def event_label_count(self):
"""Number of unique event labels in the meta data.
Parameters
----------
Nothing
Returns
-------
event_label_count : int
Number of unique event labels
"""
return len(self.event_labels)
@property
def audio_tags(self):
"""List of unique audio tags in the meta data.
Parameters
----------
Nothing
Returns
-------
labels : list
List of audio tags in alphabetical order.
"""
tags = []
for item in self.meta:
if 'tags' in item:
for tag in item['tags']:
if tag and tag not in tags:
tags.append(tag)
tags.sort()
return tags
@property
def audio_tag_count(self):
"""Number of unique audio tags in the meta data.
Parameters
----------
Nothing
Returns
-------
audio_tag_count : int
Number of unique audio tags
"""
return len(self.audio_tags)
def __getitem__(self, i):
"""Getting meta data item
Parameters
----------
i : int
item id
Returns
-------
meta_data : dict
Meta data item
"""
if i < len(self.meta):
return self.meta[i]
else:
return None
def __iter__(self):
"""Iterator for meta data items
Parameters
----------
Nothing
Returns
-------
Nothing
"""
i = 0
meta = self[i]
# yield window while it's valid
while meta is not None:
yield meta
# get next item
i += 1
meta = self[i]
@staticmethod
def print_bytes(num_bytes):
"""Output number of bytes according to locale and with IEC binary prefixes
Parameters
----------
num_bytes : int > 0 [scalar]
Bytes
Returns
-------
bytes : str
Human readable string
"""
KiB = 1024
MiB = KiB * KiB
GiB = KiB * MiB
TiB = KiB * GiB
PiB = KiB * TiB
EiB = KiB * PiB
ZiB = KiB * EiB
YiB = KiB * ZiB
locale.setlocale(locale.LC_ALL, '')
output = locale.format("%d", num_bytes, grouping=True) + ' bytes'
if num_bytes > YiB:
output += ' (%.4g YiB)' % (num_bytes / YiB)
elif num_bytes > ZiB:
output += ' (%.4g ZiB)' % (num_bytes / ZiB)
elif num_bytes > EiB:
output += ' (%.4g EiB)' % (num_bytes / EiB)
elif num_bytes > PiB:
output += ' (%.4g PiB)' % (num_bytes / PiB)
elif num_bytes > TiB:
output += ' (%.4g TiB)' % (num_bytes / TiB)
elif num_bytes > GiB:
output += ' (%.4g GiB)' % (num_bytes / GiB)
elif num_bytes > MiB:
output += ' (%.4g MiB)' % (num_bytes / MiB)
elif num_bytes > KiB:
output += ' (%.4g KiB)' % (num_bytes / KiB)
return output
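    # For example, Dataset.print_bytes(3 * 1024 ** 2) returns roughly
    # "3,145,728 bytes (3 MiB)"; the exact digit grouping depends on the
    # active locale.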
def download(self):
"""Download dataset over the internet to the local path
Parameters
----------
Nothing
Returns
-------
Nothing
Raises
-------
IOError
Download failed.
"""
section_header('Download dataset')
for item in self.package_list:
try:
if item['remote_package'] and not os.path.isfile(item['local_package']):
data = None
req = urllib2.Request(item['remote_package'], data, {})
handle = urllib2.urlopen(req)
if "Content-Length" in handle.headers.items():
size = int(handle.info()["Content-Length"])
else:
size = None
actualSize = 0
blocksize = 64 * 1024
tmp_file = os.path.join(self.local_path, 'tmp_file')
fo = open(tmp_file, "wb")
terminate = False
while not terminate:
block = handle.read(blocksize)
actualSize += len(block)
if size:
progress(title_text=os.path.split(item['local_package'])[1],
percentage=actualSize / float(size),
note=self.print_bytes(actualSize))
else:
progress(title_text=os.path.split(item['local_package'])[1],
note=self.print_bytes(actualSize))
if len(block) == 0:
break
fo.write(block)
fo.close()
os.rename(tmp_file, item['local_package'])
except (urllib2.URLError, socket.timeout), e:
try:
fo.close()
                except Exception:
                    pass
                raise IOError('Download failed [%s]' % (item['remote_package']))
foot()
def extract(self):
"""Extract the dataset packages
Parameters
----------
Nothing
Returns
-------
Nothing
"""
section_header('Extract dataset')
for item_id, item in enumerate(self.package_list):
if item['local_package']:
if item['local_package'].endswith('.zip'):
with zipfile.ZipFile(item['local_package'], "r") as z:
# Trick to omit first level folder
parts = []
for name in z.namelist():
if not name.endswith('/'):
parts.append(name.split('/')[:-1])
                        prefix = os.path.commonprefix(parts) or ''
                        offset = 0
if prefix:
if len(prefix) > 1:
prefix_ = list()
prefix_.append(prefix[0])
prefix = prefix_
prefix = '/'.join(prefix) + '/'
offset = len(prefix)
# Start extraction
members = z.infolist()
file_count = 1
for i, member in enumerate(members):
if len(member.filename) > offset:
member.filename = member.filename[offset:]
if not os.path.isfile(os.path.join(self.local_path, member.filename)):
z.extract(member, self.local_path)
progress(title_text='Extracting ['+str(item_id)+'/'+str(len(self.package_list))+']', percentage=(file_count / float(len(members))),
note=member.filename)
file_count += 1
elif item['local_package'].endswith('.tar.gz'):
tar = tarfile.open(item['local_package'], "r:gz")
for i, tar_info in enumerate(tar):
if not os.path.isfile(os.path.join(self.local_path, tar_info.name)):
tar.extract(tar_info, self.local_path)
progress(title_text='Extracting ['+str(item_id)+'/'+str(len(self.package_list))+']', note=tar_info.name)
tar.members = []
tar.close()
foot()
def on_after_extract(self):
"""Dataset meta data preparation, this will be overloaded in dataset specific classes
Parameters
----------
Nothing
Returns
-------
Nothing
"""
pass
def get_filelist(self):
"""List of files under local_path
Parameters
----------
Nothing
Returns
-------
filelist: list
File list
"""
filelist = []
for path, subdirs, files in os.walk(self.local_path):
for name in files:
filelist.append(os.path.join(path, name))
return filelist
def check_filelist(self):
"""Generates hash from file list and check does it matches with one saved in filelist.hash.
If some files have been deleted or added, checking will result False.
Parameters
----------
Nothing
Returns
-------
result: bool
Result
"""
if os.path.isfile(os.path.join(self.local_path, self.filelisthash_filename)):
hash = load_text(os.path.join(self.local_path, self.filelisthash_filename))[0]
if hash != get_parameter_hash(sorted(self.get_filelist())):
return False
else:
return True
else:
return False
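    # Note: the stored hash is computed from the sorted file listing, so adding or
    # removing any file under local_path makes check_filelist() return False and
    # triggers a re-fetch in fetch() below.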
def save_filelist_hash(self):
"""Generates file list hash, and saves it as filelist.hash under local_path.
Parameters
----------
Nothing
Returns
-------
Nothing
"""
filelist = self.get_filelist()
filelist_hash_not_found = True
for file in filelist:
if self.filelisthash_filename in file:
filelist_hash_not_found = False
if filelist_hash_not_found:
filelist.append(os.path.join(self.local_path, self.filelisthash_filename))
save_text(os.path.join(self.local_path, self.filelisthash_filename), get_parameter_hash(sorted(filelist)))
def fetch(self):
"""Download, extract and prepare the dataset.
Parameters
----------
Nothing
Returns
-------
Nothing
"""
if not self.check_filelist():
self.download()
self.extract()
self.on_after_extract()
self.save_filelist_hash()
return self
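    # Hedged usage sketch (class name and fold id are illustrative; see the dataset
    # classes defined later in this module):
    #   dataset = TUTAcousticScenes_2016_DevelopmentSet(data_path='data').fetch()
    #   training_items = dataset.train(fold=1)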
def train(self, fold=0):
"""List of training items.
Parameters
----------
fold : int > 0 [scalar]
Fold id, if zero all meta data is returned.
(Default value=0)
Returns
-------
list : list of dicts
List containing all meta data assigned to training set for given fold.
"""
if fold not in self.evaluation_data_train:
self.evaluation_data_train[fold] = []
if fold > 0:
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt'), 'rt') as f:
for row in csv.reader(f, delimiter='\t'):
if len(row) == 2:
# Scene meta
self.evaluation_data_train[fold].append({
'file': self.relative_to_absolute_path(row[0]),
'scene_label': row[1]
})
elif len(row) == 4:
# Audio tagging meta
self.evaluation_data_train[fold].append({
'file': self.relative_to_absolute_path(row[0]),
'scene_label': row[1],
'tag_string': row[2],
'tags': row[3].split(';')
})
elif len(row) == 5:
# Event meta
self.evaluation_data_train[fold].append({
'file': self.relative_to_absolute_path(row[0]),
'scene_label': row[1],
'event_onset': float(row[2]),
'event_offset': float(row[3]),
'event_label': row[4]
})
else:
data = []
for item in self.meta:
if 'event_label' in item:
data.append({'file': self.relative_to_absolute_path(item['file']),
'scene_label': item['scene_label'],
'event_onset': item['event_onset'],
'event_offset': item['event_offset'],
'event_label': item['event_label'],
})
else:
data.append({'file': self.relative_to_absolute_path(item['file']),
'scene_label': item['scene_label']
})
self.evaluation_data_train[0] = data
return self.evaluation_data_train[fold]
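    # For reference, a scene-classification item returned above has the form
    # {'file': '<absolute path>', 'scene_label': 'home'} (values are placeholders);
    # event items additionally carry 'event_onset', 'event_offset' and 'event_label'.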
def test(self, fold=0):
"""List of testing items.
Parameters
----------
fold : int > 0 [scalar]
Fold id, if zero all meta data is returned.
(Default value=0)
Returns
-------
list : list of dicts
List containing all meta data assigned to testing set for given fold.
"""
if fold not in self.evaluation_data_test:
self.evaluation_data_test[fold] = []
if fold > 0:
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt'), 'rt') as f:
for row in csv.reader(f, delimiter='\t'):
self.evaluation_data_test[fold].append({'file': self.relative_to_absolute_path(row[0])})
else:
data = []
files = []
for item in self.meta:
if self.relative_to_absolute_path(item['file']) not in files:
data.append({'file': self.relative_to_absolute_path(item['file'])})
files.append(self.relative_to_absolute_path(item['file']))
self.evaluation_data_test[fold] = data
return self.evaluation_data_test[fold]
def folds(self, mode='folds'):
"""List of fold ids
Parameters
----------
mode : str {'folds','full'}
            Fold setup type, possible values are 'folds' and 'full'. In 'full' mode the fold number is set to 0 and all data is used for training.
(Default value=folds)
Returns
-------
list : list of integers
Fold ids
"""
if mode == 'folds':
return range(1, self.evaluation_folds + 1)
elif mode == 'full':
return [0]
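    # Example: with self.evaluation_folds == 4, folds('folds') gives [1, 2, 3, 4]
    # while folds('full') gives [0] (all data used for training).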
def file_meta(self, file):
"""Meta data for given file
Parameters
----------
file : str
File name
Returns
-------
list : list of dicts
List containing all meta data related to given file.
"""
file = self.absolute_to_relative(file)
file_meta = []
for item in self.meta:
if item['file'] == file:
file_meta.append(item)
return file_meta
def relative_to_absolute_path(self, path):
"""Converts relative path into absolute path.
Parameters
----------
path : str
Relative path
Returns
-------
path : str
Absolute path
"""
return os.path.abspath(os.path.join(self.local_path, path))
def absolute_to_relative(self, path):
"""Converts absolute path into relative path.
Parameters
----------
path : str
Absolute path
Returns
-------
path : str
Relative path
"""
if path.startswith(os.path.abspath(self.local_path)):
return os.path.relpath(path, self.local_path)
else:
return path
# =====================================================
# DCASE 2016
# =====================================================
class TUTAcousticScenes_2016_DevelopmentSet(Dataset):
"""TUT Acoustic scenes 2016 development dataset
This dataset is used in DCASE2016 - Task 1, Acoustic scene classification
"""
def __init__(self, data_path='data'):
Dataset.__init__(self, data_path=data_path, name='TUT-acoustic-scenes-2016-development')
self.authors = 'Annamaria Mesaros, Toni Heittola, and Tuomas Virtanen'
self.name_remote = 'TUT Acoustic Scenes 2016, development dataset'
self.url = 'https://zenodo.org/record/45739'
self.audio_source = 'Field recording'
self.audio_type = 'Natural'
self.recording_device_model = 'Roland Edirol R-09'
self.microphone_model = 'Soundman OKM II Klassik/studio A3 electret microphone'
self.evaluation_folds = 4
self.package_list = [
{
'remote_package': None,
'local_package': None,
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/45739/files/TUT-acoustic-scenes-2016-development.doc.zip',
'local_package': os.path.join(self.local_path, 'TUT-acoustic-scenes-2016-development.doc.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/45739/files/TUT-acoustic-scenes-2016-development.meta.zip',
'local_package': os.path.join(self.local_path, 'TUT-acoustic-scenes-2016-development.meta.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/45739/files/TUT-acoustic-scenes-2016-development.audio.1.zip',
'local_package': os.path.join(self.local_path, 'TUT-acoustic-scenes-2016-development.audio.1.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/45739/files/TUT-acoustic-scenes-2016-development.audio.2.zip',
'local_package': os.path.join(self.local_path, 'TUT-acoustic-scenes-2016-development.audio.2.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/45739/files/TUT-acoustic-scenes-2016-development.audio.3.zip',
'local_package': os.path.join(self.local_path, 'TUT-acoustic-scenes-2016-development.audio.3.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/45739/files/TUT-acoustic-scenes-2016-development.audio.4.zip',
'local_package': os.path.join(self.local_path, 'TUT-acoustic-scenes-2016-development.audio.4.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/45739/files/TUT-acoustic-scenes-2016-development.audio.5.zip',
'local_package': os.path.join(self.local_path, 'TUT-acoustic-scenes-2016-development.audio.5.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/45739/files/TUT-acoustic-scenes-2016-development.audio.6.zip',
'local_package': os.path.join(self.local_path, 'TUT-acoustic-scenes-2016-development.audio.6.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/45739/files/TUT-acoustic-scenes-2016-development.audio.7.zip',
'local_package': os.path.join(self.local_path, 'TUT-acoustic-scenes-2016-development.audio.7.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/45739/files/TUT-acoustic-scenes-2016-development.audio.8.zip',
'local_package': os.path.join(self.local_path, 'TUT-acoustic-scenes-2016-development.audio.8.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
}
]
def on_after_extract(self):
"""After dataset packages are downloaded and extracted, meta-files are checked.
Parameters
----------
nothing
Returns
-------
nothing
"""
if not os.path.isfile(self.meta_file):
section_header('Generating meta file for dataset')
meta_data = {}
for fold in xrange(1, self.evaluation_folds):
# Read train files in
train_filename = os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt')
f = open(train_filename, 'rt')
reader = csv.reader(f, delimiter='\t')
for row in reader:
if row[0] not in meta_data:
meta_data[row[0]] = row[1]
f.close()
# Read evaluation files in
eval_filename = os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_evaluate.txt')
f = open(eval_filename, 'rt')
reader = csv.reader(f, delimiter='\t')
for row in reader:
if row[0] not in meta_data:
meta_data[row[0]] = row[1]
f.close()
f = open(self.meta_file, 'wt')
try:
writer = csv.writer(f, delimiter='\t')
for file in meta_data:
raw_path, raw_filename = os.path.split(file)
relative_path = self.absolute_to_relative(raw_path)
label = meta_data[file]
writer.writerow((os.path.join(relative_path, raw_filename), label))
finally:
f.close()
foot()
class TUTAcousticScenes_2016_EvaluationSet(Dataset):
"""TUT Acoustic scenes 2016 evaluation dataset
This dataset is used in DCASE2016 - Task 1, Acoustic scene classification
"""
def __init__(self, data_path='data'):
Dataset.__init__(self, data_path=data_path, name='TUT-acoustic-scenes-2016-evaluation')
self.authors = 'Annamaria Mesaros, Toni Heittola, and Tuomas Virtanen'
self.name_remote = 'TUT Acoustic Scenes 2016, evaluation dataset'
self.url = 'http://www.cs.tut.fi/sgn/arg/dcase2016/download/'
self.audio_source = 'Field recording'
self.audio_type = 'Natural'
self.recording_device_model = 'Roland Edirol R-09'
self.microphone_model = 'Soundman OKM II Klassik/studio A3 electret microphone'
self.evaluation_folds = 1
self.package_list = [
{
'remote_package': None,
'local_package': None,
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
]
def on_after_extract(self):
"""After dataset packages are downloaded and extracted, meta-files are checked.
Parameters
----------
nothing
Returns
-------
nothing
"""
eval_filename = os.path.join(self.evaluation_setup_path, 'evaluate.txt')
if not os.path.isfile(self.meta_file) and os.path.isfile(eval_filename):
section_header('Generating meta file for dataset')
meta_data = {}
f = open(eval_filename, 'rt')
reader = csv.reader(f, delimiter='\t')
for row in reader:
if row[0] not in meta_data:
meta_data[row[0]] = row[1]
f.close()
f = open(self.meta_file, 'wt')
try:
writer = csv.writer(f, delimiter='\t')
for file in meta_data:
raw_path, raw_filename = os.path.split(file)
relative_path = self.absolute_to_relative(raw_path)
label = meta_data[file]
writer.writerow((os.path.join(relative_path, raw_filename), label))
finally:
f.close()
foot()
def train(self, fold=0):
raise IOError('Train setup not available.')
# TUT Sound events 2016 development and evaluation sets
class TUTSoundEvents_2016_DevelopmentSet(Dataset):
"""TUT Sound events 2016 development dataset
This dataset is used in DCASE2016 - Task 3, Sound event detection in real life audio
"""
def __init__(self, data_path='data'):
Dataset.__init__(self, data_path=data_path, name='TUT-sound-events-2016-development')
self.authors = 'Annamaria Mesaros, Toni Heittola, and Tuomas Virtanen'
self.name_remote = 'TUT Sound Events 2016, development dataset'
self.url = 'https://zenodo.org/record/45759'
self.audio_source = 'Field recording'
self.audio_type = 'Natural'
self.recording_device_model = 'Roland Edirol R-09'
self.microphone_model = 'Soundman OKM II Klassik/studio A3 electret microphone'
self.evaluation_folds = 4
self.package_list = [
{
'remote_package': None,
'local_package': None,
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': None,
'local_package': None,
'local_audio_path': os.path.join(self.local_path, 'audio', 'residential_area'),
},
{
'remote_package': None,
'local_package': None,
'local_audio_path': os.path.join(self.local_path, 'audio', 'home'),
},
{
'remote_package': 'https://zenodo.org/record/45759/files/TUT-sound-events-2016-development.doc.zip',
'local_package': os.path.join(self.local_path, 'TUT-sound-events-2016-development.doc.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/45759/files/TUT-sound-events-2016-development.meta.zip',
'local_package': os.path.join(self.local_path, 'TUT-sound-events-2016-development.meta.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/45759/files/TUT-sound-events-2016-development.audio.zip',
'local_package': os.path.join(self.local_path, 'TUT-sound-events-2016-development.audio.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
]
def event_label_count(self, scene_label=None):
return len(self.event_labels(scene_label=scene_label))
def event_labels(self, scene_label=None):
labels = []
for item in self.meta:
if scene_label is None or item['scene_label'] == scene_label:
if 'event_label' in item and item['event_label'].rstrip() not in labels:
labels.append(item['event_label'].rstrip())
labels.sort()
return labels
def on_after_extract(self):
"""After dataset packages are downloaded and extracted, meta-files are checked.
Parameters
----------
nothing
Returns
-------
nothing
"""
if not os.path.isfile(self.meta_file):
meta_file_handle = open(self.meta_file, 'wt')
try:
writer = csv.writer(meta_file_handle, delimiter='\t')
for filename in self.audio_files:
raw_path, raw_filename = os.path.split(filename)
relative_path = self.absolute_to_relative(raw_path)
scene_label = relative_path.replace('audio', '')[1:]
base_filename, file_extension = os.path.splitext(raw_filename)
annotation_filename = os.path.join(self.local_path, relative_path.replace('audio', 'meta'), base_filename + '.ann')
if os.path.isfile(annotation_filename):
annotation_file_handle = open(annotation_filename, 'rt')
try:
annotation_file_reader = csv.reader(annotation_file_handle, delimiter='\t')
for annotation_file_row in annotation_file_reader:
writer.writerow((os.path.join(relative_path, raw_filename),
scene_label,
float(annotation_file_row[0].replace(',', '.')),
float(annotation_file_row[1].replace(',', '.')),
annotation_file_row[2], 'm'))
finally:
annotation_file_handle.close()
finally:
meta_file_handle.close()
def train(self, fold=0, scene_label=None):
if fold not in self.evaluation_data_train:
self.evaluation_data_train[fold] = {}
for scene_label_ in self.scene_labels:
if scene_label_ not in self.evaluation_data_train[fold]:
self.evaluation_data_train[fold][scene_label_] = []
if fold > 0:
with open(os.path.join(self.evaluation_setup_path, scene_label_+'_fold' + str(fold) + '_train.txt'), 'rt') as f:
for row in csv.reader(f, delimiter='\t'):
if len(row) == 5:
# Event meta
self.evaluation_data_train[fold][scene_label_].append({
'file': self.relative_to_absolute_path(row[0]),
'scene_label': row[1],
'event_onset': float(row[2]),
'event_offset': float(row[3]),
'event_label': row[4]
})
else:
data = []
for item in self.meta:
if item['scene_label'] == scene_label_:
if 'event_label' in item:
data.append({'file': self.relative_to_absolute_path(item['file']),
'scene_label': item['scene_label'],
'event_onset': item['event_onset'],
'event_offset': item['event_offset'],
'event_label': item['event_label'],
})
self.evaluation_data_train[0][scene_label_] = data
if scene_label:
return self.evaluation_data_train[fold][scene_label]
else:
data = []
for scene_label_ in self.scene_labels:
for item in self.evaluation_data_train[fold][scene_label_]:
data.append(item)
return data
def test(self, fold=0, scene_label=None):
if fold not in self.evaluation_data_test:
self.evaluation_data_test[fold] = {}
for scene_label_ in self.scene_labels:
if scene_label_ not in self.evaluation_data_test[fold]:
self.evaluation_data_test[fold][scene_label_] = []
if fold > 0:
with open(os.path.join(self.evaluation_setup_path, scene_label_+'_fold' + str(fold) + '_test.txt'), 'rt') as f:
for row in csv.reader(f, delimiter='\t'):
self.evaluation_data_test[fold][scene_label_].append({'file': self.relative_to_absolute_path(row[0])})
else:
data = []
files = []
for item in self.meta:
                            if item['scene_label'] == scene_label_:
if self.relative_to_absolute_path(item['file']) not in files:
data.append({'file': self.relative_to_absolute_path(item['file'])})
files.append(self.relative_to_absolute_path(item['file']))
self.evaluation_data_test[0][scene_label_] = data
if scene_label:
return self.evaluation_data_test[fold][scene_label]
else:
data = []
for scene_label_ in self.scene_labels:
for item in self.evaluation_data_test[fold][scene_label_]:
data.append(item)
return data
class TUTSoundEvents_2016_EvaluationSet(Dataset):
"""TUT Sound events 2016 evaluation dataset
This dataset is used in DCASE2016 - Task 3, Sound event detection in real life audio
"""
def __init__(self, data_path='data'):
Dataset.__init__(self, data_path=data_path, name='TUT-sound-events-2016-evaluation')
self.authors = 'Annamaria Mesaros, Toni Heittola, and Tuomas Virtanen'
self.name_remote = 'TUT Sound Events 2016, evaluation dataset'
self.url = 'http://www.cs.tut.fi/sgn/arg/dcase2016/download/'
self.audio_source = 'Field recording'
self.audio_type = 'Natural'
self.recording_device_model = 'Roland Edirol R-09'
self.microphone_model = 'Soundman OKM II Klassik/studio A3 electret microphone'
self.evaluation_folds = 1
self.package_list = [
{
'remote_package': None,
'local_package': None,
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': None,
'local_package': None,
'local_audio_path': os.path.join(self.local_path, 'audio', 'home'),
},
{
'remote_package': None,
'local_package': None,
'local_audio_path': os.path.join(self.local_path, 'audio', 'residential_area'),
},
]
@property
def scene_labels(self):
labels = ['home', 'residential_area']
labels.sort()
return labels
def event_label_count(self, scene_label=None):
return len(self.event_labels(scene_label=scene_label))
def event_labels(self, scene_label=None):
labels = []
for item in self.meta:
if scene_label is None or item['scene_label'] == scene_label:
if 'event_label' in item and item['event_label'] not in labels:
labels.append(item['event_label'])
labels.sort()
return labels
def on_after_extract(self):
"""After dataset packages are downloaded and extracted, meta-files are checked.
Parameters
----------
nothing
Returns
-------
nothing
"""
if not os.path.isfile(self.meta_file) and os.path.isdir(os.path.join(self.local_path,'meta')):
meta_file_handle = open(self.meta_file, 'wt')
try:
writer = csv.writer(meta_file_handle, delimiter='\t')
for filename in self.audio_files:
raw_path, raw_filename = os.path.split(filename)
relative_path = self.absolute_to_relative(raw_path)
scene_label = relative_path.replace('audio', '')[1:]
base_filename, file_extension = os.path.splitext(raw_filename)
annotation_filename = os.path.join(self.local_path, relative_path.replace('audio', 'meta'), base_filename + '.ann')
if os.path.isfile(annotation_filename):
annotation_file_handle = open(annotation_filename, 'rt')
try:
annotation_file_reader = csv.reader(annotation_file_handle, delimiter='\t')
for annotation_file_row in annotation_file_reader:
writer.writerow((os.path.join(relative_path, raw_filename),
scene_label,
float(annotation_file_row[0].replace(',', '.')),
float(annotation_file_row[1].replace(',', '.')),
annotation_file_row[2], 'm'))
finally:
annotation_file_handle.close()
finally:
meta_file_handle.close()
def train(self, fold=0, scene_label=None):
raise IOError('Train setup not available.')
def test(self, fold=0, scene_label=None):
if fold not in self.evaluation_data_test:
self.evaluation_data_test[fold] = {}
for scene_label_ in self.scene_labels:
if scene_label_ not in self.evaluation_data_test[fold]:
self.evaluation_data_test[fold][scene_label_] = []
if fold > 0:
                    with open(os.path.join(self.evaluation_setup_path, scene_label_ + '_fold' + str(fold) + '_test.txt'), 'rt') as f:
for row in csv.reader(f, delimiter='\t'):
self.evaluation_data_test[fold][scene_label_].append({'file': self.relative_to_absolute_path(row[0])})
else:
data = []
files = []
for item in self.audio_files:
if scene_label_ in item:
if self.relative_to_absolute_path(item) not in files:
data.append({'file': self.relative_to_absolute_path(item)})
files.append(self.relative_to_absolute_path(item))
self.evaluation_data_test[0][scene_label_] = data
if scene_label:
return self.evaluation_data_test[fold][scene_label]
else:
data = []
for scene_label_ in self.scene_labels:
for item in self.evaluation_data_test[fold][scene_label_]:
data.append(item)
return data
# CHIME home
class CHiMEHome_DomesticAudioTag_DevelopmentSet(Dataset):
def __init__(self, data_path=None):
Dataset.__init__(self, data_path=data_path, name = 'CHiMeHome-audiotag-development')
self.authors = 'Peter Foster, Siddharth Sigtia, Sacha Krstulovic, Jon Barker, and Mark Plumbley'
self.name_remote = 'The CHiME-Home dataset is a collection of annotated domestic environment audio recordings.'
self.url = ''
self.audio_source = 'Field recording'
self.audio_type = 'Natural'
self.recording_device_model = 'Unknown'
self.microphone_model = 'Unknown'
self.evaluation_folds = 10
self.package_list = [
{
'remote_package': 'https://archive.org/download/chime-home/chime_home.tar.gz',
'local_package': os.path.join(self.local_path, 'chime_home.tar.gz'),
'local_audio_path': os.path.join(self.local_path, 'chime_home', 'chunks'),
},
]
@property
def audio_files(self):
"""Get all audio files in the dataset, use only file from CHime-Home-refined set.
Parameters
----------
nothing
Returns
-------
files : list
audio files
"""
if self.files is None:
refined_files = []
with open(os.path.join(self.local_path, 'chime_home', 'chunks_refined.csv'), 'rt') as f:
for row in csv.reader(f, delimiter=','):
refined_files.append(row[1])
self.files = []
for file in self.package_list:
path = file['local_audio_path']
if path:
l = os.listdir(path)
p = path.replace(self.local_path + os.path.sep, '')
for f in l:
fileName, fileExtension = os.path.splitext(f)
if fileExtension[1:] in self.audio_extensions and fileName in refined_files:
self.files.append(os.path.abspath(os.path.join(path, f)))
self.files.sort()
return self.files
def read_chunk_meta(self, meta_filename):
if os.path.isfile(meta_filename):
meta_file_handle = open(meta_filename, 'rt')
try:
meta_file_reader = csv.reader(meta_file_handle, delimiter=',')
data = {}
for meta_file_row in meta_file_reader:
data[meta_file_row[0]] = meta_file_row[1]
finally:
meta_file_handle.close()
return data
def tagcode_to_taglabel(self, tag):
map = {'c': 'child speech',
'm': 'adult male speech',
'f': 'adult female speech',
'v': 'video game/tv',
'p': 'percussive sound',
'b': 'broadband noise',
'o': 'other',
'S': 'silence/background',
'U': 'unidentifiable'
}
if tag in map:
return map[tag]
else:
return None
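    # e.g. tagcode_to_taglabel('c') returns 'child speech'; unknown codes return None.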
def on_after_extract(self):
"""After dataset packages are downloaded and extracted, meta-files are checked.
Legacy dataset meta files are converted to be compatible with current scheme.
Parameters
----------
nothing
Returns
-------
nothing
"""
if not os.path.isfile(self.meta_file):
section_header('Generating meta file for dataset')
scene_label = 'home'
f = open(self.meta_file, 'wt')
try:
writer = csv.writer(f, delimiter='\t')
for file in self.audio_files:
raw_path, raw_filename = os.path.split(file)
relative_path = self.absolute_to_relative(raw_path)
base_filename, file_extension = os.path.splitext(raw_filename)
annotation_filename = os.path.join(raw_path, base_filename + '.csv')
meta_data = self.read_chunk_meta(annotation_filename)
tags = []
for i, tag in enumerate(meta_data['majorityvote']):
                    if tag == 'b':
                        print file
                    if tag != 'S' and tag != 'U':
tags.append(self.tagcode_to_taglabel(tag))
tags = ';'.join(tags)
writer.writerow(
(os.path.join(relative_path, raw_filename), scene_label, meta_data['majorityvote'], tags))
finally:
f.close()
foot()
all_folds_found = True
for fold in xrange(1, self.evaluation_folds):
for target_tag in self.audio_tags:
if not os.path.isfile(os.path.join(self.evaluation_setup_path,
'fold' + str(fold) + '_' + target_tag.replace('/', '-').replace(' ',
'_') + '_train.txt')):
all_folds_found = False
if not os.path.isfile(os.path.join(self.evaluation_setup_path,
'fold' + str(fold) + '_' + target_tag.replace('/', '-').replace(' ',
'_') + '_test.txt')):
all_folds_found = False
if not all_folds_found:
if not os.path.isdir(self.evaluation_setup_path):
os.makedirs(self.evaluation_setup_path)
numpy.random.seed(475686)
kf = KFold(n=len(self.audio_files), n_folds=self.evaluation_folds, shuffle=True)
refined_files = []
with open(os.path.join(self.local_path, 'chime_home', 'chunks_refined.csv'), 'rt') as f:
for row in csv.reader(f, delimiter=','):
refined_files.append(self.relative_to_absolute_path(os.path.join('chime_home','chunks',row[1]+'.wav')))
fold = 1
files = numpy.array(refined_files)
for train_index, test_index in kf:
train_files = files[train_index]
test_files = files[test_index]
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in train_files:
raw_path, raw_filename = os.path.split(file)
relative_path = raw_path.replace(self.local_path + os.path.sep, '')
item = self.file_meta(file)[0]
writer.writerow([os.path.join(relative_path, raw_filename), item['scene_label'],item['tag_string'], ';'.join(item['tags'])])
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in test_files:
raw_path, raw_filename = os.path.split(file)
relative_path = raw_path.replace(self.local_path + os.path.sep, '')
writer.writerow([os.path.join(relative_path, raw_filename)])
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_evaluate.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in test_files:
raw_path, raw_filename = os.path.split(file)
relative_path = raw_path.replace(self.local_path + os.path.sep, '')
item = self.file_meta(file)[0]
writer.writerow([os.path.join(relative_path, raw_filename), item['scene_label'],item['tag_string'], ';'.join(item['tags'])])
fold+= 1
# Legacy datasets
# =====================================================
# DCASE 2013
# =====================================================
class DCASE2013_Scene_DevelopmentSet(Dataset):
"""DCASE 2013 Acoustic scene classification, development dataset
"""
def __init__(self, data_path='data'):
Dataset.__init__(self, data_path=data_path, name='DCASE2013-scene-development')
self.authors = 'Dimitrios Giannoulis, Emmanouil Benetos, Dan Stowell, and Mark Plumbley'
self.name_remote = 'IEEE AASP 2013 CASA Challenge - Public Dataset for Scene Classification Task'
self.url = 'http://www.elec.qmul.ac.uk/digitalmusic/sceneseventschallenge/'
self.audio_source = 'Field recording'
self.audio_type = 'Natural'
self.recording_device_model = 'Unknown'
self.microphone_model = 'Soundman OKM II Klassik/studio A3 electret microphone'
self.evaluation_folds = 5
self.package_list = [
{
'remote_package': 'http://c4dm.eecs.qmul.ac.uk/rdr/bitstream/handle/123456789/29/scenes_stereo.zip?sequence=1',
'local_package': os.path.join(self.local_path, 'scenes_stereo.zip'),
'local_audio_path': os.path.join(self.local_path, 'scenes_stereo'),
}
]
def on_after_extract(self):
# Make legacy dataset compatible with DCASE2016 dataset scheme
if not os.path.isfile(self.meta_file):
section_header('Generating meta file for dataset')
f = open(self.meta_file, 'wt')
try:
writer = csv.writer(f, delimiter='\t')
for file in self.audio_files:
raw_path, raw_filename = os.path.split(file)
relative_path = self.absolute_to_relative(raw_path)
label = os.path.splitext(os.path.split(file)[1])[0][:-2]
writer.writerow((os.path.join(relative_path, raw_filename), label))
finally:
f.close()
foot()
all_folds_found = True
for fold in xrange(1, self.evaluation_folds):
if not os.path.isfile(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt')):
all_folds_found = False
if not os.path.isfile(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt')):
all_folds_found = False
if not all_folds_found:
section_header('Generating evaluation setup files for dataset')
if not os.path.isdir(self.evaluation_setup_path):
os.makedirs(self.evaluation_setup_path)
print self.evaluation_setup_path
classes = []
files = []
for item in self.meta:
classes.append(item['scene_label'])
files.append(item['file'])
files = numpy.array(files)
sss = StratifiedShuffleSplit(y=classes, n_iter=self.evaluation_folds, test_size=0.3, random_state=0)
fold = 1
for train_index, test_index in sss:
# print("TRAIN:", train_index, "TEST:", test_index)
train_files = files[train_index]
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in train_files:
raw_path, raw_filename = os.path.split(file)
label = self.file_meta(file)[0]['scene_label']
writer.writerow([os.path.join(raw_path, raw_filename), label])
test_files = files[test_index]
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in test_files:
raw_path, raw_filename = os.path.split(file)
writer.writerow([os.path.join(raw_path, raw_filename)])
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_evaluate.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in test_files:
raw_path, raw_filename = os.path.split(file)
label = self.file_meta(file)[0]['scene_label']
writer.writerow([os.path.join(raw_path, raw_filename), label])
fold += 1
foot()
class DCASE2013_Scene_EvaluationSet(DCASE2013_Scene_DevelopmentSet):
"""DCASE 2013 Acoustic scene classification, evaluation dataset
"""
def __init__(self, data_path='data'):
Dataset.__init__(self, data_path=data_path, name='DCASE2013-scene-challenge')
self.authors = 'Dimitrios Giannoulis, Emmanouil Benetos, Dan Stowell, and Mark Plumbley'
self.name_remote = 'IEEE AASP 2013 CASA Challenge - Private Dataset for Scene Classification Task'
self.url = 'http://www.elec.qmul.ac.uk/digitalmusic/sceneseventschallenge/'
self.audio_source = 'Field recording'
self.audio_type = 'Natural'
self.recording_device_model = 'Unknown'
self.microphone_model = 'Soundman OKM II Klassik/studio A3 electret microphone'
self.evaluation_folds = 5
self.package_list = [
{
'remote_package': 'https://archive.org/download/dcase2013_scene_classification_testset/scenes_stereo_testset.zip',
'local_package': os.path.join(self.local_path, 'scenes_stereo_testset.zip'),
'local_audio_path': os.path.join(self.local_path, 'scenes_stereo_testset'),
}
]
def on_after_extract(self):
# Make legacy dataset compatible with DCASE2016 dataset scheme
if not os.path.isfile(self.meta_file) or 1:
section_header('Generating meta file for dataset')
f = open(self.meta_file, 'wt')
try:
writer = csv.writer(f, delimiter='\t')
for file in self.audio_files:
raw_path, raw_filename = os.path.split(file)
relative_path = self.absolute_to_relative(raw_path)
label = os.path.splitext(os.path.split(file)[1])[0][:-2]
writer.writerow((os.path.join(relative_path, raw_filename), label))
finally:
f.close()
foot()
all_folds_found = True
for fold in xrange(1, self.evaluation_folds):
if not os.path.isfile(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt')):
all_folds_found = False
if not os.path.isfile(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt')):
all_folds_found = False
if not all_folds_found:
section_header('Generating evaluation setup files for dataset')
if not os.path.isdir(self.evaluation_setup_path):
os.makedirs(self.evaluation_setup_path)
classes = []
files = []
for item in self.meta:
classes.append(item['scene_label'])
files.append(item['file'])
files = numpy.array(files)
sss = StratifiedShuffleSplit(y=classes, n_iter=self.evaluation_folds, test_size=0.3, random_state=0)
fold = 1
for train_index, test_index in sss:
train_files = files[train_index]
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in train_files:
raw_path, raw_filename = os.path.split(file)
label = self.file_meta(file)[0]['scene_label']
writer.writerow([os.path.join(raw_path, raw_filename), label])
test_files = files[test_index]
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in test_files:
raw_path, raw_filename = os.path.split(file)
writer.writerow([os.path.join(raw_path, raw_filename)])
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_evaluate.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in test_files:
raw_path, raw_filename = os.path.split(file)
label = self.file_meta(file)[0]['scene_label']
writer.writerow([os.path.join(raw_path, raw_filename), label])
fold += 1
foot()
# Sound events
class DCASE2013_Event_DevelopmentSet(Dataset):
"""DCASE 2013 Sound event detection, development dataset
"""
def __init__(self, data_path='data'):
Dataset.__init__(self, data_path=data_path, name='DCASE2013-event-development')
self.authors = 'Dimitrios Giannoulis, Emmanouil Benetos, Dan Stowell, and Mark Plumbley'
self.name_remote = 'IEEE AASP CASA Challenge - Public Dataset for Event Detection Task'
self.url = 'http://www.elec.qmul.ac.uk/digitalmusic/sceneseventschallenge/'
self.audio_source = 'Field recording'
self.audio_type = 'Natural'
self.recording_device_model = 'Unknown'
self.microphone_model = 'Soundman OKM II Klassik/studio A3 electret microphone'
self.evaluation_folds = 5
self.package_list = [
{
'remote_package': 'https://archive.org/download/dcase2013_event_detection_development_OS/events_OS_development_v2.zip',
'local_package': os.path.join(self.local_path, 'events_OS_development_v2.zip'),
'local_audio_path': os.path.join(self.local_path, 'events_OS_development_v2'),
},
# {
# 'remote_package':'http://c4dm.eecs.qmul.ac.uk/rdr/bitstream/handle/123456789/28/singlesounds_annotation.zip?sequence=9',
# 'local_package': os.path.join(self.local_path, 'singlesounds_annotation.zip'),
# 'local_audio_path': None,
# },
# {
# 'remote_package':'http://c4dm.eecs.qmul.ac.uk/rdr/bitstream/handle/123456789/28/singlesounds_stereo.zip?sequence=7',
# 'local_package': os.path.join(self.local_path, 'singlesounds_stereo.zip'),
# 'local_audio_path': os.path.join(self.local_path, 'singlesounds_stereo'),
# },
]
def on_after_extract(self):
# Make legacy dataset compatible with DCASE2016 dataset scheme
scene_label = 'office'
if not os.path.isfile(self.meta_file):
meta_file_handle = open(self.meta_file, 'wt')
try:
writer = csv.writer(meta_file_handle, delimiter='\t')
for file in self.audio_files:
raw_path, raw_filename = os.path.split(file)
relative_path = self.absolute_to_relative(raw_path)
base_filename, file_extension = os.path.splitext(raw_filename)
if file.find('singlesounds_stereo') != -1:
annotation_filename = os.path.join(self.local_path, 'Annotation1', base_filename + '_bdm.txt')
label = base_filename[:-2]
if os.path.isfile(annotation_filename):
annotation_file_handle = open(annotation_filename, 'rt')
try:
annotation_file_reader = csv.reader(annotation_file_handle, delimiter='\t')
for annotation_file_row in annotation_file_reader:
writer.writerow((os.path.join(relative_path, raw_filename), scene_label,
annotation_file_row[0], annotation_file_row[1], label, 'i'))
finally:
annotation_file_handle.close()
elif file.find('events_OS_development_v2') != -1:
annotation_filename = os.path.join(self.local_path, 'events_OS_development_v2',
base_filename + '_v2.txt')
if os.path.isfile(annotation_filename):
annotation_file_handle = open(annotation_filename, 'rt')
try:
annotation_file_reader = csv.reader(annotation_file_handle, delimiter='\t')
for annotation_file_row in annotation_file_reader:
writer.writerow((os.path.join(relative_path, raw_filename), scene_label,
annotation_file_row[0], annotation_file_row[1],
annotation_file_row[2], 'm'))
finally:
annotation_file_handle.close()
finally:
meta_file_handle.close()
all_folds_found = True
for fold in xrange(1, self.evaluation_folds):
if not os.path.isfile(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt')):
all_folds_found = False
if not os.path.isfile(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt')):
all_folds_found = False
if not all_folds_found:
# Construct training and testing sets. Isolated sound are used for training and
# polyphonic mixtures are used for testing.
if not os.path.isdir(self.evaluation_setup_path):
os.makedirs(self.evaluation_setup_path)
files = []
for item in self.meta:
if item['file'] not in files:
files.append(item['file'])
files = numpy.array(files)
f = numpy.zeros(len(files))
sss = StratifiedShuffleSplit(y=f, n_iter=5, test_size=0.3, random_state=0)
fold = 1
for train_index, test_index in sss:
# print("TRAIN:", train_index, "TEST:", test_index)
train_files = files[train_index]
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in train_files:
raw_path, raw_filename = os.path.split(file)
relative_path = raw_path.replace(self.local_path + os.path.sep, '')
for item in self.meta:
if item['file'] == file:
writer.writerow([os.path.join(relative_path, raw_filename), item['scene_label'],
item['event_onset'], item['event_offset'], item['event_label']])
test_files = files[test_index]
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in test_files:
raw_path, raw_filename = os.path.split(file)
relative_path = raw_path.replace(self.local_path + os.path.sep, '')
writer.writerow([os.path.join(relative_path, raw_filename)])
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_evaluate.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in test_files:
raw_path, raw_filename = os.path.split(file)
relative_path = raw_path.replace(self.local_path + os.path.sep, '')
for item in self.meta:
if item['file'] == file:
writer.writerow([os.path.join(relative_path, raw_filename), item['scene_label'],
item['event_onset'], item['event_offset'], item['event_label']])
fold += 1
class DCASE2013_Event_EvaluationSet(Dataset):
"""DCASE 2013 Sound event detection, evaluation dataset
"""
def __init__(self, data_path='data'):
Dataset.__init__(self, data_path=data_path, name='DCASE2013-event-challenge')
self.authors = 'Dimitrios Giannoulis, Emmanouil Benetos, Dan Stowell, and Mark Plumbley'
self.name_remote = 'IEEE AASP CASA Challenge - Private Dataset for Event Detection Task'
self.url = 'http://www.elec.qmul.ac.uk/digitalmusic/sceneseventschallenge/'
self.audio_source = 'Field recording'
self.audio_type = 'Natural'
self.recording_device_model = 'Unknown'
self.microphone_model = 'Soundman OKM II Klassik/studio A3 electret microphone'
self.evaluation_folds = 5
self.package_list = [
{
'remote_package': 'https://archive.org/download/dcase2013_event_detection_testset_OS/dcase2013_event_detection_testset_OS.zip',
'local_package': os.path.join(self.local_path, 'dcase2013_event_detection_testset_OS.zip'),
'local_audio_path': os.path.join(self.local_path, 'dcase2013_event_detection_testset_OS'),
}
]
def on_after_extract(self):
# Make legacy dataset compatible with DCASE2016 dataset scheme
scene_label = 'office'
if not os.path.isfile(self.meta_file):
meta_file_handle = open(self.meta_file, 'wt')
try:
writer = csv.writer(meta_file_handle, delimiter='\t')
for file in self.audio_files:
raw_path, raw_filename = os.path.split(file)
relative_path = self.absolute_to_relative(raw_path)
base_filename, file_extension = os.path.splitext(raw_filename)
if file.find('dcase2013_event_detection_testset_OS') != -1:
annotation_filename = os.path.join(self.local_path, 'dcase2013_event_detection_testset_OS',base_filename + '_v2.txt')
if os.path.isfile(annotation_filename):
annotation_file_handle = open(annotation_filename, 'rt')
try:
annotation_file_reader = csv.reader(annotation_file_handle, delimiter='\t')
for annotation_file_row in annotation_file_reader:
writer.writerow((os.path.join(relative_path, raw_filename), scene_label,
annotation_file_row[0], annotation_file_row[1],
annotation_file_row[2], 'm'))
finally:
annotation_file_handle.close()
else:
annotation_filename = os.path.join(self.local_path, 'dcase2013_event_detection_testset_OS',base_filename + '.txt')
if os.path.isfile(annotation_filename):
annotation_file_handle = open(annotation_filename, 'rt')
try:
annotation_file_reader = csv.reader(annotation_file_handle, delimiter='\t')
for annotation_file_row in annotation_file_reader:
writer.writerow((os.path.join(relative_path, raw_filename), scene_label,
annotation_file_row[0], annotation_file_row[1],
annotation_file_row[2], 'm'))
finally:
annotation_file_handle.close()
finally:
meta_file_handle.close()
all_folds_found = True
for fold in xrange(1, self.evaluation_folds):
if not os.path.isfile(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt')):
all_folds_found = False
if not os.path.isfile(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt')):
all_folds_found = False
if not all_folds_found:
# Construct training and testing sets. Isolated sound are used for training and
# polyphonic mixtures are used for testing.
if not os.path.isdir(self.evaluation_setup_path):
os.makedirs(self.evaluation_setup_path)
files = []
for item in self.meta:
if item['file'] not in files:
files.append(item['file'])
files = numpy.array(files)
f = numpy.zeros(len(files))
sss = StratifiedShuffleSplit(y=f, n_iter=5, test_size=0.3, random_state=0)
fold = 1
for train_index, test_index in sss:
# print("TRAIN:", train_index, "TEST:", test_index)
train_files = files[train_index]
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in train_files:
raw_path, raw_filename = os.path.split(file)
relative_path = raw_path.replace(self.local_path + os.path.sep, '')
for item in self.meta:
if item['file'] == file:
writer.writerow([os.path.join(relative_path, raw_filename), item['scene_label'],
item['event_onset'], item['event_offset'], item['event_label']])
test_files = files[test_index]
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in test_files:
raw_path, raw_filename = os.path.split(file)
relative_path = raw_path.replace(self.local_path + os.path.sep, '')
writer.writerow([os.path.join(relative_path, raw_filename)])
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_evaluate.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in test_files:
raw_path, raw_filename = os.path.split(file)
relative_path = raw_path.replace(self.local_path + os.path.sep, '')
for item in self.meta:
if item['file'] == file:
writer.writerow([os.path.join(relative_path, raw_filename), item['scene_label'],
item['event_onset'], item['event_offset'], item['event_label']])
fold += 1
| mit |
sharag/py_analis | razlad/for_stat.py | 1 | 7047 | import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.ticker import LinearLocator, FormatStrFormatter
from razlad.functions import f_probability, max_probabil
import pickle
# Generating the surge (step) signals
# Surge characteristics
graph_len = 1000
surge_len = 200
surges = []
surge_ps = [0]*((graph_len - surge_len)//2)
surge_ps.extend([1]*surge_len)
surge_ps.extend([0]*int((graph_len - surge_len)/2))
surges.append(surge_ps)
surge_lin_ps = [0.0]*((graph_len - surge_len)//2)
surge_lin_ps.extend([x/surge_len for x in range(surge_len)])
surge_lin_ps.extend([1.0]*((graph_len - surge_len)//2))
surges.append(surge_lin_ps)
#surge_lin_ps_inv = [1.0]*((graph_len - surge_len)//2)
#surge_lin_ps_inv.extend([(1 - x/surge_len) for x in range(surge_len)])
#surge_lin_ps_inv.extend([0.0]*((graph_len - surge_len)//2))
surge_x1 = [0.0]*((graph_len - surge_len)//2)
surge_x1.extend([x/surge_len for x in range(surge_len)])
surge_x1.extend([(1 - x/surge_len) for x in range(surge_len)])
surge_x1.extend([0.0]*((graph_len - surge_len)//2))
surges.append(surge_x1)
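# Note: surge_x1 above is a triangular pulse: a zero segment, a linear rise over
# surge_len samples, a linear fall over surge_len samples, then zero again.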
#surge_x2_ps = [0.0]*((graph_len - surge_len)//2)
#surge_x2_ps.extend([((x*2/surge_len)**2)/2 for x in range(surge_len//2)])
#surge_x2_ps.extend([(1 - ((x - surge_len/2)/(surge_len//2))**2)/2 + 0.5 for x in range(surge_len//2)])
#surge_x2_ps.extend([1.0]*((graph_len - surge_len)//2))
#surge_x2_ps_inv = [1.0]*((graph_len - surge_len)//2)
#surge_x2_ps_inv.extend([1 - (((x*2/surge_len)**2)/2) for x in range(surge_len//2)])
#surge_x2_ps_inv.extend([1 - ((1 - ((x - surge_len/2)/(surge_len//2))**2)/2 + 0.5) for x in range(surge_len//2)])
#surge_x2_ps_inv.extend([0.0]*((graph_len - surge_len)//2))
surge_len_05 = surge_len//2
surge_x2 = [0.0]*((graph_len - surge_len)//2)
surge_x2.extend([((x*2/surge_len_05)**2)/2 for x in range(surge_len_05//2)])
surge_x2.extend([(1 - ((x - surge_len_05/2)/(surge_len_05//2))**2)/2 + 0.5 for x in range(surge_len_05//2)])
surge_x2.extend([1 - (((x*2/surge_len_05)**2)/2) for x in range(surge_len_05//2)])
surge_x2.extend([1 - ((1 - ((x - surge_len_05/2)/(surge_len_05//2))**2)/2 + 0.5) for x in range(surge_len_05//2)])
surge_x2.extend([0.0]*((graph_len - surge_len)//2))
surges.append(surge_x2)
with open('for_stat.pickle', 'wb') as f:
pickle.dump(surges, f)
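# At this point `surges` holds the four test signals built above (step, linear ramp,
# triangular pulse and piecewise-quadratic pulse); they are pickled so the
# window-selection part below can reload the same signals.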
plt.figure(1)
ax = plt.subplot(1, 1, 1)
plt.plot(surge_ps, linewidth=1)
plt.plot(surge_lin_ps, linewidth=1)
plt.plot(surge_x2, linewidth=1)
plt.grid(True)
for label in ax.get_xticklabels() + ax.get_yticklabels():
label.set_fontsize(16)
#plt.xscale('log')
plt.show()
# Selecting the window sizes
with open('for_stat.pickle', 'rb') as f:
surges = pickle.load(f)
# Window characteristics
win_size = 200
# step change of the DC component
len_win, len_win_bef, probabl_ps = max_probabil(surge_ps, win_size, surge_len, 2)
x_mesh_bef_ps, y_mesh_bef_ps = np.meshgrid(len_win, len_win_bef)
# linear change of the DC component
len_win, len_win_bef, probabl_lin_ps = max_probabil(surge_lin_ps, win_size, surge_len, 2)
x_mesh_bef_lin_ps, y_mesh_bef_lin_ps = np.meshgrid(len_win, len_win_bef)
# nonlinear change of the DC component and back
len_win, len_win_bef, probabl_x2_ps_obr = max_probabil(surge_x2, win_size, surge_len, 2)
x_mesh_bef_x2, y_mesh_bef_x2 = np.meshgrid(len_win, len_win_bef)
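# max_probabil is assumed (judging from its use here) to return the tested window
# lengths, the "window before" lengths and a 2-D array of probability values, which
# is plotted below as a surface over the (len_win, len_win_bef) grid.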
# step change of the DC component
fig = plt.figure(1)
ax = fig.gca(projection='3d')
# Plot the surface.
surf = ax.plot_surface(x_mesh_bef_ps, y_mesh_bef_ps, probabl_ps.transpose(), cmap=cm.seismic, linewidth=2,
antialiased=True)
# Customize the z axis.
#ax.set_zlim(-1.01, 1.01)
ax.zaxis.set_major_locator(LinearLocator(10))
ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
#ax.xaxis(len_win_x)
ax.set_xlabel('Win len')
ax.set_ylabel('win_before')
# Add a color bar which maps values to colors.
fig.colorbar(surf, shrink=0.5, aspect=5)
plt.show()
plt.figure(2)
plt.subplot(1, 1, 1)
plt.plot(surge_ps, linewidth=3)
plt.grid(True)
plt.show()
# linear change of the DC component
fig = plt.figure(3)
ax = fig.gca(projection='3d')
# Plot the surface.
surf = ax.plot_surface(x_mesh_bef_lin_ps, y_mesh_bef_lin_ps, probabl_lin_ps.transpose(), cmap=cm.seismic, linewidth=2, antialiased=True)
# Customize the z axis.
#ax.set_zlim(-1.01, 1.01)
ax.zaxis.set_major_locator(LinearLocator(10))
ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
#ax.xaxis(len_win_x)
ax.set_xlabel('Win len')
ax.set_ylabel('win_before')
# Add a color bar which maps values to colors.
fig.colorbar(surf, shrink=0.5, aspect=5)
plt.show()
plt.figure(4)
plt.subplot(1, 1, 1)
plt.plot(surge_lin_ps, linewidth=3)
plt.grid(True)
plt.show()
# nonlinear change of the DC component
fig = plt.figure(5)
ax = fig.gca(projection='3d')
# Plot the surface.
#surf = ax.plot_surface(x_mesh_bef_x2_ps, y_mesh_bef_x2_ps, probabl_x2_ps.transpose(), cmap=cm.seismic, linewidth=2, antialiased=True)
# Customize the z axis.
#ax.set_zlim(-1.01, 1.01)
ax.zaxis.set_major_locator(LinearLocator(10))
ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
#ax.xaxis(len_win_x)
ax.set_xlabel('Win len')
ax.set_ylabel('win_before')
# Add a color bar which maps values to colors.
fig.colorbar(surf, shrink=0.5, aspect=5)
plt.show()
plt.figure(6)
plt.subplot(1, 1, 1)
#plt.plot(surge_x2_ps, linewidth=3)
plt.grid(True)
plt.show()
# nonlinear change of the DC component and back
fig = plt.figure(7)
ax = fig.gca(projection='3d')
# Plot the surface.
#surf = ax.plot_surface(x_mesh_bef_x2_ps_obr, y_mesh_bef_x2_ps_obr, probabl_x2_ps_obr.transpose(), cmap=cm.seismic, linewidth=2, antialiased=True)
# Customize the z axis.
#ax.set_zlim(-1.01, 1.01)
ax.zaxis.set_major_locator(LinearLocator(10))
ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
#ax.xaxis(len_win_x)
ax.set_xlabel('Win len')
ax.set_ylabel('win_before')
# Add a color bar which maps values to colors.
fig.colorbar(surf, shrink=0.5, aspect=5)
plt.show()
plt.figure(8)
plt.subplot(1, 1, 1)
#plt.plot(surge_x2_ps_obr, linewidth=3)
plt.grid(True)
plt.show()
# Leftover plots
plt.figure(9)
plt.subplot(1, 1, 1)
#plt.plot(surge_lin_ps_inv, linewidth=3)
plt.grid(True)
plt.show()
plt.figure(10)
plt.subplot(1, 1, 1)
#plt.plot(surge_lin_ps_obr, linewidth=3)
plt.grid(True)
plt.show()
plt.figure(11)
plt.subplot(1, 1, 1)
#plt.plot(surge_x2_ps, linewidth=3)
plt.grid(True)
plt.show()
plt.figure(12)
plt.subplot(1, 1, 1)
#plt.plot(surge_x2_ps_inv, linewidth=3)
plt.grid(True)
plt.show()
| gpl-3.0 |
carrillo/scikit-learn | sklearn/datasets/base.py | 196 | 18554 | """
Base IO code for all datasets
"""
# Copyright (c) 2007 David Cournapeau <[email protected]>
# 2010 Fabian Pedregosa <[email protected]>
# 2010 Olivier Grisel <[email protected]>
# License: BSD 3 clause
import os
import csv
import shutil
from os import environ
from os.path import dirname
from os.path import join
from os.path import exists
from os.path import expanduser
from os.path import isdir
from os import listdir
from os import makedirs
import numpy as np
from ..utils import check_random_state
class Bunch(dict):
"""Container object for datasets
Dictionary-like object that exposes its keys as attributes.
>>> b = Bunch(a=1, b=2)
>>> b['b']
2
>>> b.b
2
>>> b.a = 3
>>> b['a']
3
>>> b.c = 6
>>> b['c']
6
"""
def __init__(self, **kwargs):
dict.__init__(self, kwargs)
def __setattr__(self, key, value):
self[key] = value
def __getattr__(self, key):
try:
return self[key]
except KeyError:
raise AttributeError(key)
def __getstate__(self):
return self.__dict__
def get_data_home(data_home=None):
"""Return the path of the scikit-learn data dir.
This folder is used by some large dataset loaders to avoid
downloading the data several times.
By default the data dir is set to a folder named 'scikit_learn_data'
in the user home folder.
Alternatively, it can be set by the 'SCIKIT_LEARN_DATA' environment
variable or programmatically by giving an explicit folder path. The
'~' symbol is expanded to the user home folder.
If the folder does not already exist, it is automatically created.
"""
if data_home is None:
data_home = environ.get('SCIKIT_LEARN_DATA',
join('~', 'scikit_learn_data'))
data_home = expanduser(data_home)
if not exists(data_home):
makedirs(data_home)
return data_home
def clear_data_home(data_home=None):
"""Delete all the content of the data home cache."""
data_home = get_data_home(data_home)
shutil.rmtree(data_home)
def load_files(container_path, description=None, categories=None,
load_content=True, shuffle=True, encoding=None,
decode_error='strict', random_state=0):
"""Load text files with categories as subfolder names.
Individual samples are assumed to be files stored a two levels folder
structure such as the following:
container_folder/
category_1_folder/
file_1.txt
file_2.txt
...
file_42.txt
category_2_folder/
file_43.txt
file_44.txt
...
The folder names are used as supervised signal label names. The
individual file names are not important.
This function does not try to extract features into a numpy array or
scipy sparse matrix. In addition, if load_content is false it
does not try to load the files in memory.
To use text files in a scikit-learn classification or clustering
algorithm, you will need to use the `sklearn.feature_extraction.text`
module to build a feature extraction transformer that suits your
problem.
If you set load_content=True, you should also specify the encoding of
the text using the 'encoding' parameter. For many modern text files,
'utf-8' will be the correct encoding. If you leave encoding equal to None,
then the content will be made of bytes instead of Unicode, and you will
not be able to use most functions in `sklearn.feature_extraction.text`.
Similar feature extractors should be built for other kind of unstructured
data input such as images, audio, video, ...
Read more in the :ref:`User Guide <datasets>`.
Parameters
----------
container_path : string or unicode
Path to the main folder holding one subfolder per category
description: string or unicode, optional (default=None)
A paragraph describing the characteristic of the dataset: its source,
reference, etc.
categories : A collection of strings or None, optional (default=None)
If None (default), load all the categories.
If not None, list of category names to load (other categories ignored).
load_content : boolean, optional (default=True)
Whether to load or not the content of the different files. If
true a 'data' attribute containing the text information is present
in the data structure returned. If not, a filenames attribute
gives the path to the files.
encoding : string or None (default is None)
If None, do not try to decode the content of the files (e.g. for
images or other non-text content).
If not None, encoding to use to decode text files to Unicode if
load_content is True.
    decode_error : {'strict', 'ignore', 'replace'}, optional
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. Passed as keyword
argument 'errors' to bytes.decode.
shuffle : bool, optional (default=True)
Whether or not to shuffle the data: might be important for models that
make the assumption that the samples are independent and identically
distributed (i.i.d.), such as stochastic gradient descent.
random_state : int, RandomState instance or None, optional (default=0)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are: either
        'data', the raw text data to learn, or 'filenames', the files
holding it, 'target', the classification labels (integer index),
'target_names', the meaning of the labels, and 'DESCR', the full
description of the dataset.
"""
target = []
target_names = []
filenames = []
folders = [f for f in sorted(listdir(container_path))
if isdir(join(container_path, f))]
if categories is not None:
folders = [f for f in folders if f in categories]
for label, folder in enumerate(folders):
target_names.append(folder)
folder_path = join(container_path, folder)
documents = [join(folder_path, d)
for d in sorted(listdir(folder_path))]
target.extend(len(documents) * [label])
filenames.extend(documents)
# convert to array for fancy indexing
filenames = np.array(filenames)
target = np.array(target)
if shuffle:
random_state = check_random_state(random_state)
indices = np.arange(filenames.shape[0])
random_state.shuffle(indices)
filenames = filenames[indices]
target = target[indices]
if load_content:
data = []
for filename in filenames:
with open(filename, 'rb') as f:
data.append(f.read())
if encoding is not None:
data = [d.decode(encoding, decode_error) for d in data]
return Bunch(data=data,
filenames=filenames,
target_names=target_names,
target=target,
DESCR=description)
return Bunch(filenames=filenames,
target_names=target_names,
target=target,
DESCR=description)
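# Illustrative usage sketch (added; hypothetical folder layout, not part of
# the original module): the category subfolders become the label names.
#
#     >>> bunch = load_files('/path/to/container_folder', encoding='utf-8')  # doctest: +SKIP
#     >>> bunch.target_names                                                 # doctest: +SKIP
#     ['category_1_folder', 'category_2_folder']
#     >>> len(bunch.data) == len(bunch.target)                               # doctest: +SKIP
#     True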
def load_iris():
"""Load and return the iris dataset (classification).
The iris dataset is a classic and very easy multi-class classification
dataset.
================= ==============
Classes 3
Samples per class 50
Samples total 150
Dimensionality 4
Features real, positive
================= ==============
Read more in the :ref:`User Guide <datasets>`.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'target', the classification labels,
'target_names', the meaning of the labels, 'feature_names', the
meaning of the features, and 'DESCR', the
full description of the dataset.
Examples
--------
Let's say you are interested in the samples 10, 25, and 50, and want to
know their class name.
>>> from sklearn.datasets import load_iris
>>> data = load_iris()
>>> data.target[[10, 25, 50]]
array([0, 0, 1])
>>> list(data.target_names)
['setosa', 'versicolor', 'virginica']
"""
module_path = dirname(__file__)
with open(join(module_path, 'data', 'iris.csv')) as csv_file:
data_file = csv.reader(csv_file)
temp = next(data_file)
n_samples = int(temp[0])
n_features = int(temp[1])
target_names = np.array(temp[2:])
data = np.empty((n_samples, n_features))
target = np.empty((n_samples,), dtype=np.int)
for i, ir in enumerate(data_file):
data[i] = np.asarray(ir[:-1], dtype=np.float)
target[i] = np.asarray(ir[-1], dtype=np.int)
with open(join(module_path, 'descr', 'iris.rst')) as rst_file:
fdescr = rst_file.read()
return Bunch(data=data, target=target,
target_names=target_names,
DESCR=fdescr,
feature_names=['sepal length (cm)', 'sepal width (cm)',
'petal length (cm)', 'petal width (cm)'])
def load_digits(n_class=10):
"""Load and return the digits dataset (classification).
Each datapoint is a 8x8 image of a digit.
================= ==============
Classes 10
Samples per class ~180
Samples total 1797
Dimensionality 64
Features integers 0-16
================= ==============
Read more in the :ref:`User Guide <datasets>`.
Parameters
----------
n_class : integer, between 0 and 10, optional (default=10)
The number of classes to return.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'images', the images corresponding
to each sample, 'target', the classification labels for each
sample, 'target_names', the meaning of the labels, and 'DESCR',
the full description of the dataset.
Examples
--------
To load the data and visualize the images::
>>> from sklearn.datasets import load_digits
>>> digits = load_digits()
>>> print(digits.data.shape)
(1797, 64)
>>> import pylab as pl #doctest: +SKIP
>>> pl.gray() #doctest: +SKIP
>>> pl.matshow(digits.images[0]) #doctest: +SKIP
>>> pl.show() #doctest: +SKIP
"""
module_path = dirname(__file__)
data = np.loadtxt(join(module_path, 'data', 'digits.csv.gz'),
delimiter=',')
with open(join(module_path, 'descr', 'digits.rst')) as f:
descr = f.read()
target = data[:, -1]
flat_data = data[:, :-1]
images = flat_data.view()
images.shape = (-1, 8, 8)
if n_class < 10:
idx = target < n_class
flat_data, target = flat_data[idx], target[idx]
images = images[idx]
return Bunch(data=flat_data,
target=target.astype(np.int),
target_names=np.arange(10),
images=images,
DESCR=descr)
def load_diabetes():
"""Load and return the diabetes dataset (regression).
============== ==================
Samples total 442
Dimensionality 10
Features real, -.2 < x < .2
Targets integer 25 - 346
============== ==================
Read more in the :ref:`User Guide <datasets>`.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn and 'target', the regression target for each
sample.
"""
base_dir = join(dirname(__file__), 'data')
data = np.loadtxt(join(base_dir, 'diabetes_data.csv.gz'))
target = np.loadtxt(join(base_dir, 'diabetes_target.csv.gz'))
return Bunch(data=data, target=target)
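# Illustrative usage (added sketch, not part of the original module; the
# shapes follow the table above: 442 samples, 10 features):
#
#     >>> diabetes = load_diabetes()                    # doctest: +SKIP
#     >>> diabetes.data.shape, diabetes.target.shape    # doctest: +SKIP
#     ((442, 10), (442,))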
def load_linnerud():
"""Load and return the linnerud dataset (multivariate regression).
Samples total: 20
Dimensionality: 3 for both data and targets
Features: integer
Targets: integer
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are: 'data' and
'targets', the two multivariate datasets, with 'data' corresponding to
the exercise and 'targets' corresponding to the physiological
measurements, as well as 'feature_names' and 'target_names'.
"""
base_dir = join(dirname(__file__), 'data/')
# Read data
data_exercise = np.loadtxt(base_dir + 'linnerud_exercise.csv', skiprows=1)
data_physiological = np.loadtxt(base_dir + 'linnerud_physiological.csv',
skiprows=1)
# Read header
with open(base_dir + 'linnerud_exercise.csv') as f:
header_exercise = f.readline().split()
with open(base_dir + 'linnerud_physiological.csv') as f:
header_physiological = f.readline().split()
with open(dirname(__file__) + '/descr/linnerud.rst') as f:
descr = f.read()
return Bunch(data=data_exercise, feature_names=header_exercise,
target=data_physiological,
target_names=header_physiological,
DESCR=descr)
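# Illustrative usage (added sketch, not part of the original module; 20
# samples with 3 exercise features and 3 physiological targets, as
# documented above):
#
#     >>> linnerud = load_linnerud()                    # doctest: +SKIP
#     >>> linnerud.data.shape, linnerud.target.shape    # doctest: +SKIP
#     ((20, 3), (20, 3))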
def load_boston():
"""Load and return the boston house-prices dataset (regression).
============== ==============
Samples total 506
Dimensionality 13
Features real, positive
Targets real 5. - 50.
============== ==============
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'target', the regression targets,
and 'DESCR', the full description of the dataset.
Examples
--------
>>> from sklearn.datasets import load_boston
>>> boston = load_boston()
>>> print(boston.data.shape)
(506, 13)
"""
module_path = dirname(__file__)
fdescr_name = join(module_path, 'descr', 'boston_house_prices.rst')
with open(fdescr_name) as f:
descr_text = f.read()
data_file_name = join(module_path, 'data', 'boston_house_prices.csv')
with open(data_file_name) as f:
data_file = csv.reader(f)
temp = next(data_file)
n_samples = int(temp[0])
n_features = int(temp[1])
data = np.empty((n_samples, n_features))
target = np.empty((n_samples,))
temp = next(data_file) # names of features
feature_names = np.array(temp)
for i, d in enumerate(data_file):
data[i] = np.asarray(d[:-1], dtype=np.float)
target[i] = np.asarray(d[-1], dtype=np.float)
return Bunch(data=data,
target=target,
# last column is target value
feature_names=feature_names[:-1],
DESCR=descr_text)
def load_sample_images():
"""Load sample images for image manipulation.
    Loads both ``china`` and ``flower``.
Returns
-------
data : Bunch
Dictionary-like object with the following attributes :
'images', the two sample images, 'filenames', the file
names for the images, and 'DESCR'
the full description of the dataset.
Examples
--------
To load the data and visualize the images:
>>> from sklearn.datasets import load_sample_images
>>> dataset = load_sample_images() #doctest: +SKIP
>>> len(dataset.images) #doctest: +SKIP
2
>>> first_img_data = dataset.images[0] #doctest: +SKIP
>>> first_img_data.shape #doctest: +SKIP
(427, 640, 3)
>>> first_img_data.dtype #doctest: +SKIP
dtype('uint8')
"""
# Try to import imread from scipy. We do this lazily here to prevent
# this module from depending on PIL.
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
except ImportError:
raise ImportError("The Python Imaging Library (PIL) "
"is required to load data from jpeg files")
module_path = join(dirname(__file__), "images")
with open(join(module_path, 'README.txt')) as f:
descr = f.read()
filenames = [join(module_path, filename)
for filename in os.listdir(module_path)
if filename.endswith(".jpg")]
# Load image data for each image in the source folder.
images = [imread(filename) for filename in filenames]
return Bunch(images=images,
filenames=filenames,
DESCR=descr)
def load_sample_image(image_name):
"""Load the numpy array of a single sample image
Parameters
    ----------
    image_name : {`china.jpg`, `flower.jpg`}
The name of the sample image loaded
Returns
-------
img: 3D array
The image as a numpy array: height x width x color
Examples
    --------
>>> from sklearn.datasets import load_sample_image
>>> china = load_sample_image('china.jpg') # doctest: +SKIP
>>> china.dtype # doctest: +SKIP
dtype('uint8')
>>> china.shape # doctest: +SKIP
(427, 640, 3)
>>> flower = load_sample_image('flower.jpg') # doctest: +SKIP
>>> flower.dtype # doctest: +SKIP
dtype('uint8')
>>> flower.shape # doctest: +SKIP
(427, 640, 3)
"""
images = load_sample_images()
index = None
for i, filename in enumerate(images.filenames):
if filename.endswith(image_name):
index = i
break
if index is None:
raise AttributeError("Cannot find sample image: %s" % image_name)
return images.images[index]
| bsd-3-clause |
gammalib/gammalib | inst/cta/test/dev/example_sim_photons.py | 1 | 4984 | #! /usr/bin/env python
# ==========================================================================
# This script illustrates how the GammaLib photon simulator works.
#
# Based on the MAGIC spectrum of the Crab nebula, and by assuming a powerlaw,
# it will create a Monte Carlo sample of photons.
#
# If matplotlib is installed, the spectrum will be displayed on the screen.
#
# --------------------------------------------------------------------------
#
# Copyright (C) 2013 Juergen Knoedlseder
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# ==========================================================================
from gammalib import *
from math import *
# ======================== #
# Simulate CTA observation #
# ======================== #
def simulate(xmlname, e_min, e_max, area, duration):
"""
Simulate CTA observation.
"""
# Allocate MC parameters
dir = GSkyDir()
emin = GEnergy()
emax = GEnergy()
tmin = GTime(0.0)
tmax = GTime(duration)
# Define MC parameters
dir.radec_deg(83.6331, 22.0145)
radius = 10.0
emin.TeV(e_min)
emax.TeV(e_max)
# Allocate random number generator
ran = GRan()
# Load models and extract first model
models = GModels(xmlname)
model = models[0]
print(model)
# Simulate photons
photons = model.mc(area, dir, radius, emin, emax, tmin, tmax, ran)
# Print photons
print(str(len(photons)) + " photons simulated.")
# Return photons
return photons
# ============ #
# Show photons #
# ============ #
def show_photons(photons, xmlname, e_min, e_max, area, duration, ebins=30):
"""
Show photons using matplotlib (if available).
"""
# Only proceed if matplotlib is available
try:
# Import matplotlib
import matplotlib.pyplot as plt
# Create figure
plt.figure(1)
plt.title("MC simulated photon spectrum (" + str(e_min) + '-' + str(e_max) + " TeV)")
# Setup energy range covered by data
ebds = GEbounds(ebins, GEnergy(e_min, "TeV"), GEnergy(e_max, "TeV"))
# Create energy axis
energy = []
for i in range(ebds.size()):
energy.append(ebds.elogmean(i).TeV())
# Fill histogram
counts = [0.0 for i in range(ebds.size())]
for photon in photons:
index = ebds.index(photon.energy())
counts[index] = counts[index] + 1.0
# Create error bars
error = [sqrt(c) for c in counts]
# Get model values
models = GModels(xmlname)
crab = models[0]
model = []
d = GSkyDir()
d.radec_deg(83.6331, 22.0145)
t = GTime()
for i in range(ebds.size()):
eval = ebds.elogmean(i)
ewidth = ebds.emax(i) - ebds.emin(i)
f = crab.value(GPhoton(d, eval, t)) * \
area * duration * ewidth.MeV()
model.append(f)
# Plot data
plt.loglog(energy, counts, 'ro')
plt.errorbar(energy, counts, error, ecolor='r')
# Plot model
plt.plot(energy, model, 'b-')
# Set axes
plt.xlabel("Energy (TeV)")
plt.ylabel("Number of incident photons")
# Notify
print("PLEASE CLOSE WINDOW TO CONTINUE ...")
# Show plot
plt.show()
except ImportError:
print("Matplotlib is not (correctly) installed on your system.")
# Return
return
#==========================#
# Main routine entry point #
#==========================#
if __name__ == '__main__':
# Dump header
print('')
print('********************')
print('* Simulate photons *')
print('********************')
# Set XML names
xmlnames = ['../data/crab.xml',
'../data/crab_eplaw.xml',
'../data/crab_file_function.xml',
'../data/crab_file_function_mod.xml']
# Set simulation parameters
e_min = 0.1 # 0.1 TeV
e_max = 100.0 # 100 TeV
area = 3200000.0 * 1.0e4 # 3200000.0 m^2
duration = 3600.0 * 5 # 5 hours
# Loop over models
for xmlname in xmlnames:
# Perform simulation
photons = simulate(xmlname, e_min, e_max, area, duration)
# Show photons
show_photons(photons, xmlname, e_min, e_max, area, duration)
| gpl-3.0 |
Vimos/scikit-learn | examples/linear_model/plot_sgd_comparison.py | 112 | 1819 | """
==================================
Comparing various online solvers
==================================
An example showing how different online solvers perform
on the hand-written digits dataset.
"""
# Author: Rob Zinkov <rob at zinkov dot com>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.linear_model import SGDClassifier, Perceptron
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import LogisticRegression
heldout = [0.95, 0.90, 0.75, 0.50, 0.01]
rounds = 20
digits = datasets.load_digits()
X, y = digits.data, digits.target
classifiers = [
("SGD", SGDClassifier()),
("ASGD", SGDClassifier(average=True)),
("Perceptron", Perceptron()),
("Passive-Aggressive I", PassiveAggressiveClassifier(loss='hinge',
C=1.0)),
("Passive-Aggressive II", PassiveAggressiveClassifier(loss='squared_hinge',
C=1.0)),
("SAG", LogisticRegression(solver='sag', tol=1e-1, C=1.e4 / X.shape[0]))
]
xx = 1. - np.array(heldout)
for name, clf in classifiers:
print("training %s" % name)
rng = np.random.RandomState(42)
yy = []
for i in heldout:
yy_ = []
for r in range(rounds):
X_train, X_test, y_train, y_test = \
train_test_split(X, y, test_size=i, random_state=rng)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
yy_.append(1 - np.mean(y_pred == y_test))
yy.append(np.mean(yy_))
plt.plot(xx, yy, label=name)
plt.legend(loc="upper right")
plt.xlabel("Proportion train")
plt.ylabel("Test Error Rate")
plt.show()
| bsd-3-clause |
nvoron23/statsmodels | statsmodels/stats/tests/test_power.py | 28 | 25876 | # -*- coding: utf-8 -*-
# pylint: disable=W0231, W0142
"""Tests for statistical power calculations
Note:
tests for chisquare power are in test_gof.py
Created on Sat Mar 09 08:44:49 2013
Author: Josef Perktold
"""
import copy
import numpy as np
from numpy.testing import (assert_almost_equal, assert_allclose, assert_raises,
assert_equal, assert_warns)
import statsmodels.stats.power as smp
import warnings
#from .test_weightstats import CheckPowerMixin
from statsmodels.stats.tests.test_weightstats import Holder
# for testing plots
import nose
from numpy.testing import dec
try:
import matplotlib.pyplot as plt #makes plt available for test functions
have_matplotlib = True
except ImportError:
have_matplotlib = False
class CheckPowerMixin(object):
def test_power(self):
#test against R results
kwds = copy.copy(self.kwds)
del kwds['power']
kwds.update(self.kwds_extra)
if hasattr(self, 'decimal'):
decimal = self.decimal
else:
decimal = 6
res1 = self.cls()
assert_almost_equal(res1.power(**kwds), self.res2.power, decimal=decimal)
def test_positional(self):
res1 = self.cls()
kwds = copy.copy(self.kwds)
del kwds['power']
kwds.update(self.kwds_extra)
# positional args
if hasattr(self, 'args_names'):
args_names = self.args_names
else:
nobs_ = 'nobs' if 'nobs' in kwds else 'nobs1'
args_names = ['effect_size', nobs_, 'alpha']
# pop positional args
args = [kwds.pop(arg) for arg in args_names]
if hasattr(self, 'decimal'):
decimal = self.decimal
else:
decimal = 6
res = res1.power(*args, **kwds)
assert_almost_equal(res, self.res2.power, decimal=decimal)
def test_roots(self):
kwds = copy.copy(self.kwds)
kwds.update(self.kwds_extra)
# kwds_extra are used as argument, but not as target for root
for key in self.kwds:
# keep print to check whether tests are really executed
#print 'testing roots', key
value = kwds[key]
kwds[key] = None
result = self.cls().solve_power(**kwds)
assert_allclose(result, value, rtol=0.001, err_msg=key+' failed')
# yield can be used to investigate specific errors
#yield assert_allclose, result, value, 0.001, 0, key+' failed'
kwds[key] = value # reset dict
@dec.skipif(not have_matplotlib)
def test_power_plot(self):
if self.cls == smp.FTestPower:
raise nose.SkipTest('skip FTestPower plot_power')
fig = plt.figure()
ax = fig.add_subplot(2,1,1)
fig = self.cls().plot_power(dep_var='nobs',
nobs= np.arange(2, 100),
effect_size=np.array([0.1, 0.2, 0.3, 0.5, 1]),
#alternative='larger',
ax=ax, title='Power of t-Test',
**self.kwds_extra)
ax = fig.add_subplot(2,1,2)
fig = self.cls().plot_power(dep_var='es',
nobs=np.array([10, 20, 30, 50, 70, 100]),
effect_size=np.linspace(0.01, 2, 51),
#alternative='larger',
ax=ax, title='',
**self.kwds_extra)
plt.close('all')
#''' test cases
#one sample
# two-sided one-sided
#large power OneS1 OneS3
#small power OneS2 OneS4
#
#two sample
# two-sided one-sided
#large power TwoS1 TwoS3
#small power TwoS2 TwoS4
#small p, ratio TwoS4 TwoS5
#'''
class TestTTPowerOneS1(CheckPowerMixin):
def __init__(self):
#> p = pwr.t.test(d=1,n=30,sig.level=0.05,type="two.sample",alternative="two.sided")
#> cat_items(p, prefix='tt_power2_1.')
res2 = Holder()
res2.n = 30
res2.d = 1
res2.sig_level = 0.05
res2.power = 0.9995636009612725
res2.alternative = 'two.sided'
res2.note = 'NULL'
res2.method = 'One-sample t test power calculation'
self.res2 = res2
self.kwds = {'effect_size': res2.d, 'nobs': res2.n,
'alpha': res2.sig_level, 'power':res2.power}
self.kwds_extra = {}
self.cls = smp.TTestPower
class TestTTPowerOneS2(CheckPowerMixin):
# case with small power
def __init__(self):
res2 = Holder()
#> p = pwr.t.test(d=0.2,n=20,sig.level=0.05,type="one.sample",alternative="two.sided")
#> cat_items(p, "res2.")
res2.n = 20
res2.d = 0.2
res2.sig_level = 0.05
res2.power = 0.1359562887679666
res2.alternative = 'two.sided'
res2.note = '''NULL'''
res2.method = 'One-sample t test power calculation'
self.res2 = res2
self.kwds = {'effect_size': res2.d, 'nobs': res2.n,
'alpha': res2.sig_level, 'power':res2.power}
self.kwds_extra = {}
self.cls = smp.TTestPower
class TestTTPowerOneS3(CheckPowerMixin):
def __init__(self):
res2 = Holder()
#> p = pwr.t.test(d=1,n=30,sig.level=0.05,type="one.sample",alternative="greater")
#> cat_items(p, prefix='tt_power1_1g.')
res2.n = 30
res2.d = 1
res2.sig_level = 0.05
res2.power = 0.999892010204909
res2.alternative = 'greater'
res2.note = 'NULL'
res2.method = 'One-sample t test power calculation'
self.res2 = res2
self.kwds = {'effect_size': res2.d, 'nobs': res2.n,
'alpha': res2.sig_level, 'power': res2.power}
self.kwds_extra = {'alternative': 'larger'}
self.cls = smp.TTestPower
class TestTTPowerOneS4(CheckPowerMixin):
def __init__(self):
res2 = Holder()
#> p = pwr.t.test(d=0.05,n=20,sig.level=0.05,type="one.sample",alternative="greater")
#> cat_items(p, "res2.")
res2.n = 20
res2.d = 0.05
res2.sig_level = 0.05
res2.power = 0.0764888785042198
res2.alternative = 'greater'
res2.note = '''NULL'''
res2.method = 'One-sample t test power calculation'
self.res2 = res2
self.kwds = {'effect_size': res2.d, 'nobs': res2.n,
'alpha': res2.sig_level, 'power': res2.power}
self.kwds_extra = {'alternative': 'larger'}
self.cls = smp.TTestPower
class TestTTPowerOneS5(CheckPowerMixin):
# case one-sided less, not implemented yet
def __init__(self):
res2 = Holder()
#> p = pwr.t.test(d=0.2,n=20,sig.level=0.05,type="one.sample",alternative="less")
#> cat_items(p, "res2.")
res2.n = 20
res2.d = 0.2
res2.sig_level = 0.05
res2.power = 0.006063932667926375
res2.alternative = 'less'
res2.note = '''NULL'''
res2.method = 'One-sample t test power calculation'
self.res2 = res2
self.kwds = {'effect_size': res2.d, 'nobs': res2.n,
'alpha': res2.sig_level, 'power': res2.power}
self.kwds_extra = {'alternative': 'smaller'}
self.cls = smp.TTestPower
class TestTTPowerOneS6(CheckPowerMixin):
# case one-sided less, negative effect size, not implemented yet
def __init__(self):
res2 = Holder()
#> p = pwr.t.test(d=-0.2,n=20,sig.level=0.05,type="one.sample",alternative="less")
#> cat_items(p, "res2.")
res2.n = 20
res2.d = -0.2
res2.sig_level = 0.05
res2.power = 0.21707518167191
res2.alternative = 'less'
res2.note = '''NULL'''
res2.method = 'One-sample t test power calculation'
self.res2 = res2
self.kwds = {'effect_size': res2.d, 'nobs': res2.n,
'alpha': res2.sig_level, 'power': res2.power}
self.kwds_extra = {'alternative': 'smaller'}
self.cls = smp.TTestPower
class TestTTPowerTwoS1(CheckPowerMixin):
def __init__(self):
#> p = pwr.t.test(d=1,n=30,sig.level=0.05,type="two.sample",alternative="two.sided")
#> cat_items(p, prefix='tt_power2_1.')
res2 = Holder()
res2.n = 30
res2.d = 1
res2.sig_level = 0.05
res2.power = 0.967708258242517
res2.alternative = 'two.sided'
res2.note = 'n is number in *each* group'
res2.method = 'Two-sample t test power calculation'
self.res2 = res2
self.kwds = {'effect_size': res2.d, 'nobs1': res2.n,
'alpha': res2.sig_level, 'power': res2.power, 'ratio': 1}
self.kwds_extra = {}
self.cls = smp.TTestIndPower
class TestTTPowerTwoS2(CheckPowerMixin):
def __init__(self):
res2 = Holder()
#> p = pwr.t.test(d=0.1,n=20,sig.level=0.05,type="two.sample",alternative="two.sided")
#> cat_items(p, "res2.")
res2.n = 20
res2.d = 0.1
res2.sig_level = 0.05
res2.power = 0.06095912465411235
res2.alternative = 'two.sided'
res2.note = 'n is number in *each* group'
res2.method = 'Two-sample t test power calculation'
self.res2 = res2
self.kwds = {'effect_size': res2.d, 'nobs1': res2.n,
'alpha': res2.sig_level, 'power': res2.power, 'ratio': 1}
self.kwds_extra = {}
self.cls = smp.TTestIndPower
class TestTTPowerTwoS3(CheckPowerMixin):
def __init__(self):
res2 = Holder()
#> p = pwr.t.test(d=1,n=30,sig.level=0.05,type="two.sample",alternative="greater")
#> cat_items(p, prefix='tt_power2_1g.')
res2.n = 30
res2.d = 1
res2.sig_level = 0.05
res2.power = 0.985459690251624
res2.alternative = 'greater'
res2.note = 'n is number in *each* group'
res2.method = 'Two-sample t test power calculation'
self.res2 = res2
self.kwds = {'effect_size': res2.d, 'nobs1': res2.n,
'alpha': res2.sig_level, 'power':res2.power, 'ratio': 1}
self.kwds_extra = {'alternative': 'larger'}
self.cls = smp.TTestIndPower
class TestTTPowerTwoS4(CheckPowerMixin):
# case with small power
def __init__(self):
res2 = Holder()
#> p = pwr.t.test(d=0.01,n=30,sig.level=0.05,type="two.sample",alternative="greater")
#> cat_items(p, "res2.")
res2.n = 30
res2.d = 0.01
res2.sig_level = 0.05
res2.power = 0.0540740302835667
res2.alternative = 'greater'
res2.note = 'n is number in *each* group'
res2.method = 'Two-sample t test power calculation'
self.res2 = res2
self.kwds = {'effect_size': res2.d, 'nobs1': res2.n,
'alpha': res2.sig_level, 'power':res2.power}
self.kwds_extra = {'alternative': 'larger'}
self.cls = smp.TTestIndPower
class TestTTPowerTwoS5(CheckPowerMixin):
# case with unequal n, ratio>1
def __init__(self):
res2 = Holder()
#> p = pwr.t2n.test(d=0.1,n1=20, n2=30,sig.level=0.05,alternative="two.sided")
#> cat_items(p, "res2.")
res2.n1 = 20
res2.n2 = 30
res2.d = 0.1
res2.sig_level = 0.05
res2.power = 0.0633081832564667
res2.alternative = 'two.sided'
res2.method = 't test power calculation'
self.res2 = res2
self.kwds = {'effect_size': res2.d, 'nobs1': res2.n1,
'alpha': res2.sig_level, 'power':res2.power, 'ratio': 1.5}
self.kwds_extra = {'alternative': 'two-sided'}
self.cls = smp.TTestIndPower
class TestTTPowerTwoS6(CheckPowerMixin):
# case with unequal n, ratio>1
def __init__(self):
res2 = Holder()
#> p = pwr.t2n.test(d=0.1,n1=20, n2=30,sig.level=0.05,alternative="greater")
#> cat_items(p, "res2.")
res2.n1 = 20
res2.n2 = 30
res2.d = 0.1
res2.sig_level = 0.05
res2.power = 0.09623589080917805
res2.alternative = 'greater'
res2.method = 't test power calculation'
self.res2 = res2
self.kwds = {'effect_size': res2.d, 'nobs1': res2.n1,
'alpha': res2.sig_level, 'power':res2.power, 'ratio': 1.5}
self.kwds_extra = {'alternative': 'larger'}
self.cls = smp.TTestIndPower
def test_normal_power_explicit():
# a few initial test cases for NormalIndPower
sigma = 1
d = 0.3
nobs = 80
alpha = 0.05
res1 = smp.normal_power(d, nobs/2., 0.05)
res2 = smp.NormalIndPower().power(d, nobs, 0.05)
res3 = smp.NormalIndPower().solve_power(effect_size=0.3, nobs1=80, alpha=0.05, power=None)
res_R = 0.475100870572638
assert_almost_equal(res1, res_R, decimal=13)
assert_almost_equal(res2, res_R, decimal=13)
assert_almost_equal(res3, res_R, decimal=13)
norm_pow = smp.normal_power(-0.01, nobs/2., 0.05)
norm_pow_R = 0.05045832927039234
#value from R: >pwr.2p.test(h=0.01,n=80,sig.level=0.05,alternative="two.sided")
assert_almost_equal(norm_pow, norm_pow_R, decimal=11)
norm_pow = smp.NormalIndPower().power(0.01, nobs, 0.05,
alternative="larger")
norm_pow_R = 0.056869534873146124
#value from R: >pwr.2p.test(h=0.01,n=80,sig.level=0.05,alternative="greater")
assert_almost_equal(norm_pow, norm_pow_R, decimal=11)
# Note: negative effect size is same as switching one-sided alternative
# TODO: should I switch to larger/smaller instead of "one-sided" options
norm_pow = smp.NormalIndPower().power(-0.01, nobs, 0.05,
alternative="larger")
norm_pow_R = 0.0438089705093578
#value from R: >pwr.2p.test(h=0.01,n=80,sig.level=0.05,alternative="less")
assert_almost_equal(norm_pow, norm_pow_R, decimal=11)
class TestNormalIndPower1(CheckPowerMixin):
def __init__(self):
#> example from above
# results copied not directly from R
res2 = Holder()
res2.n = 80
res2.d = 0.3
res2.sig_level = 0.05
res2.power = 0.475100870572638
res2.alternative = 'two.sided'
res2.note = 'NULL'
res2.method = 'two sample power calculation'
self.res2 = res2
self.kwds = {'effect_size': res2.d, 'nobs1': res2.n,
'alpha': res2.sig_level, 'power':res2.power, 'ratio': 1}
self.kwds_extra = {}
self.cls = smp.NormalIndPower
class TestNormalIndPower2(CheckPowerMixin):
def __init__(self):
res2 = Holder()
#> np = pwr.2p.test(h=0.01,n=80,sig.level=0.05,alternative="less")
#> cat_items(np, "res2.")
res2.h = 0.01
res2.n = 80
res2.sig_level = 0.05
res2.power = 0.0438089705093578
res2.alternative = 'less'
res2.method = ('Difference of proportion power calculation for' +
' binomial distribution (arcsine transformation)')
res2.note = 'same sample sizes'
self.res2 = res2
self.kwds = {'effect_size': res2.h, 'nobs1': res2.n,
'alpha': res2.sig_level, 'power':res2.power, 'ratio': 1}
self.kwds_extra = {'alternative':'smaller'}
self.cls = smp.NormalIndPower
class TestNormalIndPower_onesamp1(CheckPowerMixin):
def __init__(self):
# forcing one-sample by using ratio=0
#> example from above
# results copied not directly from R
res2 = Holder()
res2.n = 40
res2.d = 0.3
res2.sig_level = 0.05
res2.power = 0.475100870572638
res2.alternative = 'two.sided'
res2.note = 'NULL'
res2.method = 'two sample power calculation'
self.res2 = res2
self.kwds = {'effect_size': res2.d, 'nobs1': res2.n,
'alpha': res2.sig_level, 'power':res2.power}
# keyword for which we don't look for root:
self.kwds_extra = {'ratio': 0}
self.cls = smp.NormalIndPower
class TestNormalIndPower_onesamp2(CheckPowerMixin):
# Note: same power as two sample case with twice as many observations
def __init__(self):
# forcing one-sample by using ratio=0
res2 = Holder()
#> np = pwr.norm.test(d=0.01,n=40,sig.level=0.05,alternative="less")
#> cat_items(np, "res2.")
res2.d = 0.01
res2.n = 40
res2.sig_level = 0.05
res2.power = 0.0438089705093578
res2.alternative = 'less'
res2.method = 'Mean power calculation for normal distribution with known variance'
self.res2 = res2
self.kwds = {'effect_size': res2.d, 'nobs1': res2.n,
'alpha': res2.sig_level, 'power':res2.power}
# keyword for which we don't look for root:
self.kwds_extra = {'ratio': 0, 'alternative':'smaller'}
self.cls = smp.NormalIndPower
class TestChisquarePower(CheckPowerMixin):
def __init__(self):
# one example from test_gof, results_power
res2 = Holder()
res2.w = 0.1
res2.N = 5
res2.df = 4
res2.sig_level = 0.05
res2.power = 0.05246644635810126
res2.method = 'Chi squared power calculation'
res2.note = 'N is the number of observations'
self.res2 = res2
self.kwds = {'effect_size': res2.w, 'nobs': res2.N,
'alpha': res2.sig_level, 'power':res2.power}
# keyword for which we don't look for root:
# solving for n_bins doesn't work, will not be used in regular usage
self.kwds_extra = {'n_bins': res2.df + 1}
self.cls = smp.GofChisquarePower
def _test_positional(self):
res1 = self.cls()
args_names = ['effect_size','nobs', 'alpha', 'n_bins']
kwds = copy.copy(self.kwds)
del kwds['power']
kwds.update(self.kwds_extra)
args = [kwds[arg] for arg in args_names]
if hasattr(self, 'decimal'):
decimal = self.decimal #pylint: disable-msg=E1101
else:
decimal = 6
assert_almost_equal(res1.power(*args), self.res2.power, decimal=decimal)
def test_ftest_power():
#equivalence ftest, ttest
for alpha in [0.01, 0.05, 0.1, 0.20, 0.50]:
res0 = smp.ttest_power(0.01, 200, alpha)
res1 = smp.ftest_power(0.01, 199, 1, alpha=alpha, ncc=0)
assert_almost_equal(res1, res0, decimal=6)
#example from Gplus documentation F-test ANOVA
#Total sample size:200
#Effect size "f":0.25
#Beta/alpha ratio:1
#Result:
#Alpha:0.1592
#Power (1-beta):0.8408
#Critical F:1.4762
#Lambda: 12.50000
res1 = smp.ftest_anova_power(0.25, 200, 0.1592, k_groups=10)
res0 = 0.8408
assert_almost_equal(res1, res0, decimal=4)
# TODO: no class yet
    # examples against R::pwr
res2 = Holder()
#> rf = pwr.f2.test(u=5, v=199, f2=0.1**2, sig.level=0.01)
#> cat_items(rf, "res2.")
res2.u = 5
res2.v = 199
res2.f2 = 0.01
res2.sig_level = 0.01
res2.power = 0.0494137732920332
res2.method = 'Multiple regression power calculation'
res1 = smp.ftest_power(np.sqrt(res2.f2), res2.v, res2.u,
alpha=res2.sig_level, ncc=1)
assert_almost_equal(res1, res2.power, decimal=5)
res2 = Holder()
#> rf = pwr.f2.test(u=5, v=199, f2=0.3**2, sig.level=0.01)
#> cat_items(rf, "res2.")
res2.u = 5
res2.v = 199
res2.f2 = 0.09
res2.sig_level = 0.01
res2.power = 0.7967191006290872
res2.method = 'Multiple regression power calculation'
res1 = smp.ftest_power(np.sqrt(res2.f2), res2.v, res2.u,
alpha=res2.sig_level, ncc=1)
assert_almost_equal(res1, res2.power, decimal=5)
res2 = Holder()
#> rf = pwr.f2.test(u=5, v=19, f2=0.3**2, sig.level=0.1)
#> cat_items(rf, "res2.")
res2.u = 5
res2.v = 19
res2.f2 = 0.09
res2.sig_level = 0.1
res2.power = 0.235454222377575
res2.method = 'Multiple regression power calculation'
res1 = smp.ftest_power(np.sqrt(res2.f2), res2.v, res2.u,
alpha=res2.sig_level, ncc=1)
assert_almost_equal(res1, res2.power, decimal=5)
# class based version of two above test for Ftest
class TestFtestAnovaPower(CheckPowerMixin):
def __init__(self):
res2 = Holder()
#example from Gplus documentation F-test ANOVA
#Total sample size:200
#Effect size "f":0.25
#Beta/alpha ratio:1
#Result:
#Alpha:0.1592
#Power (1-beta):0.8408
#Critical F:1.4762
#Lambda: 12.50000
#converted to res2 by hand
res2.f = 0.25
res2.n = 200
res2.k = 10
res2.alpha = 0.1592
res2.power = 0.8408
res2.method = 'Multiple regression power calculation'
self.res2 = res2
self.kwds = {'effect_size': res2.f, 'nobs': res2.n,
'alpha': res2.alpha, 'power': res2.power}
# keyword for which we don't look for root:
# solving for n_bins doesn't work, will not be used in regular usage
self.kwds_extra = {'k_groups': res2.k} # rootfinding doesn't work
#self.args_names = ['effect_size','nobs', 'alpha']#, 'k_groups']
self.cls = smp.FTestAnovaPower
# precision for test_power
self.decimal = 4
class TestFtestPower(CheckPowerMixin):
def __init__(self):
res2 = Holder()
#> rf = pwr.f2.test(u=5, v=19, f2=0.3**2, sig.level=0.1)
#> cat_items(rf, "res2.")
res2.u = 5
res2.v = 19
res2.f2 = 0.09
res2.sig_level = 0.1
res2.power = 0.235454222377575
res2.method = 'Multiple regression power calculation'
self.res2 = res2
self.kwds = {'effect_size': np.sqrt(res2.f2), 'df_num': res2.v,
'df_denom': res2.u, 'alpha': res2.sig_level,
'power': res2.power}
# keyword for which we don't look for root:
# solving for n_bins doesn't work, will not be used in regular usage
self.kwds_extra = {}
self.args_names = ['effect_size', 'df_num', 'df_denom', 'alpha']
self.cls = smp.FTestPower
# precision for test_power
self.decimal = 5
def test_power_solver():
# messing up the solver to trigger backup
nip = smp.NormalIndPower()
# check result
es0 = 0.1
pow_ = nip.solve_power(es0, nobs1=1600, alpha=0.01, power=None, ratio=1,
alternative='larger')
# value is regression test
assert_almost_equal(pow_, 0.69219411243824214, decimal=5)
es = nip.solve_power(None, nobs1=1600, alpha=0.01, power=pow_, ratio=1,
alternative='larger')
assert_almost_equal(es, es0, decimal=4)
assert_equal(nip.cache_fit_res[0], 1)
assert_equal(len(nip.cache_fit_res), 2)
# cause first optimizer to fail
nip.start_bqexp['effect_size'] = {'upp': -10, 'low': -20}
nip.start_ttp['effect_size'] = 0.14
es = nip.solve_power(None, nobs1=1600, alpha=0.01, power=pow_, ratio=1,
alternative='larger')
assert_almost_equal(es, es0, decimal=4)
assert_equal(nip.cache_fit_res[0], 1)
assert_equal(len(nip.cache_fit_res), 3, err_msg=repr(nip.cache_fit_res))
nip.start_ttp['effect_size'] = np.nan
es = nip.solve_power(None, nobs1=1600, alpha=0.01, power=pow_, ratio=1,
alternative='larger')
assert_almost_equal(es, es0, decimal=4)
assert_equal(nip.cache_fit_res[0], 1)
assert_equal(len(nip.cache_fit_res), 4)
# I let this case fail, could be fixed for some statistical tests
# (we shouldn't get here in the first place)
# effect size is negative, but last stage brentq uses [1e-8, 1-1e-8]
assert_raises(ValueError, nip.solve_power, None, nobs1=1600, alpha=0.01,
power=0.005, ratio=1, alternative='larger')
def test_power_solver_warn():
# messing up the solver to trigger warning
# I wrote this with scipy 0.9,
# convergence behavior of scipy 0.11 is different,
# fails at a different case, but is successful where it failed before
pow_ = 0.69219411243824214 # from previous function
nip = smp.NormalIndPower()
# using nobs, has one backup (fsolve)
nip.start_bqexp['nobs1'] = {'upp': 50, 'low': -20}
val = nip.solve_power(0.1, nobs1=None, alpha=0.01, power=pow_, ratio=1,
alternative='larger')
import scipy
if scipy.__version__ < '0.10':
assert_almost_equal(val, 1600, decimal=4)
assert_equal(nip.cache_fit_res[0], 1)
assert_equal(len(nip.cache_fit_res), 3)
# case that has convergence failure, and should warn
nip.start_ttp['nobs1'] = np.nan
from statsmodels.tools.sm_exceptions import ConvergenceWarning
assert_warns(ConvergenceWarning, nip.solve_power, 0.1, nobs1=None,
alpha=0.01, power=pow_, ratio=1, alternative='larger')
# this converges with scipy 0.11 ???
# nip.solve_power(0.1, nobs1=None, alpha=0.01, power=pow_, ratio=1, alternative='larger')
with warnings.catch_warnings(): # python >= 2.6
warnings.simplefilter("ignore")
val = nip.solve_power(0.1, nobs1=None, alpha=0.01, power=pow_, ratio=1,
alternative='larger')
assert_equal(nip.cache_fit_res[0], 0)
assert_equal(len(nip.cache_fit_res), 3)
if __name__ == '__main__':
test_normal_power_explicit()
nt = TestNormalIndPower1()
nt.test_power()
nt.test_roots()
nt = TestNormalIndPower_onesamp1()
nt.test_power()
nt.test_roots()
| bsd-3-clause |
mattilyra/gensim | gensim/sklearn_api/w2vmodel.py | 1 | 8438 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Author: Chinmaya Pancholi <[email protected]>
# Copyright (C) 2017 Radim Rehurek <[email protected]>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""Scikit learn interface for :class:`~gensim.models.word2vec.Word2Vec`.
Follows scikit-learn API conventions to facilitate using gensim along with scikit-learn.
Examples
--------
>>> from gensim.test.utils import common_texts
>>> from gensim.sklearn_api import W2VTransformer
>>>
>>> # Create a model to represent each word by a 10 dimensional vector.
>>> model = W2VTransformer(size=10, min_count=1, seed=1)
>>>
>>> # What is the vector representation of the word 'graph'?
>>> wordvecs = model.fit(common_texts).transform(['graph', 'system'])
>>> assert wordvecs.shape == (2, 10)
"""
import numpy as np
import six
from sklearn.base import TransformerMixin, BaseEstimator
from sklearn.exceptions import NotFittedError
from gensim import models
class W2VTransformer(TransformerMixin, BaseEstimator):
"""Base Word2Vec module, wraps :class:`~gensim.models.word2vec.Word2Vec`.
For more information please have a look to `Tomas Mikolov, Kai Chen, Greg Corrado, Jeffrey Dean: "Efficient
Estimation of Word Representations in Vector Space" <https://arxiv.org/abs/1301.3781>`_.
"""
def __init__(self, size=100, alpha=0.025, window=5, min_count=5, max_vocab_size=None, sample=1e-3, seed=1,
workers=3, min_alpha=0.0001, sg=0, hs=0, negative=5, cbow_mean=1, hashfxn=hash, iter=5, null_word=0,
trim_rule=None, sorted_vocab=1, batch_words=10000):
"""
Parameters
----------
size : int
Dimensionality of the feature vectors.
alpha : float
The initial learning rate.
window : int
The maximum distance between the current and predicted word within a sentence.
min_count : int
Ignores all words with total frequency lower than this.
max_vocab_size : int
Limits the RAM during vocabulary building; if there are more unique
words than this, then prune the infrequent ones. Every 10 million word types need about 1GB of RAM.
Set to `None` for no limit.
sample : float
The threshold for configuring which higher-frequency words are randomly downsampled,
useful range is (0, 1e-5).
seed : int
Seed for the random number generator. Initial vectors for each word are seeded with a hash of
the concatenation of word + `str(seed)`. Note that for a fully deterministically-reproducible run,
you must also limit the model to a single worker thread (`workers=1`), to eliminate ordering jitter
from OS thread scheduling. (In Python 3, reproducibility between interpreter launches also requires
use of the `PYTHONHASHSEED` environment variable to control hash randomization).
workers : int
Use these many worker threads to train the model (=faster training with multicore machines).
min_alpha : float
Learning rate will linearly drop to `min_alpha` as training progresses.
sg : int {1, 0}
            Defines the training algorithm. If 1, skip-gram is employed; otherwise, CBOW is used.
hs : int {1,0}
If 1, hierarchical softmax will be used for model training.
If set to 0, and `negative` is non-zero, negative sampling will be used.
negative : int
If > 0, negative sampling will be used, the int for negative specifies how many "noise words"
should be drawn (usually between 5-20).
If set to 0, no negative sampling is used.
cbow_mean : int {1,0}
If 0, use the sum of the context word vectors. If 1, use the mean, only applies when cbow is used.
hashfxn : callable (object -> int), optional
A hashing function. Used to create an initial random reproducible vector by hashing the random seed.
iter : int
Number of iterations (epochs) over the corpus.
null_word : int {1, 0}
If 1, a null pseudo-word will be created for padding when using concatenative L1 (run-of-words)
trim_rule : function
Vocabulary trimming rule, specifies whether certain words should remain in the vocabulary,
be trimmed away, or handled using the default (discard if word count < min_count).
Can be None (min_count will be used, look to :func:`~gensim.utils.keep_vocab_item`),
or a callable that accepts parameters (word, count, min_count) and returns either
:attr:`gensim.utils.RULE_DISCARD`, :attr:`gensim.utils.RULE_KEEP` or :attr:`gensim.utils.RULE_DEFAULT`.
Note: The rule, if given, is only used to prune vocabulary during build_vocab() and is not stored as part
of the model.
sorted_vocab : int {1,0}
If 1, sort the vocabulary by descending frequency before assigning word indexes.
batch_words : int
Target size (in words) for batches of examples passed to worker threads (and
thus cython routines).(Larger batches will be passed if individual
texts are longer than 10000 words, but the standard cython code truncates to that maximum.)
"""
self.gensim_model = None
self.size = size
self.alpha = alpha
self.window = window
self.min_count = min_count
self.max_vocab_size = max_vocab_size
self.sample = sample
self.seed = seed
self.workers = workers
self.min_alpha = min_alpha
self.sg = sg
self.hs = hs
self.negative = negative
self.cbow_mean = int(cbow_mean)
self.hashfxn = hashfxn
self.iter = iter
self.null_word = null_word
self.trim_rule = trim_rule
self.sorted_vocab = sorted_vocab
self.batch_words = batch_words
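    # Added sketch (assumption, not part of the original wrapper): a custom
    # `trim_rule` is a callable with signature (word, count, min_count) that
    # returns one of gensim's vocabulary rules, e.g.
    #
    #     from gensim import utils
    #
    #     def keep_short_words(word, count, min_count):
    #         # always keep short tokens, defer to the min_count default otherwise
    #         return utils.RULE_KEEP if len(word) < 5 else utils.RULE_DEFAULT
    #
    #     W2VTransformer(min_count=5, trim_rule=keep_short_words)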
def fit(self, X, y=None):
"""Fit the model according to the given training data.
Parameters
----------
X : iterable of iterables of str
The input corpus. X can be simply a list of lists of tokens, but for larger corpora,
consider an iterable that streams the sentences directly from disk/network.
See :class:`~gensim.models.word2vec.BrownCorpus`, :class:`~gensim.models.word2vec.Text8Corpus`
or :class:`~gensim.models.word2vec.LineSentence` in :mod:`~gensim.models.word2vec` module for such examples.
Returns
-------
:class:`~gensim.sklearn_api.w2vmodel.W2VTransformer`
The trained model.
"""
self.gensim_model = models.Word2Vec(
sentences=X, size=self.size, alpha=self.alpha,
window=self.window, min_count=self.min_count, max_vocab_size=self.max_vocab_size,
sample=self.sample, seed=self.seed, workers=self.workers, min_alpha=self.min_alpha,
sg=self.sg, hs=self.hs, negative=self.negative, cbow_mean=self.cbow_mean,
hashfxn=self.hashfxn, iter=self.iter, null_word=self.null_word, trim_rule=self.trim_rule,
sorted_vocab=self.sorted_vocab, batch_words=self.batch_words
)
return self
def transform(self, words):
"""Get the word vectors the input words.
Parameters
----------
words : {iterable of str, str}
Word or a collection of words to be transformed.
Returns
-------
np.ndarray of shape [`len(words)`, `size`]
A 2D array where each row is the vector of one word.
"""
if self.gensim_model is None:
raise NotFittedError(
"This model has not been fitted yet. Call 'fit' with appropriate arguments before using this method."
)
        # Accept a single word or a collection of words; wrap a lone string in a list
if isinstance(words, six.string_types):
words = [words]
vectors = [self.gensim_model[word] for word in words]
return np.reshape(np.array(vectors), (len(words), self.size))
def partial_fit(self, X):
raise NotImplementedError(
"'partial_fit' has not been implemented for W2VTransformer. "
"However, the model can be updated with a fixed vocabulary using Gensim API call."
)
| lgpl-2.1 |
cxhernandez/msmbuilder | msmbuilder/tests/test_dataset.py | 7 | 6441 | from __future__ import print_function, absolute_import, division
import os
import shutil
import tempfile
import numpy as np
from mdtraj.testing import get_fn
from nose.tools import assert_raises, assert_raises_regexp
from sklearn.externals.joblib import Parallel, delayed
from msmbuilder.dataset import dataset
from .test_commands import tempdir
# Nose wraps unittest with pep8 function names, but throws deprecation
# warnings about it!
import warnings
warnings.filterwarnings('ignore', message=r".*assertRaisesRegex.*",
category=DeprecationWarning)
def test_1():
path = tempfile.mkdtemp()
shutil.rmtree(path)
try:
X = np.random.randn(10, 2)
ds = dataset(path, 'w', 'dir-npy')
ds[0] = X
assert set(os.listdir(path)) == set(('PROVENANCE.txt', '00000000.npy'))
np.testing.assert_array_equal(ds[0], X)
assert_raises(IndexError, lambda: ds[1])
assert len(ds) == 1
Y = np.zeros((10, 1))
Z = np.ones((2, 2))
ds[1] = Y
ds[2] = Z
np.testing.assert_array_equal(ds[1], Y)
np.testing.assert_array_equal(ds[2], Z)
assert len(ds) == 3
for i, item in enumerate(ds):
np.testing.assert_array_equal(item, [X, Y, Z][i])
except:
raise
finally:
shutil.rmtree(path)
def test_2():
path1 = tempfile.mkdtemp()
path2 = tempfile.mkdtemp()
shutil.rmtree(path1)
shutil.rmtree(path2)
try:
X = np.random.randn(10, 2)
Y = np.random.randn(10, 2)
ds1 = dataset(path1, 'w', 'dir-npy')
ds1[0] = X
ds2 = ds1.create_derived(path2)
ds2[0] = Y
np.testing.assert_array_equal(ds1[0], X)
np.testing.assert_array_equal(ds2[0], Y)
assert len(ds1) == 1
assert len(ds2) == 1
prov2 = ds2.provenance
print(prov2)
assert 2 == sum([s.startswith(' Command') for s in prov2.splitlines()])
except:
raise
finally:
shutil.rmtree(path1)
shutil.rmtree(path2)
def test_3():
path = tempfile.mkdtemp()
shutil.rmtree(path)
try:
ds = dataset(path, 'w', 'dir-npy')
ds[0] = np.random.randn(10, 2)
ds[1] = np.random.randn(10, 2)
ds[2] = np.random.randn(10, 2)
np.testing.assert_array_equal(ds[:][0], ds[0])
np.testing.assert_array_equal(ds[:][1], ds[1])
np.testing.assert_array_equal(ds[:][2], ds[2])
finally:
shutil.rmtree(path)
def test_4():
path = tempfile.mkdtemp()
shutil.rmtree(path)
try:
ds = dataset(path, 'w', 'dir-npy')
ds[0] = np.random.randn(10, 2)
v = ds.get(0, mmap=True)
assert isinstance(v, np.memmap)
np.testing.assert_array_equal(ds[0], v)
del v # close the underlying file
finally:
shutil.rmtree(path)
def test_mdtraj_1():
ds = dataset(get_fn('') + '*.pdb', fmt='mdtraj', verbose=True)
print(ds.keys())
print(ds.get(0))
print(ds.provenance)
ds = dataset(get_fn('') + '*.pdb', fmt='mdtraj', atom_indices=[1, 2],
verbose=True)
print(ds.keys())
print(ds.get(0))
print(ds.provenance)
def test_hdf5_1():
with tempdir():
ds = dataset('ds.h5', 'w', 'hdf5')
print(ds.provenance)
ds[0] = np.zeros(10)
np.testing.assert_array_equal(ds.get(0), np.zeros(10))
assert list(ds.keys()) == [0]
assert len(ds) == 1
ds[0] = np.random.randn(10, 1)
ds[1] = np.random.randn(10, 2)
ds[2] = np.random.randn(10, 3)
np.testing.assert_array_equal(ds[:][0], ds[0])
np.testing.assert_array_equal(ds[:][1], ds[1])
np.testing.assert_array_equal(ds[:][2], ds[2])
ds.close()
with dataset('ds.h5') as ds:
assert ds[0].shape == (10, 1)
def test_hdf5_2():
with tempdir():
with dataset('ds.h5', 'w', 'hdf5') as ds:
ds2 = ds.create_derived('ds2.h5')
print(ds2.provenance)
ds2.close()
def _sum_helper(ds):
value = sum(np.sum(x) for x in ds)
ds.close()
return value
def test_hdf5_3():
with tempdir():
with dataset('ds.h5', 'w', 'hdf5') as ds:
ds[0] = np.random.randn(10)
ds[1] = np.random.randn(10)
ref_sum = _sum_helper(ds)
iter_args = (dataset('ds.h5') for _ in range(5))
sums = Parallel(n_jobs=2)(
delayed(_sum_helper)(a) for a in iter_args)
assert all(s == ref_sum for s in sums)
def test_union_no_longer_exists():
with assert_raises_regexp(ValueError,
r".*[Uu]se msmbuilder\.featurizer\.FeatureUnion.*"):
mds = dataset(['ds1.h5', 'ds2.h5'], fmt='hdf5-union')
def test_order_1():
with tempdir():
with dataset('ds1.h5', 'w', 'hdf5') as ds1:
for i in range(20):
ds1[i] = np.random.randn(10)
assert list(ds1.keys()) == list(range(20))
with dataset('ds1/', 'w', 'dir-npy') as ds1:
for i in range(20):
ds1[i] = np.random.randn(10)
assert list(ds1.keys()) == list(range(20))
def test_append_dirnpy():
path = tempfile.mkdtemp()
shutil.rmtree(path)
try:
with dataset(path, 'w', 'dir-npy') as ds:
ds[0] = np.random.randn(10, 2)
with dataset(path, 'a', 'dir-npy') as ds:
ds[1] = np.random.randn(10, 2)
with dataset(path, 'a', 'dir-npy') as ds:
ds[2] = np.random.randn(10, 2)
with dataset(path, 'a', 'dir-npy') as ds:
# Overwrite
ds[2] = np.random.randn(10, 2)
np.testing.assert_array_equal(ds[:][0], ds[0])
np.testing.assert_array_equal(ds[:][1], ds[1])
np.testing.assert_array_equal(ds[:][2], ds[2])
finally:
shutil.rmtree(path)
def test_items():
with tempdir():
ds = dataset('ds.h5', 'w', 'hdf5')
ds[0] = np.random.randn(10, 1)
ds[1] = np.random.randn(10, 2)
ds[5] = np.random.randn(10, 3)
keys = [0, 1, 5]
for i, (k, v) in enumerate(ds.items()):
assert k == keys[i]
np.testing.assert_array_equal(ds[k], v)
np.testing.assert_array_equal(ds[:][0], ds[0])
np.testing.assert_array_equal(ds[:][1], ds[1])
np.testing.assert_array_equal(ds[:][2], ds[5])
ds.close()
| lgpl-2.1 |
heli522/scikit-learn | examples/model_selection/plot_precision_recall.py | 249 | 6150 | """
================
Precision-Recall
================
Example of Precision-Recall metric to evaluate classifier output quality.
In information retrieval, precision is a measure of result relevancy, while
recall is a measure of how many truly relevant results are returned. A high
area under the curve represents both high recall and high precision, where high
precision relates to a low false positive rate, and high recall relates to a
low false negative rate. High scores for both show that the classifier is
returning accurate results (high precision), as well as returning a majority of
all positive results (high recall).
A system with high recall but low precision returns many results, but most of
its predicted labels are incorrect when compared to the training labels. A
system with high precision but low recall is just the opposite, returning very
few results, but most of its predicted labels are correct when compared to the
training labels. An ideal system with high precision and high recall will
return many results, with all results labeled correctly.
Precision (:math:`P`) is defined as the number of true positives (:math:`T_p`)
over the number of true positives plus the number of false positives
(:math:`F_p`).
:math:`P = \\frac{T_p}{T_p+F_p}`
Recall (:math:`R`) is defined as the number of true positives (:math:`T_p`)
over the number of true positives plus the number of false negatives
(:math:`F_n`).
:math:`R = \\frac{T_p}{T_p + F_n}`
These quantities are also related to the (:math:`F_1`) score, which is defined
as the harmonic mean of precision and recall.
:math:`F1 = 2\\frac{P \\times R}{P+R}`
It is important to note that the precision may not decrease with recall. The
definition of precision (:math:`\\frac{T_p}{T_p + F_p}`) shows that lowering
the threshold of a classifier may increase the denominator, by increasing the
number of results returned. If the threshold was previously set too high, the
new results may all be true positives, which will increase precision. If the
previous threshold was about right or too low, further lowering the threshold
will introduce false positives, decreasing precision.
Recall is defined as :math:`\\frac{T_p}{T_p+F_n}`, where :math:`T_p+F_n` does
not depend on the classifier threshold. This means that lowering the classifier
threshold may increase recall, by increasing the number of true positive
results. It is also possible that lowering the threshold may leave recall
unchanged, while the precision fluctuates.
The relationship between recall and precision can be observed in the
stairstep area of the plot - at the edges of these steps a small change
in the threshold considerably reduces precision, with only a minor gain in
recall. See the corner at recall = .59, precision = .8 for an example of this
phenomenon.
Precision-recall curves are typically used in binary classification to study
the output of a classifier. In order to extend Precision-recall curve and
average precision to multi-class or multi-label classification, it is necessary
to binarize the output. One curve can be drawn per label, but one can also draw
a precision-recall curve by considering each element of the label indicator
matrix as a binary prediction (micro-averaging).
.. note::
See also :func:`sklearn.metrics.average_precision_score`,
:func:`sklearn.metrics.recall_score`,
:func:`sklearn.metrics.precision_score`,
:func:`sklearn.metrics.f1_score`
"""
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from sklearn import svm, datasets
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import average_precision_score
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import label_binarize
from sklearn.multiclass import OneVsRestClassifier
# import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
# Binarize the output
y = label_binarize(y, classes=[0, 1, 2])
n_classes = y.shape[1]
# Add noisy features
random_state = np.random.RandomState(0)
n_samples, n_features = X.shape
X = np.c_[X, random_state.randn(n_samples, 200 * n_features)]
# Split into training and test
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
random_state=random_state)
# Run classifier
classifier = OneVsRestClassifier(svm.SVC(kernel='linear', probability=True,
random_state=random_state))
y_score = classifier.fit(X_train, y_train).decision_function(X_test)
# Compute Precision-Recall and plot curve
precision = dict()
recall = dict()
average_precision = dict()
for i in range(n_classes):
precision[i], recall[i], _ = precision_recall_curve(y_test[:, i],
y_score[:, i])
average_precision[i] = average_precision_score(y_test[:, i], y_score[:, i])
# Compute micro-average ROC curve and ROC area
precision["micro"], recall["micro"], _ = precision_recall_curve(y_test.ravel(),
y_score.ravel())
average_precision["micro"] = average_precision_score(y_test, y_score,
average="micro")
# Plot Precision-Recall curve
plt.clf()
plt.plot(recall[0], precision[0], label='Precision-Recall curve')
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.ylim([0.0, 1.05])
plt.xlim([0.0, 1.0])
plt.title('Precision-Recall example: AUC={0:0.2f}'.format(average_precision[0]))
plt.legend(loc="lower left")
plt.show()
# Plot Precision-Recall curve for each class
plt.clf()
plt.plot(recall["micro"], precision["micro"],
label='micro-average Precision-recall curve (area = {0:0.2f})'
''.format(average_precision["micro"]))
for i in range(n_classes):
plt.plot(recall[i], precision[i],
label='Precision-recall curve of class {0} (area = {1:0.2f})'
''.format(i, average_precision[i]))
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.title('Extension of Precision-Recall curve to multi-class')
plt.legend(loc="lower right")
plt.show()
| bsd-3-clause |
andyraib/data-storage | python_scripts/env/lib/python3.6/site-packages/pandas/tests/test_take.py | 8 | 19159 | # -*- coding: utf-8 -*-
import re
from datetime import datetime
import nose
import numpy as np
from pandas.compat import long
import pandas.core.algorithms as algos
import pandas.util.testing as tm
from pandas.tslib import iNaT
_multiprocess_can_split_ = True
class TestTake(tm.TestCase):
# standard incompatible fill error
fill_error = re.compile("Incompatible type for fill_value")
_multiprocess_can_split_ = True
def test_1d_with_out(self):
def _test_dtype(dtype, can_hold_na, writeable=True):
data = np.random.randint(0, 2, 4).astype(dtype)
data.flags.writeable = writeable
indexer = [2, 1, 0, 1]
out = np.empty(4, dtype=dtype)
algos.take_1d(data, indexer, out=out)
expected = data.take(indexer)
tm.assert_almost_equal(out, expected)
indexer = [2, 1, 0, -1]
out = np.empty(4, dtype=dtype)
if can_hold_na:
algos.take_1d(data, indexer, out=out)
expected = data.take(indexer)
expected[3] = np.nan
tm.assert_almost_equal(out, expected)
else:
with tm.assertRaisesRegexp(TypeError, self.fill_error):
algos.take_1d(data, indexer, out=out)
# no exception o/w
data.take(indexer, out=out)
for writeable in [True, False]:
# Check that take_nd works both with writeable arrays (in which
# case fast typed memoryviews implementation) and read-only
# arrays alike.
_test_dtype(np.float64, True, writeable=writeable)
_test_dtype(np.float32, True, writeable=writeable)
_test_dtype(np.uint64, False, writeable=writeable)
_test_dtype(np.uint32, False, writeable=writeable)
_test_dtype(np.uint16, False, writeable=writeable)
_test_dtype(np.uint8, False, writeable=writeable)
_test_dtype(np.int64, False, writeable=writeable)
_test_dtype(np.int32, False, writeable=writeable)
_test_dtype(np.int16, False, writeable=writeable)
_test_dtype(np.int8, False, writeable=writeable)
_test_dtype(np.object_, True, writeable=writeable)
_test_dtype(np.bool, False, writeable=writeable)
def test_1d_fill_nonna(self):
def _test_dtype(dtype, fill_value, out_dtype):
data = np.random.randint(0, 2, 4).astype(dtype)
indexer = [2, 1, 0, -1]
result = algos.take_1d(data, indexer, fill_value=fill_value)
assert ((result[[0, 1, 2]] == data[[2, 1, 0]]).all())
assert (result[3] == fill_value)
assert (result.dtype == out_dtype)
indexer = [2, 1, 0, 1]
result = algos.take_1d(data, indexer, fill_value=fill_value)
assert ((result[[0, 1, 2, 3]] == data[indexer]).all())
assert (result.dtype == dtype)
_test_dtype(np.int8, np.int16(127), np.int8)
_test_dtype(np.int8, np.int16(128), np.int16)
_test_dtype(np.int32, 1, np.int32)
_test_dtype(np.int32, 2.0, np.float64)
_test_dtype(np.int32, 3.0 + 4.0j, np.complex128)
_test_dtype(np.int32, True, np.object_)
_test_dtype(np.int32, '', np.object_)
_test_dtype(np.float64, 1, np.float64)
_test_dtype(np.float64, 2.0, np.float64)
_test_dtype(np.float64, 3.0 + 4.0j, np.complex128)
_test_dtype(np.float64, True, np.object_)
_test_dtype(np.float64, '', np.object_)
_test_dtype(np.complex128, 1, np.complex128)
_test_dtype(np.complex128, 2.0, np.complex128)
_test_dtype(np.complex128, 3.0 + 4.0j, np.complex128)
_test_dtype(np.complex128, True, np.object_)
_test_dtype(np.complex128, '', np.object_)
_test_dtype(np.bool_, 1, np.object_)
_test_dtype(np.bool_, 2.0, np.object_)
_test_dtype(np.bool_, 3.0 + 4.0j, np.object_)
_test_dtype(np.bool_, True, np.bool_)
_test_dtype(np.bool_, '', np.object_)
def test_2d_with_out(self):
def _test_dtype(dtype, can_hold_na, writeable=True):
data = np.random.randint(0, 2, (5, 3)).astype(dtype)
data.flags.writeable = writeable
indexer = [2, 1, 0, 1]
out0 = np.empty((4, 3), dtype=dtype)
out1 = np.empty((5, 4), dtype=dtype)
algos.take_nd(data, indexer, out=out0, axis=0)
algos.take_nd(data, indexer, out=out1, axis=1)
expected0 = data.take(indexer, axis=0)
expected1 = data.take(indexer, axis=1)
tm.assert_almost_equal(out0, expected0)
tm.assert_almost_equal(out1, expected1)
indexer = [2, 1, 0, -1]
out0 = np.empty((4, 3), dtype=dtype)
out1 = np.empty((5, 4), dtype=dtype)
if can_hold_na:
algos.take_nd(data, indexer, out=out0, axis=0)
algos.take_nd(data, indexer, out=out1, axis=1)
expected0 = data.take(indexer, axis=0)
expected1 = data.take(indexer, axis=1)
expected0[3, :] = np.nan
expected1[:, 3] = np.nan
tm.assert_almost_equal(out0, expected0)
tm.assert_almost_equal(out1, expected1)
else:
for i, out in enumerate([out0, out1]):
with tm.assertRaisesRegexp(TypeError, self.fill_error):
algos.take_nd(data, indexer, out=out, axis=i)
# no exception o/w
data.take(indexer, out=out, axis=i)
for writeable in [True, False]:
# Check that take_nd works both with writeable arrays (in which
# case fast typed memoryviews implementation) and read-only
# arrays alike.
_test_dtype(np.float64, True, writeable=writeable)
_test_dtype(np.float32, True, writeable=writeable)
_test_dtype(np.uint64, False, writeable=writeable)
_test_dtype(np.uint32, False, writeable=writeable)
_test_dtype(np.uint16, False, writeable=writeable)
_test_dtype(np.uint8, False, writeable=writeable)
_test_dtype(np.int64, False, writeable=writeable)
_test_dtype(np.int32, False, writeable=writeable)
_test_dtype(np.int16, False, writeable=writeable)
_test_dtype(np.int8, False, writeable=writeable)
_test_dtype(np.object_, True, writeable=writeable)
_test_dtype(np.bool, False, writeable=writeable)
def test_2d_fill_nonna(self):
def _test_dtype(dtype, fill_value, out_dtype):
data = np.random.randint(0, 2, (5, 3)).astype(dtype)
indexer = [2, 1, 0, -1]
result = algos.take_nd(data, indexer, axis=0,
fill_value=fill_value)
assert ((result[[0, 1, 2], :] == data[[2, 1, 0], :]).all())
assert ((result[3, :] == fill_value).all())
assert (result.dtype == out_dtype)
result = algos.take_nd(data, indexer, axis=1,
fill_value=fill_value)
assert ((result[:, [0, 1, 2]] == data[:, [2, 1, 0]]).all())
assert ((result[:, 3] == fill_value).all())
assert (result.dtype == out_dtype)
indexer = [2, 1, 0, 1]
result = algos.take_nd(data, indexer, axis=0,
fill_value=fill_value)
assert ((result[[0, 1, 2, 3], :] == data[indexer, :]).all())
assert (result.dtype == dtype)
result = algos.take_nd(data, indexer, axis=1,
fill_value=fill_value)
assert ((result[:, [0, 1, 2, 3]] == data[:, indexer]).all())
assert (result.dtype == dtype)
_test_dtype(np.int8, np.int16(127), np.int8)
_test_dtype(np.int8, np.int16(128), np.int16)
_test_dtype(np.int32, 1, np.int32)
_test_dtype(np.int32, 2.0, np.float64)
_test_dtype(np.int32, 3.0 + 4.0j, np.complex128)
_test_dtype(np.int32, True, np.object_)
_test_dtype(np.int32, '', np.object_)
_test_dtype(np.float64, 1, np.float64)
_test_dtype(np.float64, 2.0, np.float64)
_test_dtype(np.float64, 3.0 + 4.0j, np.complex128)
_test_dtype(np.float64, True, np.object_)
_test_dtype(np.float64, '', np.object_)
_test_dtype(np.complex128, 1, np.complex128)
_test_dtype(np.complex128, 2.0, np.complex128)
_test_dtype(np.complex128, 3.0 + 4.0j, np.complex128)
_test_dtype(np.complex128, True, np.object_)
_test_dtype(np.complex128, '', np.object_)
_test_dtype(np.bool_, 1, np.object_)
_test_dtype(np.bool_, 2.0, np.object_)
_test_dtype(np.bool_, 3.0 + 4.0j, np.object_)
_test_dtype(np.bool_, True, np.bool_)
_test_dtype(np.bool_, '', np.object_)
def test_3d_with_out(self):
def _test_dtype(dtype, can_hold_na):
data = np.random.randint(0, 2, (5, 4, 3)).astype(dtype)
indexer = [2, 1, 0, 1]
out0 = np.empty((4, 4, 3), dtype=dtype)
out1 = np.empty((5, 4, 3), dtype=dtype)
out2 = np.empty((5, 4, 4), dtype=dtype)
algos.take_nd(data, indexer, out=out0, axis=0)
algos.take_nd(data, indexer, out=out1, axis=1)
algos.take_nd(data, indexer, out=out2, axis=2)
expected0 = data.take(indexer, axis=0)
expected1 = data.take(indexer, axis=1)
expected2 = data.take(indexer, axis=2)
tm.assert_almost_equal(out0, expected0)
tm.assert_almost_equal(out1, expected1)
tm.assert_almost_equal(out2, expected2)
indexer = [2, 1, 0, -1]
out0 = np.empty((4, 4, 3), dtype=dtype)
out1 = np.empty((5, 4, 3), dtype=dtype)
out2 = np.empty((5, 4, 4), dtype=dtype)
if can_hold_na:
algos.take_nd(data, indexer, out=out0, axis=0)
algos.take_nd(data, indexer, out=out1, axis=1)
algos.take_nd(data, indexer, out=out2, axis=2)
expected0 = data.take(indexer, axis=0)
expected1 = data.take(indexer, axis=1)
expected2 = data.take(indexer, axis=2)
expected0[3, :, :] = np.nan
expected1[:, 3, :] = np.nan
expected2[:, :, 3] = np.nan
tm.assert_almost_equal(out0, expected0)
tm.assert_almost_equal(out1, expected1)
tm.assert_almost_equal(out2, expected2)
else:
for i, out in enumerate([out0, out1, out2]):
with tm.assertRaisesRegexp(TypeError, self.fill_error):
algos.take_nd(data, indexer, out=out, axis=i)
# no exception o/w
data.take(indexer, out=out, axis=i)
_test_dtype(np.float64, True)
_test_dtype(np.float32, True)
_test_dtype(np.uint64, False)
_test_dtype(np.uint32, False)
_test_dtype(np.uint16, False)
_test_dtype(np.uint8, False)
_test_dtype(np.int64, False)
_test_dtype(np.int32, False)
_test_dtype(np.int16, False)
_test_dtype(np.int8, False)
_test_dtype(np.object_, True)
_test_dtype(np.bool, False)
def test_3d_fill_nonna(self):
def _test_dtype(dtype, fill_value, out_dtype):
data = np.random.randint(0, 2, (5, 4, 3)).astype(dtype)
indexer = [2, 1, 0, -1]
result = algos.take_nd(data, indexer, axis=0,
fill_value=fill_value)
assert ((result[[0, 1, 2], :, :] == data[[2, 1, 0], :, :]).all())
assert ((result[3, :, :] == fill_value).all())
assert (result.dtype == out_dtype)
result = algos.take_nd(data, indexer, axis=1,
fill_value=fill_value)
assert ((result[:, [0, 1, 2], :] == data[:, [2, 1, 0], :]).all())
assert ((result[:, 3, :] == fill_value).all())
assert (result.dtype == out_dtype)
result = algos.take_nd(data, indexer, axis=2,
fill_value=fill_value)
assert ((result[:, :, [0, 1, 2]] == data[:, :, [2, 1, 0]]).all())
assert ((result[:, :, 3] == fill_value).all())
assert (result.dtype == out_dtype)
indexer = [2, 1, 0, 1]
result = algos.take_nd(data, indexer, axis=0,
fill_value=fill_value)
assert ((result[[0, 1, 2, 3], :, :] == data[indexer, :, :]).all())
assert (result.dtype == dtype)
result = algos.take_nd(data, indexer, axis=1,
fill_value=fill_value)
assert ((result[:, [0, 1, 2, 3], :] == data[:, indexer, :]).all())
assert (result.dtype == dtype)
result = algos.take_nd(data, indexer, axis=2,
fill_value=fill_value)
assert ((result[:, :, [0, 1, 2, 3]] == data[:, :, indexer]).all())
assert (result.dtype == dtype)
_test_dtype(np.int8, np.int16(127), np.int8)
_test_dtype(np.int8, np.int16(128), np.int16)
_test_dtype(np.int32, 1, np.int32)
_test_dtype(np.int32, 2.0, np.float64)
_test_dtype(np.int32, 3.0 + 4.0j, np.complex128)
_test_dtype(np.int32, True, np.object_)
_test_dtype(np.int32, '', np.object_)
_test_dtype(np.float64, 1, np.float64)
_test_dtype(np.float64, 2.0, np.float64)
_test_dtype(np.float64, 3.0 + 4.0j, np.complex128)
_test_dtype(np.float64, True, np.object_)
_test_dtype(np.float64, '', np.object_)
_test_dtype(np.complex128, 1, np.complex128)
_test_dtype(np.complex128, 2.0, np.complex128)
_test_dtype(np.complex128, 3.0 + 4.0j, np.complex128)
_test_dtype(np.complex128, True, np.object_)
_test_dtype(np.complex128, '', np.object_)
_test_dtype(np.bool_, 1, np.object_)
_test_dtype(np.bool_, 2.0, np.object_)
_test_dtype(np.bool_, 3.0 + 4.0j, np.object_)
_test_dtype(np.bool_, True, np.bool_)
_test_dtype(np.bool_, '', np.object_)
def test_1d_other_dtypes(self):
arr = np.random.randn(10).astype(np.float32)
indexer = [1, 2, 3, -1]
result = algos.take_1d(arr, indexer)
expected = arr.take(indexer)
expected[-1] = np.nan
tm.assert_almost_equal(result, expected)
def test_2d_other_dtypes(self):
arr = np.random.randn(10, 5).astype(np.float32)
indexer = [1, 2, 3, -1]
# axis=0
result = algos.take_nd(arr, indexer, axis=0)
expected = arr.take(indexer, axis=0)
expected[-1] = np.nan
tm.assert_almost_equal(result, expected)
# axis=1
result = algos.take_nd(arr, indexer, axis=1)
expected = arr.take(indexer, axis=1)
expected[:, -1] = np.nan
tm.assert_almost_equal(result, expected)
def test_1d_bool(self):
arr = np.array([0, 1, 0], dtype=bool)
result = algos.take_1d(arr, [0, 2, 2, 1])
expected = arr.take([0, 2, 2, 1])
self.assert_numpy_array_equal(result, expected)
result = algos.take_1d(arr, [0, 2, -1])
self.assertEqual(result.dtype, np.object_)
def test_2d_bool(self):
arr = np.array([[0, 1, 0], [1, 0, 1], [0, 1, 1]], dtype=bool)
result = algos.take_nd(arr, [0, 2, 2, 1])
expected = arr.take([0, 2, 2, 1], axis=0)
self.assert_numpy_array_equal(result, expected)
result = algos.take_nd(arr, [0, 2, 2, 1], axis=1)
expected = arr.take([0, 2, 2, 1], axis=1)
self.assert_numpy_array_equal(result, expected)
result = algos.take_nd(arr, [0, 2, -1])
self.assertEqual(result.dtype, np.object_)
def test_2d_float32(self):
arr = np.random.randn(4, 3).astype(np.float32)
indexer = [0, 2, -1, 1, -1]
# axis=0
result = algos.take_nd(arr, indexer, axis=0)
result2 = np.empty_like(result)
algos.take_nd(arr, indexer, axis=0, out=result2)
tm.assert_almost_equal(result, result2)
expected = arr.take(indexer, axis=0)
expected[[2, 4], :] = np.nan
tm.assert_almost_equal(result, expected)
        # take_nd now accepts a float32 out buffer as well
out = np.empty((len(indexer), arr.shape[1]), dtype='float32')
algos.take_nd(arr, indexer, out=out) # it works!
# axis=1
result = algos.take_nd(arr, indexer, axis=1)
result2 = np.empty_like(result)
algos.take_nd(arr, indexer, axis=1, out=result2)
tm.assert_almost_equal(result, result2)
expected = arr.take(indexer, axis=1)
expected[:, [2, 4]] = np.nan
tm.assert_almost_equal(result, expected)
def test_2d_datetime64(self):
# 2005/01/01 - 2006/01/01
arr = np.random.randint(
long(11045376), long(11360736), (5, 3)) * 100000000000
arr = arr.view(dtype='datetime64[ns]')
indexer = [0, 2, -1, 1, -1]
# axis=0
result = algos.take_nd(arr, indexer, axis=0)
result2 = np.empty_like(result)
algos.take_nd(arr, indexer, axis=0, out=result2)
tm.assert_almost_equal(result, result2)
expected = arr.take(indexer, axis=0)
expected.view(np.int64)[[2, 4], :] = iNaT
tm.assert_almost_equal(result, expected)
result = algos.take_nd(arr, indexer, axis=0,
fill_value=datetime(2007, 1, 1))
result2 = np.empty_like(result)
algos.take_nd(arr, indexer, out=result2, axis=0,
fill_value=datetime(2007, 1, 1))
tm.assert_almost_equal(result, result2)
expected = arr.take(indexer, axis=0)
expected[[2, 4], :] = datetime(2007, 1, 1)
tm.assert_almost_equal(result, expected)
# axis=1
result = algos.take_nd(arr, indexer, axis=1)
result2 = np.empty_like(result)
algos.take_nd(arr, indexer, axis=1, out=result2)
tm.assert_almost_equal(result, result2)
expected = arr.take(indexer, axis=1)
expected.view(np.int64)[:, [2, 4]] = iNaT
tm.assert_almost_equal(result, expected)
result = algos.take_nd(arr, indexer, axis=1,
fill_value=datetime(2007, 1, 1))
result2 = np.empty_like(result)
algos.take_nd(arr, indexer, out=result2, axis=1,
fill_value=datetime(2007, 1, 1))
tm.assert_almost_equal(result, result2)
expected = arr.take(indexer, axis=1)
expected[:, [2, 4]] = datetime(2007, 1, 1)
tm.assert_almost_equal(result, expected)
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| apache-2.0 |
great-expectations/great_expectations | contrib/experimental/great_expectations_experimental/expectations/expect_column_values_to_not_be_outliers.py | 1 | 14239 | import json
from scipy import stats
# !!! This giant block of imports should be something simpler, such as:
# from great_expectations.helpers.expectation_creation import *
from great_expectations.execution_engine import (
PandasExecutionEngine,
SparkDFExecutionEngine,
SqlAlchemyExecutionEngine,
)
from great_expectations.expectations.expectation import (
ColumnMapExpectation,
Expectation,
ExpectationConfiguration,
)
from great_expectations.expectations.metrics import (
ColumnMapMetricProvider,
column_condition_partial,
)
from great_expectations.expectations.registry import (
_registered_expectations,
_registered_metrics,
_registered_renderers,
)
from great_expectations.expectations.util import render_evaluation_parameter_string
from great_expectations.render.renderer.renderer import renderer
from great_expectations.render.types import RenderedStringTemplateContent
from great_expectations.render.util import num_to_str, substitute_none_for_missing
from great_expectations.validator.validator import Validator
# This class defines a Metric to support your Expectation
# For most Expectations, the main business logic for calculation will live here.
# To learn about the relationship between Metrics and Expectations, please visit
# https://docs.greatexpectations.io/en/latest/reference/core_concepts.html#expectations-and-metrics.
class ColumnValuesNotOutliers(ColumnMapMetricProvider):
# This is the id string that will be used to reference your metric.
# Please see https://docs.greatexpectations.io/en/latest/reference/core_concepts/metrics.html#metrics
# for information on how to choose an id string for your Metric.
condition_metric_name = "column_values.not_outliers"
condition_value_keys = ("method", "multiplier")
# This method defines the business logic for evaluating your metric when using a PandasExecutionEngine
@column_condition_partial(engine=PandasExecutionEngine)
def _pandas(cls, column, method="iqr", multiplier=1.5, **kwargs):
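        # A short description of the two supported methods:
        #   "iqr": a value is kept (True) when |x - median| < multiplier * IQR
        #   "std": a value is kept (True) when |x - mean| < multiplier * std
        # i.e. the condition is True for values that are NOT outliers, as
        # required by the "column_values.not_outliers" condition metric.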
if method == "iqr":
iqr = stats.iqr(column)
median = column.median()
return (column - median).abs() < multiplier * iqr
elif method == "std":
std = column.std()
mean = column.mean()
return (column - mean).abs() < multiplier * std
else:
raise NotImplementedError(f"method {method} has not been implemented")
# This method defines the business logic for evaluating your metric when using a SqlAlchemyExecutionEngine
# @column_condition_partial(engine=SqlAlchemyExecutionEngine)
# def _sqlalchemy(cls, column, _dialect, **kwargs):
# return column.in_([3])
# This method defines the business logic for evaluating your metric when using a SparkDFExecutionEngine
# @column_condition_partial(engine=SparkDFExecutionEngine)
# def _spark(cls, column, **kwargs):
# return column.isin([3])
# This class defines the Expectation itself
# The main business logic for calculation lives here.
class ExpectColumnValuesToNotBeOutliers(ColumnMapExpectation):
"""
    Expect column values not to be outliers. The user specifies the column, the
    detection method and a multiplier. Currently the standard deviation (std) and
    interquartile range (iqr) methods are supported.
"""
# These examples will be shown in the public gallery, and also executed as unit tests for your Expectation
examples = [
{
"data": {
"a": [
0.88546138,
1.3061609,
-0.32247349,
0.97258135,
0.98273209,
-0.10502805,
0.63429027,
-0.520042,
-0.15674414,
0.94144714,
-0.88228603,
-0.60380027,
-0.11121819,
0.74895147,
0.42992403,
0.65493905,
1.35901276,
0.49965162,
2.0,
3.0,
], # drawn from Normal(0,1)
"b": [
1.46104728,
1.33568658,
1.39303305,
1.34369635,
2.07627429,
3.22523841,
1.2514533,
2.44427933,
2.12703316,
3.29557985,
1.04298411,
1.3659108,
4.18867559,
2.85009897,
1.58180929,
1.47433799,
1.10678471,
4.73338285,
5.0,
10.0,
], # drawn from Gamma(1,1)
"c": [
78.09208927,
79.08947083,
78.15403075,
91.01199697,
86.87351353,
93.31079309,
92.41605866,
85.95186289,
85.57633936,
82.9214903,
78.67996655,
83.65076874,
76.51547517,
75.95991938,
73.56762212,
98.82595865,
88.0945241,
75.38697834,
115.0,
0.0,
], # drawn from Beta(11, 2)
"d": [
0.15131528,
-0.32290392,
0.33894553,
0.41806171,
0.09906698,
0.32659221,
-0.07283207,
0.72584037,
0.07496465,
-0.28889126,
3.57416451,
3.44258958,
3.11353884,
2.82008269,
3.68115642,
3.23682442,
2.70231677,
3.21949992,
4.06638354,
4.77655811,
],
},
"tests": [
{
"title": "positive_test_std",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "a", "method": "std", "multiplier": 3},
"out": {
"success": True,
},
},
{
"title": "negative_test_iqr",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "a", "method": "iqr", "multiplier": 1.5},
"out": {
"success": False,
"unexpected_index_list": [19],
"unexpected_list": [3],
},
},
{
"title": "negative_test_iqr_mostly",
"exact_match_out": False,
"include_in_gallery": True,
"in": {
"column": "b",
"mostly": 0.9,
"method": "iqr",
"multiplier": 1.5,
},
"out": {
"success": False,
"unexpected_index_list": [17, 18, 19],
"unexpected_list": [4.73338285, 5.0, 10.0],
},
},
{
"title": "positive_test_std_mostly",
"exact_match_out": False,
"include_in_gallery": True,
"in": {
"column": "b",
"mostly": 0.9,
"method": "std",
"multiplier": 3,
},
"out": {
"success": True,
},
},
{
"title": "negative_test_std",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "c", "method": "std", "multiplier": 3},
"out": {
"success": False,
"unexpected_index_list": [19],
"unexpected_list": [0],
},
},
{
"title": "positive_test_iqr",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "d", "method": "iqr", "multiplier": 1.5},
"out": {
"success": True,
},
},
],
}
]
# This dictionary contains metadata for display in the public gallery
library_metadata = {
"maturity": "experimental", # "experimental", "beta", or "production"
"tags": [ # Tags for this Expectation in the gallery
# "experimental"
],
"contributors": [ # Github handles for all contributors to this Expectation.
"@rexboyce",
"@lodeous",
"@bragleg",
],
"package": "experimental_expectations",
}
# This is the id string of the Metric used by this Expectation.
# For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above.
map_metric = "column_values.not_outliers"
# This is a list of parameter names that can affect whether the Expectation evaluates to True or False
# Please see https://docs.greatexpectations.io/en/latest/reference/core_concepts/expectations/expectations.html#expectation-concepts-domain-and-success-keys
# for more information about domain and success keys, and other arguments to Expectations
success_keys = ("mostly", "method", "multiplier")
# This dictionary contains default values for any parameters that should have default values
default_kwarg_values = {}
# This method defines a question Renderer
# For more info on Renderers, see
# https://docs.greatexpectations.io/en/latest/guides/how_to_guides/configuring_data_docs/how_to_create_renderers_for_custom_expectations.html
# !!! This example renderer should render RenderedStringTemplateContent, not just a string
# @classmethod
# @renderer(renderer_type="renderer.question")
# def _question_renderer(
# cls, configuration, result=None, language=None, runtime_configuration=None
# ):
# column = configuration.kwargs.get("column")
# mostly = configuration.kwargs.get("mostly")
#
# return f'Do at least {mostly * 100}% of values in column "{column}" equal 3?'
# This method defines an answer Renderer
# !!! This example renderer should render RenderedStringTemplateContent, not just a string
# @classmethod
# @renderer(renderer_type="renderer.answer")
# def _answer_renderer(
# cls, configuration=None, result=None, language=None, runtime_configuration=None
# ):
# column = result.expectation_config.kwargs.get("column")
# mostly = result.expectation_config.kwargs.get("mostly")
# regex = result.expectation_config.kwargs.get("regex")
# if result.success:
# return f'At least {mostly * 100}% of values in column "{column}" equal 3.'
# else:
# return f'Less than {mostly * 100}% of values in column "{column}" equal 3.'
# This method defines a prescriptive Renderer
# @classmethod
# @renderer(renderer_type="renderer.prescriptive")
# @render_evaluation_parameter_string
# def _prescriptive_renderer(
# cls,
# configuration=None,
# result=None,
# language=None,
# runtime_configuration=None,
# **kwargs,
# ):
# # !!! This example renderer should be shorter
# runtime_configuration = runtime_configuration or {}
# include_column_name = runtime_configuration.get("include_column_name", True)
# include_column_name = (
# include_column_name if include_column_name is not None else True
# )
# styling = runtime_configuration.get("styling")
# params = substitute_none_for_missing(
# configuration.kwargs,
# ["column", "regex", "mostly", "row_condition", "condition_parser"],
# )
#
# template_str = "values must be equal to 3"
# if params["mostly"] is not None:
# params["mostly_pct"] = num_to_str(
# params["mostly"] * 100, precision=15, no_scientific=True
# )
# # params["mostly_pct"] = "{:.14f}".format(params["mostly"]*100).rstrip("0").rstrip(".")
# template_str += ", at least $mostly_pct % of the time."
# else:
# template_str += "."
#
# if include_column_name:
# template_str = "$column " + template_str
#
# if params["row_condition"] is not None:
# (
# conditional_template_str,
# conditional_params,
# ) = parse_row_condition_string_pandas_engine(params["row_condition"])
# template_str = conditional_template_str + ", then " + template_str
# params.update(conditional_params)
#
# return [
# RenderedStringTemplateContent(
# **{
# "content_block_type": "string_template",
# "string_template": {
# "template": template_str,
# "params": params,
# "styling": styling,
# },
# }
# )
# ]
if __name__ == "__main__":
diagnostics_report = ExpectColumnValuesToNotBeOutliers().run_diagnostics()
print(json.dumps(diagnostics_report, indent=2))
| apache-2.0 |
ephes/scikit-learn | sklearn/metrics/tests/test_regression.py | 272 | 6066 | from __future__ import division, print_function
import numpy as np
from itertools import product
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.metrics import explained_variance_score
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from sklearn.metrics import median_absolute_error
from sklearn.metrics import r2_score
from sklearn.metrics.regression import _check_reg_targets
def test_regression_metrics(n_samples=50):
y_true = np.arange(n_samples)
y_pred = y_true + 1
assert_almost_equal(mean_squared_error(y_true, y_pred), 1.)
assert_almost_equal(mean_absolute_error(y_true, y_pred), 1.)
assert_almost_equal(median_absolute_error(y_true, y_pred), 1.)
assert_almost_equal(r2_score(y_true, y_pred), 0.995, 2)
assert_almost_equal(explained_variance_score(y_true, y_pred), 1.)
def test_multioutput_regression():
y_true = np.array([[1, 0, 0, 1], [0, 1, 1, 1], [1, 1, 0, 1]])
y_pred = np.array([[0, 0, 0, 1], [1, 0, 1, 1], [0, 0, 0, 1]])
error = mean_squared_error(y_true, y_pred)
assert_almost_equal(error, (1. / 3 + 2. / 3 + 2. / 3) / 4.)
# mean_absolute_error and mean_squared_error are equal because
# it is a binary problem.
error = mean_absolute_error(y_true, y_pred)
assert_almost_equal(error, (1. / 3 + 2. / 3 + 2. / 3) / 4.)
error = r2_score(y_true, y_pred, multioutput='variance_weighted')
assert_almost_equal(error, 1. - 5. / 2)
error = r2_score(y_true, y_pred, multioutput='uniform_average')
assert_almost_equal(error, -.875)
def test_regression_metrics_at_limits():
assert_almost_equal(mean_squared_error([0.], [0.]), 0.00, 2)
assert_almost_equal(mean_absolute_error([0.], [0.]), 0.00, 2)
assert_almost_equal(median_absolute_error([0.], [0.]), 0.00, 2)
assert_almost_equal(explained_variance_score([0.], [0.]), 1.00, 2)
assert_almost_equal(r2_score([0., 1], [0., 1]), 1.00, 2)
def test__check_reg_targets():
# All of length 3
EXAMPLES = [
("continuous", [1, 2, 3], 1),
("continuous", [[1], [2], [3]], 1),
("continuous-multioutput", [[1, 1], [2, 2], [3, 1]], 2),
("continuous-multioutput", [[5, 1], [4, 2], [3, 1]], 2),
("continuous-multioutput", [[1, 3, 4], [2, 2, 2], [3, 1, 1]], 3),
]
for (type1, y1, n_out1), (type2, y2, n_out2) in product(EXAMPLES,
repeat=2):
if type1 == type2 and n_out1 == n_out2:
y_type, y_check1, y_check2, multioutput = _check_reg_targets(
y1, y2, None)
assert_equal(type1, y_type)
if type1 == 'continuous':
assert_array_equal(y_check1, np.reshape(y1, (-1, 1)))
assert_array_equal(y_check2, np.reshape(y2, (-1, 1)))
else:
assert_array_equal(y_check1, y1)
assert_array_equal(y_check2, y2)
else:
assert_raises(ValueError, _check_reg_targets, y1, y2, None)
def test_regression_multioutput_array():
y_true = [[1, 2], [2.5, -1], [4.5, 3], [5, 7]]
y_pred = [[1, 1], [2, -1], [5, 4], [5, 6.5]]
mse = mean_squared_error(y_true, y_pred, multioutput='raw_values')
mae = mean_absolute_error(y_true, y_pred, multioutput='raw_values')
r = r2_score(y_true, y_pred, multioutput='raw_values')
evs = explained_variance_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(mse, [0.125, 0.5625], decimal=2)
assert_array_almost_equal(mae, [0.25, 0.625], decimal=2)
assert_array_almost_equal(r, [0.95, 0.93], decimal=2)
assert_array_almost_equal(evs, [0.95, 0.93], decimal=2)
# mean_absolute_error and mean_squared_error are equal because
# it is a binary problem.
y_true = [[0, 0]]*4
y_pred = [[1, 1]]*4
mse = mean_squared_error(y_true, y_pred, multioutput='raw_values')
mae = mean_absolute_error(y_true, y_pred, multioutput='raw_values')
r = r2_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(mse, [1., 1.], decimal=2)
assert_array_almost_equal(mae, [1., 1.], decimal=2)
assert_array_almost_equal(r, [0., 0.], decimal=2)
r = r2_score([[0, -1], [0, 1]], [[2, 2], [1, 1]], multioutput='raw_values')
assert_array_almost_equal(r, [0, -3.5], decimal=2)
assert_equal(np.mean(r), r2_score([[0, -1], [0, 1]], [[2, 2], [1, 1]],
multioutput='uniform_average'))
evs = explained_variance_score([[0, -1], [0, 1]], [[2, 2], [1, 1]],
multioutput='raw_values')
assert_array_almost_equal(evs, [0, -1.25], decimal=2)
# Checking for the condition in which both numerator and denominator is
# zero.
y_true = [[1, 3], [-1, 2]]
y_pred = [[1, 4], [-1, 1]]
r2 = r2_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(r2, [1., -3.], decimal=2)
assert_equal(np.mean(r2), r2_score(y_true, y_pred,
multioutput='uniform_average'))
evs = explained_variance_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(evs, [1., -3.], decimal=2)
assert_equal(np.mean(evs), explained_variance_score(y_true, y_pred))
def test_regression_custom_weights():
y_true = [[1, 2], [2.5, -1], [4.5, 3], [5, 7]]
y_pred = [[1, 1], [2, -1], [5, 4], [5, 6.5]]
msew = mean_squared_error(y_true, y_pred, multioutput=[0.4, 0.6])
maew = mean_absolute_error(y_true, y_pred, multioutput=[0.4, 0.6])
rw = r2_score(y_true, y_pred, multioutput=[0.4, 0.6])
evsw = explained_variance_score(y_true, y_pred, multioutput=[0.4, 0.6])
assert_almost_equal(msew, 0.39, decimal=2)
assert_almost_equal(maew, 0.475, decimal=3)
assert_almost_equal(rw, 0.94, decimal=2)
assert_almost_equal(evsw, 0.94, decimal=2)
| bsd-3-clause |
rahul-c1/scikit-learn | sklearn/feature_selection/tests/test_base.py | 170 | 3666 | import numpy as np
from scipy import sparse as sp
from nose.tools import assert_raises, assert_equal
from numpy.testing import assert_array_equal
from sklearn.base import BaseEstimator
from sklearn.feature_selection.base import SelectorMixin
from sklearn.utils import check_array
class StepSelector(SelectorMixin, BaseEstimator):
"""Retain every `step` features (beginning with 0)"""
def __init__(self, step=2):
self.step = step
def fit(self, X, y=None):
X = check_array(X, 'csc')
self.n_input_feats = X.shape[1]
return self
def _get_support_mask(self):
mask = np.zeros(self.n_input_feats, dtype=bool)
mask[::self.step] = True
return mask
support = [True, False] * 5
support_inds = [0, 2, 4, 6, 8]
X = np.arange(20).reshape(2, 10)
Xt = np.arange(0, 20, 2).reshape(2, 5)
Xinv = X.copy()
Xinv[:, 1::2] = 0
y = [0, 1]
feature_names = list('ABCDEFGHIJ')
feature_names_t = feature_names[::2]
feature_names_inv = np.array(feature_names)
feature_names_inv[1::2] = ''
def test_transform_dense():
sel = StepSelector()
Xt_actual = sel.fit(X, y).transform(X)
Xt_actual2 = StepSelector().fit_transform(X, y)
assert_array_equal(Xt, Xt_actual)
assert_array_equal(Xt, Xt_actual2)
# Check dtype matches
assert_equal(np.int32, sel.transform(X.astype(np.int32)).dtype)
assert_equal(np.float32, sel.transform(X.astype(np.float32)).dtype)
# Check 1d list and other dtype:
names_t_actual = sel.transform(feature_names)
assert_array_equal(feature_names_t, names_t_actual.ravel())
# Check wrong shape raises error
assert_raises(ValueError, sel.transform, np.array([[1], [2]]))
def test_transform_sparse():
sparse = sp.csc_matrix
sel = StepSelector()
Xt_actual = sel.fit(sparse(X)).transform(sparse(X))
Xt_actual2 = sel.fit_transform(sparse(X))
assert_array_equal(Xt, Xt_actual.toarray())
assert_array_equal(Xt, Xt_actual2.toarray())
# Check dtype matches
assert_equal(np.int32, sel.transform(sparse(X).astype(np.int32)).dtype)
assert_equal(np.float32, sel.transform(sparse(X).astype(np.float32)).dtype)
# Check wrong shape raises error
assert_raises(ValueError, sel.transform, np.array([[1], [2]]))
def test_inverse_transform_dense():
sel = StepSelector()
Xinv_actual = sel.fit(X, y).inverse_transform(Xt)
assert_array_equal(Xinv, Xinv_actual)
# Check dtype matches
assert_equal(np.int32,
sel.inverse_transform(Xt.astype(np.int32)).dtype)
assert_equal(np.float32,
sel.inverse_transform(Xt.astype(np.float32)).dtype)
# Check 1d list and other dtype:
names_inv_actual = sel.inverse_transform(feature_names_t)
assert_array_equal(feature_names_inv, names_inv_actual.ravel())
# Check wrong shape raises error
assert_raises(ValueError, sel.inverse_transform, np.array([[1], [2]]))
def test_inverse_transform_sparse():
sparse = sp.csc_matrix
sel = StepSelector()
Xinv_actual = sel.fit(sparse(X)).inverse_transform(sparse(Xt))
assert_array_equal(Xinv, Xinv_actual.toarray())
# Check dtype matches
assert_equal(np.int32,
sel.inverse_transform(sparse(Xt).astype(np.int32)).dtype)
assert_equal(np.float32,
sel.inverse_transform(sparse(Xt).astype(np.float32)).dtype)
# Check wrong shape raises error
assert_raises(ValueError, sel.inverse_transform, np.array([[1], [2]]))
def test_get_support():
sel = StepSelector()
sel.fit(X, y)
assert_array_equal(support, sel.get_support())
assert_array_equal(support_inds, sel.get_support(indices=True))
| bsd-3-clause |
robin-lai/scikit-learn | examples/cluster/plot_affinity_propagation.py | 349 | 2304 | """
=================================================
Demo of affinity propagation clustering algorithm
=================================================
Reference:
Brendan J. Frey and Delbert Dueck, "Clustering by Passing Messages
Between Data Points", Science Feb. 2007
"""
print(__doc__)
from sklearn.cluster import AffinityPropagation
from sklearn import metrics
from sklearn.datasets.samples_generator import make_blobs
##############################################################################
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1]]
X, labels_true = make_blobs(n_samples=300, centers=centers, cluster_std=0.5,
random_state=0)
##############################################################################
# Compute Affinity Propagation
af = AffinityPropagation(preference=-50).fit(X)
cluster_centers_indices = af.cluster_centers_indices_
labels = af.labels_
n_clusters_ = len(cluster_centers_indices)
print('Estimated number of clusters: %d' % n_clusters_)
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels_true, labels))
print("Completeness: %0.3f" % metrics.completeness_score(labels_true, labels))
print("V-measure: %0.3f" % metrics.v_measure_score(labels_true, labels))
print("Adjusted Rand Index: %0.3f"
% metrics.adjusted_rand_score(labels_true, labels))
print("Adjusted Mutual Information: %0.3f"
% metrics.adjusted_mutual_info_score(labels_true, labels))
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, labels, metric='sqeuclidean'))
##############################################################################
# Plot result
import matplotlib.pyplot as plt
from itertools import cycle
plt.close('all')
plt.figure(1)
plt.clf()
colors = cycle('bgrcmykbgrcmykbgrcmykbgrcmyk')
for k, col in zip(range(n_clusters_), colors):
class_members = labels == k
cluster_center = X[cluster_centers_indices[k]]
plt.plot(X[class_members, 0], X[class_members, 1], col + '.')
plt.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=14)
for x in X[class_members]:
plt.plot([cluster_center[0], x[0]], [cluster_center[1], x[1]], col)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
| bsd-3-clause |
emmanuelle/scikits.image | doc/examples/plot_swirl.py | 2 | 2572 | r"""
=====
Swirl
=====
Image swirling is a non-linear image deformation that creates a whirlpool
effect. This example describes the implementation of this transform in
``skimage``, as well as the underlying warp mechanism.
Image warping
`````````````
When applying a geometric transformation on an image, we typically make use of
a reverse mapping, i.e., for each pixel in the output image, we compute its
corresponding position in the input. The reason is that, if we were to do it
the other way around (map each input pixel to its new output position), some
pixels in the output may be left empty. On the other hand, each output
coordinate has exactly one corresponding location in (or outside) the input
image, and even if that position is non-integer, we may use interpolation to
compute the corresponding image value.
Performing a reverse mapping
````````````````````````````
To perform a geometric warp in ``skimage``, you simply need to provide the
reverse mapping to the ``skimage.transform.warp`` function. E.g., consider the
case where we would like to shift an image 50 pixels to the left. The reverse
mapping for such a shift would be::
def shift_left(xy):
xy[:, 0] += 50
return xy
The corresponding call to warp is::
from skimage.transform import warp
warp(image, shift_left)
The swirl transformation
````````````````````````
Consider the coordinate :math:`(x, y)` in the output image. The reverse
mapping for the swirl transformation first computes, relative to a center
:math:`(x_0, y_0)`, its polar coordinates,
.. math::
    \theta = \arctan\left(\frac{y - y_0}{x - x_0}\right)
\rho = \sqrt{(x - x_0)^2 + (y - y_0)^2},
and then transforms them according to
.. math::
r = \ln(2) \, \mathtt{radius} / 5
\phi = \mathtt{rotation}
s = \mathtt{strength}
    \theta' = \phi + s \, e^{-\rho / r} + \theta
where ``strength`` is a parameter for the amount of swirl, ``radius`` indicates
the swirl extent in pixels, and ``rotation`` adds a rotation angle. The
transformation of ``radius`` into :math:`r` is to ensure that the
transformation decays to :math:`\approx 1/1000^{\mathsf{th}}` within the
specified radius.
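For illustration only, a rough sketch of this reverse mapping (assuming
``import numpy as np``; the real implementation lives in
``skimage.transform.swirl``) could look like::

    def swirl_mapping(xy, center, rotation, strength, radius):
        x = xy[:, 0] - center[0]
        y = xy[:, 1] - center[1]
        rho = np.sqrt(x ** 2 + y ** 2)
        theta = np.arctan2(y, x)
        r = np.log(2) * radius / 5
        theta_prime = rotation + strength * np.exp(-rho / r) + theta
        xy[:, 0] = center[0] + rho * np.cos(theta_prime)
        xy[:, 1] = center[1] + rho * np.sin(theta_prime)
        return xy

Such a function could then be passed to ``skimage.transform.warp`` in the same
way as ``shift_left`` above.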
"""
from skimage import data
from skimage.transform import swirl
import matplotlib.pyplot as plt
image = data.checkerboard()
swirled = swirl(image, rotation=0, strength=10, radius=120, order=2)
f, (ax0, ax1) = plt.subplots(1, 2, figsize=(8, 3))
ax0.imshow(image, cmap=plt.cm.gray, interpolation='none')
ax0.axis('off')
ax1.imshow(swirled, cmap=plt.cm.gray, interpolation='none')
ax1.axis('off')
plt.show()
| bsd-3-clause |
shikhardb/scikit-learn | sklearn/naive_bayes.py | 11 | 27998 | # -*- coding: utf-8 -*-
"""
The :mod:`sklearn.naive_bayes` module implements Naive Bayes algorithms. These
are supervised learning methods based on applying Bayes' theorem with strong
(naive) feature independence assumptions.
"""
# Author: Vincent Michel <[email protected]>
# Minor fixes by Fabian Pedregosa
# Amit Aides <[email protected]>
# Yehuda Finkelstein <[email protected]>
# Lars Buitinck <[email protected]>
# Jan Hendrik Metzen <[email protected]>
# (parts based on earlier work by Mathieu Blondel)
#
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from .base import BaseEstimator, ClassifierMixin
from .preprocessing import binarize
from .preprocessing import LabelBinarizer
from .preprocessing import label_binarize
from .utils import check_X_y, check_array
from .utils.extmath import safe_sparse_dot, logsumexp
from .utils.multiclass import _check_partial_fit_first_call
from .utils.fixes import in1d
from .utils.validation import check_is_fitted
from .externals import six
__all__ = ['BernoulliNB', 'GaussianNB', 'MultinomialNB']
class BaseNB(six.with_metaclass(ABCMeta, BaseEstimator, ClassifierMixin)):
"""Abstract base class for naive Bayes estimators"""
@abstractmethod
def _joint_log_likelihood(self, X):
"""Compute the unnormalized posterior log probability of X
I.e. ``log P(c) + log P(x|c)`` for all rows x of X, as an array-like of
        shape [n_samples, n_classes].
Input is passed to _joint_log_likelihood as-is by predict,
predict_proba and predict_log_proba.
"""
def predict(self, X):
"""
Perform classification on an array of test vectors X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples]
Predicted target values for X
"""
jll = self._joint_log_likelihood(X)
return self.classes_[np.argmax(jll, axis=1)]
def predict_log_proba(self, X):
"""
Return log-probability estimates for the test vector X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array-like, shape = [n_samples, n_classes]
Returns the log-probability of the samples for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute `classes_`.
"""
jll = self._joint_log_likelihood(X)
# normalize by P(x) = P(f_1, ..., f_n)
log_prob_x = logsumexp(jll, axis=1)
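        # log P(c | x) = jll[:, c] - log P(x), computed stably in log space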
return jll - np.atleast_2d(log_prob_x).T
def predict_proba(self, X):
"""
Return probability estimates for the test vector X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array-like, shape = [n_samples, n_classes]
Returns the probability of the samples for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute `classes_`.
"""
return np.exp(self.predict_log_proba(X))
class GaussianNB(BaseNB):
"""
Gaussian Naive Bayes (GaussianNB)
Can perform online updates to model parameters via `partial_fit` method.
For details on algorithm used to update feature means and variance online,
see Stanford CS tech report STAN-CS-79-773 by Chan, Golub, and LeVeque:
http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf
Attributes
----------
class_prior_ : array, shape (n_classes,)
probability of each class.
class_count_ : array, shape (n_classes,)
number of training samples observed in each class.
theta_ : array, shape (n_classes, n_features)
mean of each feature per class
sigma_ : array, shape (n_classes, n_features)
variance of each feature per class
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> Y = np.array([1, 1, 1, 2, 2, 2])
>>> from sklearn.naive_bayes import GaussianNB
>>> clf = GaussianNB()
>>> clf.fit(X, Y)
GaussianNB()
>>> print(clf.predict([[-0.8, -1]]))
[1]
>>> clf_pf = GaussianNB()
>>> clf_pf.partial_fit(X, Y, np.unique(Y))
GaussianNB()
>>> print(clf_pf.predict([[-0.8, -1]]))
[1]
"""
def fit(self, X, y, sample_weight=None):
"""Fit Gaussian Naive Bayes according to X, y
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
Target values.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Returns self.
"""
X, y = check_X_y(X, y)
return self._partial_fit(X, y, np.unique(y), _refit=True,
sample_weight=sample_weight)
@staticmethod
def _update_mean_variance(n_past, mu, var, X, sample_weight=None):
"""Compute online update of Gaussian mean and variance.
Given starting sample count, mean, and variance, a new set of
points X, and optionally sample weights, return the updated mean and
variance. (NB - each dimension (column) in X is treated as independent
-- you get variance, not covariance).
Can take scalar mean and variance, or vector mean and variance to
simultaneously update a number of independent Gaussians.
See Stanford CS tech report STAN-CS-79-773 by Chan, Golub, and LeVeque:
http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf
Parameters
----------
n_past : int
Number of samples represented in old mean and variance. If sample
weights were given, this should contain the sum of sample
weights represented in old mean and variance.
mu : array-like, shape (number of Gaussians,)
Means for Gaussians in original set.
        var : array-like, shape (number of Gaussians,)
            Variances for Gaussians in original set.
        X : array-like, shape (n_samples, n_features)
            New data points to fold into the running mean and variance.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
total_mu : array-like, shape (number of Gaussians,)
Updated mean for each Gaussian over the combined set.
total_var : array-like, shape (number of Gaussians,)
Updated variance for each Gaussian over the combined set.
"""
if X.shape[0] == 0:
return mu, var
# Compute (potentially weighted) mean and variance of new datapoints
if sample_weight is not None:
n_new = float(sample_weight.sum())
new_mu = np.average(X, axis=0, weights=sample_weight / n_new)
new_var = np.average((X - new_mu) ** 2, axis=0,
weights=sample_weight / n_new)
else:
n_new = X.shape[0]
new_var = np.var(X, axis=0)
new_mu = np.mean(X, axis=0)
if n_past == 0:
return new_mu, new_var
n_total = float(n_past + n_new)
# Combine mean of old and new data, taking into consideration
# (weighted) number of observations
total_mu = (n_new * new_mu + n_past * mu) / n_total
# Combine variance of old and new data, taking into consideration
# (weighted) number of observations. This is achieved by combining
# the sum-of-squared-differences (ssd)
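        # using the pairwise update
        #   total_ssd = old_ssd + new_ssd
        #               + (n_past * n_new / n_total) * (mu - new_mu) ** 2
        # (the factor below is written equivalently as
        #  n_past / (n_new * n_total) * (n_new * (mu - new_mu)) ** 2).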
old_ssd = n_past * var
new_ssd = n_new * new_var
total_ssd = (old_ssd + new_ssd +
(n_past / float(n_new * n_total)) *
(n_new * mu - n_new * new_mu) ** 2)
total_var = total_ssd / n_total
return total_mu, total_var
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Incremental fit on a batch of samples.
This method is expected to be called several times consecutively
on different chunks of a dataset so as to implement out-of-core
or online learning.
This is especially useful when the whole dataset is too big to fit in
memory at once.
This method has some performance and numerical stability overhead,
hence it is better to call partial_fit on chunks of data that are
        as large as possible (as long as they fit in the memory budget) to
hide the overhead.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target values.
classes : array-like, shape (n_classes,)
List of all the classes that can possibly appear in the y vector.
Must be provided at the first call to partial_fit, can be omitted
in subsequent calls.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Returns self.
"""
return self._partial_fit(X, y, classes, _refit=False,
sample_weight=sample_weight)
def _partial_fit(self, X, y, classes=None, _refit=False,
sample_weight=None):
"""Actual implementation of Gaussian NB fitting.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target values.
classes : array-like, shape (n_classes,)
List of all the classes that can possibly appear in the y vector.
Must be provided at the first call to partial_fit, can be omitted
in subsequent calls.
_refit: bool
If true, act as though this were the first time we called
_partial_fit (ie, throw away any past fitting and start over).
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Returns self.
"""
X, y = check_X_y(X, y)
epsilon = 1e-9
if _refit:
self.classes_ = None
if _check_partial_fit_first_call(self, classes):
# This is the first call to partial_fit:
# initialize various cumulative counters
n_features = X.shape[1]
n_classes = len(self.classes_)
self.theta_ = np.zeros((n_classes, n_features))
self.sigma_ = np.zeros((n_classes, n_features))
self.class_prior_ = np.zeros(n_classes)
self.class_count_ = np.zeros(n_classes)
else:
if X.shape[1] != self.theta_.shape[1]:
msg = "Number of features %d does not match previous data %d."
raise ValueError(msg % (X.shape[1], self.theta_.shape[1]))
# Put epsilon back in each time
self.sigma_[:, :] -= epsilon
classes = self.classes_
unique_y = np.unique(y)
unique_y_in_classes = in1d(unique_y, classes)
if not np.all(unique_y_in_classes):
raise ValueError("The target label(s) %s in y do not exist in the "
"initial classes %s" %
(y[~unique_y_in_classes], classes))
for y_i in unique_y:
i = classes.searchsorted(y_i)
X_i = X[y == y_i, :]
if sample_weight is not None:
sw_i = sample_weight[y == y_i]
N_i = sw_i.sum()
else:
sw_i = None
N_i = X_i.shape[0]
new_theta, new_sigma = self._update_mean_variance(
self.class_count_[i], self.theta_[i, :], self.sigma_[i, :],
X_i, sw_i)
self.theta_[i, :] = new_theta
self.sigma_[i, :] = new_sigma
self.class_count_[i] += N_i
self.sigma_[:, :] += epsilon
self.class_prior_[:] = self.class_count_ / np.sum(self.class_count_)
return self
def _joint_log_likelihood(self, X):
check_is_fitted(self, "classes_")
X = check_array(X)
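        # For each class c, compute log P(c) + log P(x | c), where the
        # class-conditional likelihood is a diagonal Gaussian:
        #   log P(x | c) = -0.5 * sum_j [ log(2 * pi * sigma_[c, j])
        #                                 + (x_j - theta_[c, j]) ** 2 / sigma_[c, j] ]
        # (sigma_ stores per-feature variances, theta_ per-feature means).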
joint_log_likelihood = []
for i in range(np.size(self.classes_)):
jointi = np.log(self.class_prior_[i])
n_ij = - 0.5 * np.sum(np.log(2. * np.pi * self.sigma_[i, :]))
n_ij -= 0.5 * np.sum(((X - self.theta_[i, :]) ** 2) /
(self.sigma_[i, :]), 1)
joint_log_likelihood.append(jointi + n_ij)
joint_log_likelihood = np.array(joint_log_likelihood).T
return joint_log_likelihood
class BaseDiscreteNB(BaseNB):
"""Abstract base class for naive Bayes on discrete/categorical data
Any estimator based on this class should provide:
__init__
_joint_log_likelihood(X) as per BaseNB
"""
def _update_class_log_prior(self, class_prior=None):
n_classes = len(self.classes_)
if class_prior is not None:
if len(class_prior) != n_classes:
raise ValueError("Number of priors must match number of"
" classes.")
self.class_log_prior_ = np.log(class_prior)
elif self.fit_prior:
# empirical prior, with sample_weight taken into account
self.class_log_prior_ = (np.log(self.class_count_)
- np.log(self.class_count_.sum()))
else:
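            # uniform prior: every class gets log(1 / n_classes)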
self.class_log_prior_ = np.zeros(n_classes) - np.log(n_classes)
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Incremental fit on a batch of samples.
This method is expected to be called several times consecutively
on different chunks of a dataset so as to implement out-of-core
or online learning.
This is especially useful when the whole dataset is too big to fit in
memory at once.
        This method has some performance overhead, hence it is better to call
        partial_fit on chunks of data that are as large as possible
        (as long as they fit in the memory budget) to hide the overhead.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
classes : array-like, shape = [n_classes]
List of all the classes that can possibly appear in the y vector.
Must be provided at the first call to partial_fit, can be omitted
in subsequent calls.
sample_weight : array-like, shape = [n_samples], optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Returns self.
"""
X = check_array(X, accept_sparse='csr', dtype=np.float64)
_, n_features = X.shape
if _check_partial_fit_first_call(self, classes):
# This is the first call to partial_fit:
# initialize various cumulative counters
n_effective_classes = len(classes) if len(classes) > 1 else 2
self.class_count_ = np.zeros(n_effective_classes, dtype=np.float64)
self.feature_count_ = np.zeros((n_effective_classes, n_features),
dtype=np.float64)
elif n_features != self.coef_.shape[1]:
msg = "Number of features %d does not match previous data %d."
raise ValueError(msg % (n_features, self.coef_.shape[-1]))
Y = label_binarize(y, classes=self.classes_)
if Y.shape[1] == 1:
Y = np.concatenate((1 - Y, Y), axis=1)
n_samples, n_classes = Y.shape
if X.shape[0] != Y.shape[0]:
msg = "X.shape[0]=%d and y.shape[0]=%d are incompatible."
raise ValueError(msg % (X.shape[0], y.shape[0]))
# convert to float to support sample weight consistently
Y = Y.astype(np.float64)
if sample_weight is not None:
Y *= check_array(sample_weight).T
class_prior = self.class_prior
# Count raw events from data before updating the class log prior
# and feature log probas
self._count(X, Y)
# XXX: OPTIM: we could introduce a public finalization method to
# be called by the user explicitly just once after several consecutive
# calls to partial_fit and prior any call to predict[_[log_]proba]
# to avoid computing the smooth log probas at each call to partial fit
self._update_feature_log_prob()
self._update_class_log_prior(class_prior=class_prior)
return self
def fit(self, X, y, sample_weight=None):
"""Fit Naive Bayes classifier according to X, y
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
sample_weight : array-like, shape = [n_samples], optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Returns self.
"""
X, y = check_X_y(X, y, 'csr')
_, n_features = X.shape
labelbin = LabelBinarizer()
Y = labelbin.fit_transform(y)
self.classes_ = labelbin.classes_
if Y.shape[1] == 1:
Y = np.concatenate((1 - Y, Y), axis=1)
# convert to float to support sample weight consistently;
# this means we also don't have to cast X to floating point
Y = Y.astype(np.float64)
if sample_weight is not None:
Y *= check_array(sample_weight).T
class_prior = self.class_prior
# Count raw events from data before updating the class log prior
# and feature log probas
n_effective_classes = Y.shape[1]
self.class_count_ = np.zeros(n_effective_classes, dtype=np.float64)
self.feature_count_ = np.zeros((n_effective_classes, n_features),
dtype=np.float64)
self._count(X, Y)
self._update_feature_log_prob()
self._update_class_log_prior(class_prior=class_prior)
return self
# XXX The following is a stopgap measure; we need to set the dimensions
# of class_log_prior_ and feature_log_prob_ correctly.
def _get_coef(self):
return (self.feature_log_prob_[1:]
if len(self.classes_) == 2 else self.feature_log_prob_)
def _get_intercept(self):
return (self.class_log_prior_[1:]
if len(self.classes_) == 2 else self.class_log_prior_)
coef_ = property(_get_coef)
intercept_ = property(_get_intercept)
class MultinomialNB(BaseDiscreteNB):
"""
Naive Bayes classifier for multinomial models
The multinomial Naive Bayes classifier is suitable for classification with
discrete features (e.g., word counts for text classification). The
multinomial distribution normally requires integer feature counts. However,
in practice, fractional counts such as tf-idf may also work.
Parameters
----------
alpha : float, optional (default=1.0)
Additive (Laplace/Lidstone) smoothing parameter
(0 for no smoothing).
fit_prior : boolean
Whether to learn class prior probabilities or not.
If false, a uniform prior will be used.
class_prior : array-like, size (n_classes,)
Prior probabilities of the classes. If specified the priors are not
adjusted according to the data.
Attributes
----------
class_log_prior_ : array, shape (n_classes, )
Smoothed empirical log probability for each class.
intercept_ : property
Mirrors ``class_log_prior_`` for interpreting MultinomialNB
as a linear model.
feature_log_prob_ : array, shape (n_classes, n_features)
Empirical log probability of features
given a class, ``P(x_i|y)``.
coef_ : property
Mirrors ``feature_log_prob_`` for interpreting MultinomialNB
as a linear model.
class_count_ : array, shape (n_classes,)
Number of samples encountered for each class during fitting. This
value is weighted by the sample weight when provided.
feature_count_ : array, shape (n_classes, n_features)
Number of samples encountered for each (class, feature)
during fitting. This value is weighted by the sample weight when
provided.
Examples
--------
>>> import numpy as np
>>> X = np.random.randint(5, size=(6, 100))
>>> y = np.array([1, 2, 3, 4, 5, 6])
>>> from sklearn.naive_bayes import MultinomialNB
>>> clf = MultinomialNB()
>>> clf.fit(X, y)
MultinomialNB(alpha=1.0, class_prior=None, fit_prior=True)
>>> print(clf.predict(X[2]))
[3]
Notes
-----
For the rationale behind the names `coef_` and `intercept_`, i.e.
naive Bayes as a linear classifier, see J. Rennie et al. (2003),
Tackling the poor assumptions of naive Bayes text classifiers, ICML.
References
----------
C.D. Manning, P. Raghavan and H. Schuetze (2008). Introduction to
Information Retrieval. Cambridge University Press, pp. 234-265.
http://nlp.stanford.edu/IR-book/html/htmledition/naive-bayes-text-classification-1.html
"""
def __init__(self, alpha=1.0, fit_prior=True, class_prior=None):
self.alpha = alpha
self.fit_prior = fit_prior
self.class_prior = class_prior
def _count(self, X, Y):
"""Count and smooth feature occurrences."""
if np.any((X.data if issparse(X) else X) < 0):
raise ValueError("Input X must be non-negative")
self.feature_count_ += safe_sparse_dot(Y.T, X)
self.class_count_ += Y.sum(axis=0)
def _update_feature_log_prob(self):
"""Apply smoothing to raw counts and recompute log probabilities"""
smoothed_fc = self.feature_count_ + self.alpha
smoothed_cc = smoothed_fc.sum(axis=1)
self.feature_log_prob_ = (np.log(smoothed_fc)
- np.log(smoothed_cc.reshape(-1, 1)))
def _joint_log_likelihood(self, X):
"""Calculate the posterior log probability of the samples X"""
check_is_fitted(self, "classes_")
X = check_array(X, accept_sparse='csr')
return (safe_sparse_dot(X, self.feature_log_prob_.T)
+ self.class_log_prior_)
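# Hedged aside (illustrative sketch, not part of scikit-learn itself): re-derive
# feature_log_prob_ for a tiny made-up 2-class problem with plain NumPy,
# mirroring the _count and _update_feature_log_prob steps above.
import numpy as np
from sklearn.naive_bayes import MultinomialNB
X_toy = np.array([[2., 1., 0.],
                  [1., 0., 3.],
                  [0., 4., 1.],
                  [1., 1., 1.]])
y_toy = np.array([0, 0, 1, 1])
clf_toy = MultinomialNB(alpha=1.0).fit(X_toy, y_toy)
Y_toy = np.eye(2)[y_toy]                      # one-hot labels, like the binarized Y in fit
feature_count = Y_toy.T.dot(X_toy)            # shape (n_classes, n_features)
smoothed_fc = feature_count + 1.0             # Laplace smoothing with alpha=1.0
smoothed_cc = smoothed_fc.sum(axis=1).reshape(-1, 1)
assert np.allclose(np.log(smoothed_fc) - np.log(smoothed_cc), clf_toy.feature_log_prob_)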
class BernoulliNB(BaseDiscreteNB):
"""Naive Bayes classifier for multivariate Bernoulli models.
Like MultinomialNB, this classifier is suitable for discrete data. The
difference is that while MultinomialNB works with occurrence counts,
BernoulliNB is designed for binary/boolean features.
Parameters
----------
alpha : float, optional (default=1.0)
Additive (Laplace/Lidstone) smoothing parameter
(0 for no smoothing).
binarize : float or None, optional
Threshold for binarizing (mapping to booleans) of sample features.
If None, input is presumed to already consist of binary vectors.
fit_prior : boolean
Whether to learn class prior probabilities or not.
If false, a uniform prior will be used.
class_prior : array-like, size=[n_classes,]
Prior probabilities of the classes. If specified the priors are not
adjusted according to the data.
Attributes
----------
class_log_prior_ : array, shape = [n_classes]
Log probability of each class (smoothed).
feature_log_prob_ : array, shape = [n_classes, n_features]
Empirical log probability of features given a class, P(x_i|y).
class_count_ : array, shape = [n_classes]
Number of samples encountered for each class during fitting. This
value is weighted by the sample weight when provided.
feature_count_ : array, shape = [n_classes, n_features]
Number of samples encountered for each (class, feature)
during fitting. This value is weighted by the sample weight when
provided.
Examples
--------
>>> import numpy as np
>>> X = np.random.randint(2, size=(6, 100))
>>> Y = np.array([1, 2, 3, 4, 4, 5])
>>> from sklearn.naive_bayes import BernoulliNB
>>> clf = BernoulliNB()
>>> clf.fit(X, Y)
BernoulliNB(alpha=1.0, binarize=0.0, class_prior=None, fit_prior=True)
>>> print(clf.predict(X[2]))
[3]
References
----------
C.D. Manning, P. Raghavan and H. Schuetze (2008). Introduction to
Information Retrieval. Cambridge University Press, pp. 234-265.
http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
A. McCallum and K. Nigam (1998). A comparison of event models for naive
Bayes text classification. Proc. AAAI/ICML-98 Workshop on Learning for
Text Categorization, pp. 41-48.
V. Metsis, I. Androutsopoulos and G. Paliouras (2006). Spam filtering with
naive Bayes -- Which naive Bayes? 3rd Conf. on Email and Anti-Spam (CEAS).
"""
def __init__(self, alpha=1.0, binarize=.0, fit_prior=True,
class_prior=None):
self.alpha = alpha
self.binarize = binarize
self.fit_prior = fit_prior
self.class_prior = class_prior
def _count(self, X, Y):
"""Count and smooth feature occurrences."""
if self.binarize is not None:
X = binarize(X, threshold=self.binarize)
self.feature_count_ += safe_sparse_dot(Y.T, X)
self.class_count_ += Y.sum(axis=0)
def _update_feature_log_prob(self):
"""Apply smoothing to raw counts and recompute log probabilities"""
smoothed_fc = self.feature_count_ + self.alpha
smoothed_cc = self.class_count_ + self.alpha * 2
self.feature_log_prob_ = (np.log(smoothed_fc)
- np.log(smoothed_cc.reshape(-1, 1)))
def _joint_log_likelihood(self, X):
"""Calculate the posterior log probability of the samples X"""
check_is_fitted(self, "classes_")
X = check_array(X, accept_sparse='csr')
if self.binarize is not None:
X = binarize(X, threshold=self.binarize)
n_classes, n_features = self.feature_log_prob_.shape
n_samples, n_features_X = X.shape
if n_features_X != n_features:
raise ValueError("Expected input with %d features, got %d instead"
% (n_features, n_features_X))
neg_prob = np.log(1 - np.exp(self.feature_log_prob_))
# Compute neg_prob · (1 - X).T as ∑neg_prob - X · neg_prob
jll = safe_sparse_dot(X, (self.feature_log_prob_ - neg_prob).T)
jll += self.class_log_prior_ + neg_prob.sum(axis=1)
return jll
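# Hedged aside (standalone sketch, not library code): check the identity behind
# _joint_log_likelihood above.  The Bernoulli log-likelihood
#     sum_i [x_i*log(p_i) + (1 - x_i)*log(1 - p_i)]
# equals sum_i log(1 - p_i) + x . (log(p) - log(1 - p)), which is why a single
# (sparse) dot product with X suffices.  The probabilities below are made up.
import numpy as np
_rng = np.random.RandomState(0)
p_toy = _rng.uniform(0.05, 0.95, size=(3, 6))      # P(x_i = 1 | class), 3 classes, 6 features
x_toy = _rng.randint(0, 2, size=6).astype(float)   # one binary sample
log_p, log_q = np.log(p_toy), np.log(1. - p_toy)
direct = (x_toy * log_p + (1. - x_toy) * log_q).sum(axis=1)
factored = log_q.sum(axis=1) + x_toy.dot((log_p - log_q).T)
assert np.allclose(direct, factored)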
| bsd-3-clause |
mas-dse-greina/neon | unet/keras_unet_lung_transposed.py | 1 | 9062 | #!/usr/bin/env python
'''
BEGIN - Limit TensorFlow to only use a specific GPU
'''
import os
gpu_num = 2
os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID' # see issue #152
os.environ['CUDA_VISIBLE_DEVICES'] = '{}'.format(gpu_num)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # Suppress TensorFlow debug messages
import tensorflow as tf
'''
END - Limit TensorFlow to only use a specific GPU
'''
import numpy as np # linear algebra
import cv2
from sklearn.model_selection import train_test_split
from keras.models import Model
from keras.layers import Input, Conv2D, MaxPooling2D, Conv2DTranspose, concatenate, UpSampling2D
from keras.optimizers import Adam
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import ModelCheckpoint
from keras import backend as K
from keras.utils import to_categorical
# configuration session
config = tf.ConfigProto()
config.gpu_options.allow_growth = False
config.gpu_options.per_process_gpu_memory_fraction = 0.8 # Use only 80% of available gpu memory
sess = tf.Session(config=config)
K.set_session(sess)
IMAGE_LIB = 'finding-lungs-in-ct-data/2d_images/'
MASK_LIB = 'finding-lungs-in-ct-data/2d_masks/'
IMG_HEIGHT, IMG_WIDTH = 256, 256
SEED=16
def dice_coef(y_true, y_pred, smooth = 1. ):
y_true_f = K.flatten(y_true)
y_pred_f = K.flatten(y_pred)
intersection = K.sum(y_true_f * y_pred_f)
coef = (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)
return coef
def dice_coef_loss(y_true, y_pred):
return -K.log(dice_coef(y_true, y_pred))
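# Hedged aside (not from the original script): a NumPy-only check of the Dice
# formula above on a tiny made-up pair of binary masks; smooth=1. keeps the
# ratio defined when both masks are empty.
import numpy as np
def dice_np(y_true, y_pred, smooth=1.):
    y_true = np.asarray(y_true, dtype=float).ravel()
    y_pred = np.asarray(y_pred, dtype=float).ravel()
    intersection = (y_true * y_pred).sum()
    return (2. * intersection + smooth) / (y_true.sum() + y_pred.sum() + smooth)
# e.g. dice_np([1, 1, 0, 0, 1, 0], [1, 0, 0, 0, 1, 1]) == (2*2 + 1) / (3 + 3 + 1) ~ 0.714
# and dice_np(m, m) == 1.0 for any non-empty binary mask m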
def img_generator(x_train, y_train, batch_size):
data_generator = ImageDataGenerator(
width_shift_range=0.1,
height_shift_range=0.1,
rotation_range=10,
zoom_range=0.1).flow(x_train, x_train, batch_size, seed=SEED)
mask_generator = ImageDataGenerator(
width_shift_range=0.1,
height_shift_range=0.1,
rotation_range=10,
zoom_range=0.1).flow(y_train, y_train, batch_size, seed=SEED)
while True:
x_batch, _ = data_generator.next()
y_batch, _ = mask_generator.next()
yield x_batch, y_batch
'''
Get all of the images in the data directory
'''
all_images = [x for x in sorted(os.listdir(IMAGE_LIB)) if x[-4:] == '.tif']
x_data = np.empty((len(all_images), IMG_HEIGHT, IMG_WIDTH), dtype='float32')
for i, name in enumerate(all_images):
im = cv2.imread(IMAGE_LIB + name, cv2.IMREAD_UNCHANGED).astype('float32')
im = cv2.resize(im, dsize=(IMG_WIDTH, IMG_HEIGHT), interpolation=cv2.INTER_LANCZOS4)
im = (im - np.min(im)) / (np.max(im) - np.min(im))
x_data[i] = im
y_data = np.empty((len(all_images), IMG_HEIGHT, IMG_WIDTH), dtype='float32')
for i, name in enumerate(all_images):
im = cv2.imread(MASK_LIB + name, cv2.IMREAD_UNCHANGED).astype('float32')/255.
im = cv2.resize(im, dsize=(IMG_WIDTH, IMG_HEIGHT), interpolation=cv2.INTER_NEAREST)
y_data[i] = im
x_data = x_data[:,:,:,np.newaxis]
y_data = y_data[:,:,:,np.newaxis]
x_train, x_val, y_train, y_val = train_test_split(x_data, y_data, test_size = 0.3)
print('X train shape = {}'.format(x_train.shape))
print('Y train shape = {}'.format(y_train.shape))
def create_unet_model(USE_ORIGINAL = True):
'''
This is based on the original paper.
Olaf Ronneberger, Philipp Fischer, and Thomas Brox, U-Net: Convolutional Networks for Biomedical Image Segmentation
https://arxiv.org/pdf/1505.04597.pdf
If USE_ORIGINAL is True, then follow the original UpPooling from the paper. If not, replace
UpPooling with a Transposed Convolution.
'''
inputs = Input((IMG_HEIGHT, IMG_WIDTH, 1))
# "Contracting path" (down the left side of the U)
# Each level doubles the number of feature maps but halves the map size
conv1 = Conv2D(filters=64, kernel_size=(3, 3), activation='relu', padding='same')(inputs)
conv1 = Conv2D(filters=64, kernel_size=(3, 3), activation='relu', padding='same', name='contract1')(conv1)
pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
conv2 = Conv2D(filters=128, kernel_size=(3, 3), activation='relu', padding='same')(pool1)
conv2 = Conv2D(filters=128, kernel_size=(3, 3), activation='relu', padding='same', name='contract2')(conv2)
pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
conv3 = Conv2D(filters=256, kernel_size=(3, 3), activation='relu', padding='same')(pool2)
conv3 = Conv2D(filters=256, kernel_size=(3, 3), activation='relu', padding='same', name='contract3')(conv3)
pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
conv4 = Conv2D(filters=512, kernel_size=(3, 3), activation='relu', padding='same')(pool3)
conv4 = Conv2D(filters=512, kernel_size=(3, 3), activation='relu', padding='same', name='contract4')(conv4)
pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)
conv5 = Conv2D(filters=1024, kernel_size=(3, 3), activation='relu', padding='same')(pool4)
conv5 = Conv2D(filters=1024, kernel_size=(3, 3), activation='relu', padding='same', name='contract5')(conv5)
# "Expansive path" (up the right side of the U)
# With each up layer, we concatenate
# the feature maps from the downsampling encoder part so that our
# classifier has features from multiple receptive field scales.
# There's a choice in how we should do the upward sampling.
# In the paper they perform an UpPooling to increase the feature map height and width.
# We could also replace the UpPooling with a Transposed Convolution to do the same thing.
if USE_ORIGINAL:
conv6 = UpSampling2D(size=(2,2), name='expand6_up')(conv5)
else:
conv6 = Conv2DTranspose(filters=512, kernel_size=(2, 2), strides=(2, 2), padding='same', name='expand6_trans')(conv5)
concat6 = concatenate([conv6, conv4], axis=3)
conv6 = Conv2D(filters=512, kernel_size=(3, 3), activation='relu', padding='same')(concat6)
conv6 = Conv2D(filters=512, kernel_size=(3, 3), activation='relu', padding='same')(conv6)
if USE_ORIGINAL:
conv7 = UpSampling2D(size=(2,2), name='expand7_up')(conv6)
else:
conv7 = Conv2DTranspose(filters=256, kernel_size=(2, 2), strides=(2, 2), padding='same', name='expand7_trans')(conv6)
concat7 = concatenate([conv7, conv3], axis=3)
conv7 = Conv2D(filters=256, kernel_size=(3, 3), activation='relu', padding='same')(concat7)
conv7 = Conv2D(filters=256, kernel_size=(3, 3), activation='relu', padding='same')(conv7)
if USE_ORIGINAL:
conv8 = UpSampling2D(size=(2,2), name='expand8_up')(conv7)
else:
conv8 = Conv2DTranspose(filters=128, kernel_size=(2, 2), strides=(2, 2), padding='same', name='expand8_trans')(conv7)
concat8 = concatenate([conv8, conv2], axis=3)
conv8 = Conv2D(filters=128, kernel_size=(3, 3), activation='relu', padding='same')(concat8)
conv8 = Conv2D(filters=128, kernel_size=(3, 3), activation='relu', padding='same')(conv8)
if USE_ORIGINAL:
conv9 = UpSampling2D(size=(2,2), name='expand9_up')(conv8)
else:
conv9 = Conv2DTranspose(filters=64, kernel_size=(2, 2), strides=(2, 2), padding='same', name='expand9_trans')(conv8)
concat9 = concatenate([conv9, conv1], axis=3)
conv9 = Conv2D(filters=64, kernel_size=(3, 3), activation='relu', padding='same')(concat9)
conv9 = Conv2D(filters=64, kernel_size=(3, 3), activation='relu', padding='same')(conv9)
output_layer = Conv2D(filters=1, kernel_size=(1,1), activation='sigmoid', name='Output')(conv9)
model = Model(inputs=[inputs], outputs=[output_layer])
# The paper uses binary_crossentropy for the loss function. However, dice_coefficient has been
# recommended as a better loss function for segmentation models.
model.compile(optimizer=Adam(lr=1e-5), loss=dice_coef_loss, metrics=[dice_coef])
return model
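# Hedged aside (illustrative shapes only, assuming standard Keras layer
# behaviour; not part of the training script): both upsampling choices double
# the spatial size, but only the transposed convolution adds trainable weights.
from keras.models import Sequential
_up = Sequential([UpSampling2D(size=(2, 2), input_shape=(8, 8, 4))])
_tr = Sequential([Conv2DTranspose(filters=4, kernel_size=(2, 2), strides=(2, 2),
                                  padding='same', input_shape=(8, 8, 4))])
print(_up.output_shape, _up.count_params())   # (None, 16, 16, 4) with 0 parameters
print(_tr.output_shape, _tr.count_params())   # (None, 16, 16, 4) with 2*2*4*4 + 4 = 68 parameters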
model = create_unet_model(USE_ORIGINAL=True)
model.summary()
save_weights = ModelCheckpoint('lung.h5', monitor='val_dice_coef',
save_best_only=True, save_weights_only=True)
batch_size = 8
hist = model.fit_generator(img_generator(x_train, y_train, batch_size),
steps_per_epoch = 200, #x_train.shape[0]//batch_size,
shuffle=True,
validation_data = (x_val, y_val),
epochs=10, verbose=1,
callbacks = [save_weights])
import matplotlib.pyplot as plt
model.load_weights('lung.h5')
y_hat = model.predict(x_val)
np.save('y_hat', y_hat)
np.save('y_val', y_val)
np.save('x_val', x_val)
fig, ax = plt.subplots(1,3,figsize=(12,6))
ax[0].imshow(x_val[0,:,:,0], cmap='gray')
ax[1].imshow(y_val[0,:,:,0])
ax[2].imshow(y_hat[0,:,:,0])
plt.savefig('lung_segmented1.png', dpi=600)
imgNum = 10
fig, ax = plt.subplots(1,3,figsize=(12,6))
ax[0].imshow(x_val[imgNum,:,:,0], cmap='gray')
ax[1].imshow(y_val[imgNum,:,:,0])
ax[2].imshow(y_hat[imgNum,:,:,0])
plt.savefig('lung_segmented{}.png'.format(imgNum), dpi=600)
print('FINISHED Keras UNet.')
| apache-2.0 |
yask123/scikit-learn | sklearn/linear_model/tests/test_ridge.py | 68 | 23597 | import numpy as np
import scipy.sparse as sp
from scipy import linalg
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn import datasets
from sklearn.metrics import mean_squared_error
from sklearn.metrics import make_scorer
from sklearn.metrics import get_scorer
from sklearn.linear_model.base import LinearRegression
from sklearn.linear_model.ridge import ridge_regression
from sklearn.linear_model.ridge import Ridge
from sklearn.linear_model.ridge import _RidgeGCV
from sklearn.linear_model.ridge import RidgeCV
from sklearn.linear_model.ridge import RidgeClassifier
from sklearn.linear_model.ridge import RidgeClassifierCV
from sklearn.linear_model.ridge import _solve_cholesky
from sklearn.linear_model.ridge import _solve_cholesky_kernel
from sklearn.grid_search import GridSearchCV
from sklearn.cross_validation import KFold
diabetes = datasets.load_diabetes()
X_diabetes, y_diabetes = diabetes.data, diabetes.target
ind = np.arange(X_diabetes.shape[0])
rng = np.random.RandomState(0)
rng.shuffle(ind)
ind = ind[:200]
X_diabetes, y_diabetes = X_diabetes[ind], y_diabetes[ind]
iris = datasets.load_iris()
X_iris = sp.csr_matrix(iris.data)
y_iris = iris.target
DENSE_FILTER = lambda X: X
SPARSE_FILTER = lambda X: sp.csr_matrix(X)
def test_ridge():
# Ridge regression convergence test using score
# TODO: for this test to be robust, we should use a dataset instead
# of np.random.
rng = np.random.RandomState(0)
alpha = 1.0
for solver in ("svd", "sparse_cg", "cholesky", "lsqr", "sag"):
# With more samples than features
n_samples, n_features = 6, 5
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
ridge = Ridge(alpha=alpha, solver=solver)
ridge.fit(X, y)
assert_equal(ridge.coef_.shape, (X.shape[1], ))
assert_greater(ridge.score(X, y), 0.47)
if solver in ("cholesky", "sag"):
# Currently the only solvers to support sample_weight.
ridge.fit(X, y, sample_weight=np.ones(n_samples))
assert_greater(ridge.score(X, y), 0.47)
# With more features than samples
n_samples, n_features = 5, 10
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
ridge = Ridge(alpha=alpha, solver=solver)
ridge.fit(X, y)
assert_greater(ridge.score(X, y), .9)
if solver in ("cholesky", "sag"):
# Currently the only solvers to support sample_weight.
ridge.fit(X, y, sample_weight=np.ones(n_samples))
assert_greater(ridge.score(X, y), 0.9)
def test_primal_dual_relationship():
y = y_diabetes.reshape(-1, 1)
coef = _solve_cholesky(X_diabetes, y, alpha=[1e-2])
K = np.dot(X_diabetes, X_diabetes.T)
dual_coef = _solve_cholesky_kernel(K, y, alpha=[1e-2])
coef2 = np.dot(X_diabetes.T, dual_coef).T
assert_array_almost_equal(coef, coef2)
def test_ridge_singular():
# test on a singular matrix
rng = np.random.RandomState(0)
n_samples, n_features = 6, 6
y = rng.randn(n_samples // 2)
y = np.concatenate((y, y))
X = rng.randn(n_samples // 2, n_features)
X = np.concatenate((X, X), axis=0)
ridge = Ridge(alpha=0)
ridge.fit(X, y)
assert_greater(ridge.score(X, y), 0.9)
def test_ridge_sample_weights():
rng = np.random.RandomState(0)
for solver in ("cholesky", ):
for n_samples, n_features in ((6, 5), (5, 10)):
for alpha in (1.0, 1e-2):
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
sample_weight = 1 + rng.rand(n_samples)
coefs = ridge_regression(X, y,
alpha=alpha,
sample_weight=sample_weight,
solver=solver)
# Sample weight can be implemented via a simple rescaling
# for the square loss.
coefs2 = ridge_regression(
X * np.sqrt(sample_weight)[:, np.newaxis],
y * np.sqrt(sample_weight),
alpha=alpha, solver=solver)
assert_array_almost_equal(coefs, coefs2)
# Test for fit_intercept = True
est = Ridge(alpha=alpha, solver=solver)
est.fit(X, y, sample_weight=sample_weight)
# Check using Newton's Method
# Quadratic function should be solved in a single step.
# Initialize
sample_weight = np.sqrt(sample_weight)
X_weighted = sample_weight[:, np.newaxis] * (
np.column_stack((np.ones(n_samples), X)))
y_weighted = y * sample_weight
# Gradient is (X*coef-y)*X + alpha*coef_[1:]
# Remove coef since it is initialized to zero.
grad = -np.dot(y_weighted, X_weighted)
# Hessian is (X.T*X) + alpha*I except that the first
# diagonal element should be zero, since there is no
# penalization of intercept.
diag = alpha * np.ones(n_features + 1)
diag[0] = 0.
hess = np.dot(X_weighted.T, X_weighted)
hess.flat[::n_features + 2] += diag
coef_ = - np.dot(linalg.inv(hess), grad)
assert_almost_equal(coef_[0], est.intercept_)
assert_array_almost_equal(coef_[1:], est.coef_)
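# Hedged aside (not one of the original tests): the rescaling identity the test
# above relies on, spelled out once outside the loops.  For the squared loss,
# weighting sample i by w_i is the same as scaling its row of X and its y by
# sqrt(w_i).  Data below is made up.
def _demo_sample_weight_rescaling():
    rng = np.random.RandomState(0)
    X = rng.randn(8, 3)
    y = rng.randn(8)
    w = 1 + rng.rand(8)
    coef_weighted = ridge_regression(X, y, alpha=1.0, sample_weight=w,
                                     solver="cholesky")
    coef_rescaled = ridge_regression(X * np.sqrt(w)[:, np.newaxis],
                                     y * np.sqrt(w), alpha=1.0,
                                     solver="cholesky")
    assert_array_almost_equal(coef_weighted, coef_rescaled)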
def test_ridge_shapes():
# Test shape of coef_ and intercept_
rng = np.random.RandomState(0)
n_samples, n_features = 5, 10
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
Y1 = y[:, np.newaxis]
Y = np.c_[y, 1 + y]
ridge = Ridge()
ridge.fit(X, y)
assert_equal(ridge.coef_.shape, (n_features,))
assert_equal(ridge.intercept_.shape, ())
ridge.fit(X, Y1)
assert_equal(ridge.coef_.shape, (1, n_features))
assert_equal(ridge.intercept_.shape, (1, ))
ridge.fit(X, Y)
assert_equal(ridge.coef_.shape, (2, n_features))
assert_equal(ridge.intercept_.shape, (2, ))
def test_ridge_intercept():
# Test intercept with multiple targets GH issue #708
rng = np.random.RandomState(0)
n_samples, n_features = 5, 10
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
Y = np.c_[y, 1. + y]
ridge = Ridge()
ridge.fit(X, y)
intercept = ridge.intercept_
ridge.fit(X, Y)
assert_almost_equal(ridge.intercept_[0], intercept)
assert_almost_equal(ridge.intercept_[1], intercept + 1.)
def test_toy_ridge_object():
# Test BayesianRegression ridge classifier
# TODO: test also n_samples > n_features
X = np.array([[1], [2]])
Y = np.array([1, 2])
clf = Ridge(alpha=0.0)
clf.fit(X, Y)
X_test = [[1], [2], [3], [4]]
assert_almost_equal(clf.predict(X_test), [1., 2, 3, 4])
assert_equal(len(clf.coef_.shape), 1)
assert_equal(type(clf.intercept_), np.float64)
Y = np.vstack((Y, Y)).T
clf.fit(X, Y)
X_test = [[1], [2], [3], [4]]
assert_equal(len(clf.coef_.shape), 2)
assert_equal(type(clf.intercept_), np.ndarray)
def test_ridge_vs_lstsq():
# On alpha=0., Ridge and OLS yield the same solution.
rng = np.random.RandomState(0)
# we need more samples than features
n_samples, n_features = 5, 4
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
ridge = Ridge(alpha=0., fit_intercept=False)
ols = LinearRegression(fit_intercept=False)
ridge.fit(X, y)
ols.fit(X, y)
assert_almost_equal(ridge.coef_, ols.coef_)
ridge.fit(X, y)
ols.fit(X, y)
assert_almost_equal(ridge.coef_, ols.coef_)
def test_ridge_individual_penalties():
# Tests the ridge object using individual penalties
rng = np.random.RandomState(42)
n_samples, n_features, n_targets = 20, 10, 5
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples, n_targets)
penalties = np.arange(n_targets)
coef_cholesky = np.array([
Ridge(alpha=alpha, solver="cholesky").fit(X, target).coef_
for alpha, target in zip(penalties, y.T)])
coefs_indiv_pen = [
Ridge(alpha=penalties, solver=solver, tol=1e-8).fit(X, y).coef_
for solver in ['svd', 'sparse_cg', 'lsqr', 'cholesky', 'sag']]
for coef_indiv_pen in coefs_indiv_pen:
assert_array_almost_equal(coef_cholesky, coef_indiv_pen)
# Test error is raised when number of targets and penalties do not match.
ridge = Ridge(alpha=penalties[:-1])
assert_raises(ValueError, ridge.fit, X, y)
def _test_ridge_loo(filter_):
# test that can work with both dense or sparse matrices
n_samples = X_diabetes.shape[0]
ret = []
ridge_gcv = _RidgeGCV(fit_intercept=False)
ridge = Ridge(alpha=1.0, fit_intercept=False)
# generalized cross-validation (efficient leave-one-out)
decomp = ridge_gcv._pre_compute(X_diabetes, y_diabetes)
errors, c = ridge_gcv._errors(1.0, y_diabetes, *decomp)
values, c = ridge_gcv._values(1.0, y_diabetes, *decomp)
# brute-force leave-one-out: remove one example at a time
errors2 = []
values2 = []
for i in range(n_samples):
sel = np.arange(n_samples) != i
X_new = X_diabetes[sel]
y_new = y_diabetes[sel]
ridge.fit(X_new, y_new)
value = ridge.predict([X_diabetes[i]])[0]
error = (y_diabetes[i] - value) ** 2
errors2.append(error)
values2.append(value)
# check that efficient and brute-force LOO give same results
assert_almost_equal(errors, errors2)
assert_almost_equal(values, values2)
# generalized cross-validation (efficient leave-one-out,
# SVD variation)
decomp = ridge_gcv._pre_compute_svd(X_diabetes, y_diabetes)
errors3, c = ridge_gcv._errors_svd(ridge.alpha, y_diabetes, *decomp)
values3, c = ridge_gcv._values_svd(ridge.alpha, y_diabetes, *decomp)
# check that efficient and SVD efficient LOO give same results
assert_almost_equal(errors, errors3)
assert_almost_equal(values, values3)
# check best alpha
ridge_gcv.fit(filter_(X_diabetes), y_diabetes)
alpha_ = ridge_gcv.alpha_
ret.append(alpha_)
# check that we get same best alpha with custom loss_func
f = ignore_warnings
scoring = make_scorer(mean_squared_error, greater_is_better=False)
ridge_gcv2 = RidgeCV(fit_intercept=False, scoring=scoring)
f(ridge_gcv2.fit)(filter_(X_diabetes), y_diabetes)
assert_equal(ridge_gcv2.alpha_, alpha_)
# check that we get same best alpha with custom score_func
func = lambda x, y: -mean_squared_error(x, y)
scoring = make_scorer(func)
ridge_gcv3 = RidgeCV(fit_intercept=False, scoring=scoring)
f(ridge_gcv3.fit)(filter_(X_diabetes), y_diabetes)
assert_equal(ridge_gcv3.alpha_, alpha_)
# check that we get same best alpha with a scorer
scorer = get_scorer('mean_squared_error')
ridge_gcv4 = RidgeCV(fit_intercept=False, scoring=scorer)
ridge_gcv4.fit(filter_(X_diabetes), y_diabetes)
assert_equal(ridge_gcv4.alpha_, alpha_)
# check that we get same best alpha with sample weights
ridge_gcv.fit(filter_(X_diabetes), y_diabetes,
sample_weight=np.ones(n_samples))
assert_equal(ridge_gcv.alpha_, alpha_)
# simulate several responses
Y = np.vstack((y_diabetes, y_diabetes)).T
ridge_gcv.fit(filter_(X_diabetes), Y)
Y_pred = ridge_gcv.predict(filter_(X_diabetes))
ridge_gcv.fit(filter_(X_diabetes), y_diabetes)
y_pred = ridge_gcv.predict(filter_(X_diabetes))
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T,
Y_pred, decimal=5)
return ret
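# Hedged aside (standalone sketch, not one of the helpers above): the
# leave-one-out shortcut that the efficient GCV path exploits.  For ridge
# without an intercept, with H = X (X^T X + alpha*I)^{-1} X^T, the LOO residual
# of sample i is (y_i - yhat_i) / (1 - H_ii), so no model has to be refit.
def _demo_ridge_loo_shortcut():
    rng = np.random.RandomState(0)
    X, y, alpha = rng.randn(30, 5), rng.randn(30), 1.0
    H = X.dot(linalg.solve(X.T.dot(X) + alpha * np.eye(5), X.T))
    loo_fast = (y - H.dot(y)) / (1 - np.diag(H))
    loo_brute = []
    for i in range(len(y)):
        keep = np.arange(len(y)) != i
        model = Ridge(alpha=alpha, fit_intercept=False).fit(X[keep], y[keep])
        loo_brute.append(y[i] - model.predict(X[i:i + 1])[0])
    assert_array_almost_equal(loo_fast, loo_brute)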
def _test_ridge_cv(filter_):
n_samples = X_diabetes.shape[0]
ridge_cv = RidgeCV()
ridge_cv.fit(filter_(X_diabetes), y_diabetes)
ridge_cv.predict(filter_(X_diabetes))
assert_equal(len(ridge_cv.coef_.shape), 1)
assert_equal(type(ridge_cv.intercept_), np.float64)
cv = KFold(n_samples, 5)
ridge_cv.set_params(cv=cv)
ridge_cv.fit(filter_(X_diabetes), y_diabetes)
ridge_cv.predict(filter_(X_diabetes))
assert_equal(len(ridge_cv.coef_.shape), 1)
assert_equal(type(ridge_cv.intercept_), np.float64)
def _test_ridge_diabetes(filter_):
ridge = Ridge(fit_intercept=False)
ridge.fit(filter_(X_diabetes), y_diabetes)
return np.round(ridge.score(filter_(X_diabetes), y_diabetes), 5)
def _test_multi_ridge_diabetes(filter_):
# simulate several responses
Y = np.vstack((y_diabetes, y_diabetes)).T
n_features = X_diabetes.shape[1]
ridge = Ridge(fit_intercept=False)
ridge.fit(filter_(X_diabetes), Y)
assert_equal(ridge.coef_.shape, (2, n_features))
Y_pred = ridge.predict(filter_(X_diabetes))
ridge.fit(filter_(X_diabetes), y_diabetes)
y_pred = ridge.predict(filter_(X_diabetes))
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T,
Y_pred, decimal=3)
def _test_ridge_classifiers(filter_):
n_classes = np.unique(y_iris).shape[0]
n_features = X_iris.shape[1]
for clf in (RidgeClassifier(), RidgeClassifierCV()):
clf.fit(filter_(X_iris), y_iris)
assert_equal(clf.coef_.shape, (n_classes, n_features))
y_pred = clf.predict(filter_(X_iris))
assert_greater(np.mean(y_iris == y_pred), .79)
n_samples = X_iris.shape[0]
cv = KFold(n_samples, 5)
clf = RidgeClassifierCV(cv=cv)
clf.fit(filter_(X_iris), y_iris)
y_pred = clf.predict(filter_(X_iris))
assert_true(np.mean(y_iris == y_pred) >= 0.8)
def _test_tolerance(filter_):
ridge = Ridge(tol=1e-5)
ridge.fit(filter_(X_diabetes), y_diabetes)
score = ridge.score(filter_(X_diabetes), y_diabetes)
ridge2 = Ridge(tol=1e-3)
ridge2.fit(filter_(X_diabetes), y_diabetes)
score2 = ridge2.score(filter_(X_diabetes), y_diabetes)
assert_true(score >= score2)
def test_dense_sparse():
for test_func in (_test_ridge_loo,
_test_ridge_cv,
_test_ridge_diabetes,
_test_multi_ridge_diabetes,
_test_ridge_classifiers,
_test_tolerance):
# test dense matrix
ret_dense = test_func(DENSE_FILTER)
# test sparse matrix
ret_sparse = test_func(SPARSE_FILTER)
# test that the outputs are the same
if ret_dense is not None and ret_sparse is not None:
assert_array_almost_equal(ret_dense, ret_sparse, decimal=3)
def test_ridge_cv_sparse_svd():
X = sp.csr_matrix(X_diabetes)
ridge = RidgeCV(gcv_mode="svd")
assert_raises(TypeError, ridge.fit, X, y_diabetes)
def test_ridge_sparse_svd():
X = sp.csc_matrix(rng.rand(100, 10))
y = rng.rand(100)
ridge = Ridge(solver='svd')
assert_raises(TypeError, ridge.fit, X, y)
def test_class_weights():
# Test class weights.
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = RidgeClassifier(class_weight=None)
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
# we give a small weights to class 1
clf = RidgeClassifier(class_weight={1: 0.001})
clf.fit(X, y)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
# check if class_weight = 'balanced' can handle negative labels.
clf = RidgeClassifier(class_weight='balanced')
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
# class_weight = 'balanced', and class_weight = None should return
# same values when y has equal number of all labels
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0], [1.0, 1.0]])
y = [1, 1, -1, -1]
clf = RidgeClassifier(class_weight=None)
clf.fit(X, y)
clfa = RidgeClassifier(class_weight='balanced')
clfa.fit(X, y)
assert_equal(len(clfa.classes_), 2)
assert_array_almost_equal(clf.coef_, clfa.coef_)
assert_array_almost_equal(clf.intercept_, clfa.intercept_)
def test_class_weight_vs_sample_weight():
"""Check class_weights resemble sample_weights behavior."""
for clf in (RidgeClassifier, RidgeClassifierCV):
# Iris is balanced, so no effect expected for using 'balanced' weights
clf1 = clf()
clf1.fit(iris.data, iris.target)
clf2 = clf(class_weight='balanced')
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.coef_, clf2.coef_)
# Inflate importance of class 1, check against user-defined weights
sample_weight = np.ones(iris.target.shape)
sample_weight[iris.target == 1] *= 100
class_weight = {0: 1., 1: 100., 2: 1.}
clf1 = clf()
clf1.fit(iris.data, iris.target, sample_weight)
clf2 = clf(class_weight=class_weight)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.coef_, clf2.coef_)
# Check that sample_weight and class_weight are multiplicative
clf1 = clf()
clf1.fit(iris.data, iris.target, sample_weight ** 2)
clf2 = clf(class_weight=class_weight)
clf2.fit(iris.data, iris.target, sample_weight)
assert_almost_equal(clf1.coef_, clf2.coef_)
def test_class_weights_cv():
# Test class weights for cross validated ridge classifier.
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = RidgeClassifierCV(class_weight=None, alphas=[.01, .1, 1])
clf.fit(X, y)
# we give a small weights to class 1
clf = RidgeClassifierCV(class_weight={1: 0.001}, alphas=[.01, .1, 1, 10])
clf.fit(X, y)
assert_array_equal(clf.predict([[-.2, 2]]), np.array([-1]))
def test_ridgecv_store_cv_values():
# Test _RidgeCV's store_cv_values attribute.
rng = np.random.RandomState(42)
n_samples = 8
n_features = 5
x = rng.randn(n_samples, n_features)
alphas = [1e-1, 1e0, 1e1]
n_alphas = len(alphas)
r = RidgeCV(alphas=alphas, store_cv_values=True)
# with len(y.shape) == 1
y = rng.randn(n_samples)
r.fit(x, y)
assert_equal(r.cv_values_.shape, (n_samples, n_alphas))
# with len(y.shape) == 2
n_responses = 3
y = rng.randn(n_samples, n_responses)
r.fit(x, y)
assert_equal(r.cv_values_.shape, (n_samples, n_responses, n_alphas))
def test_ridgecv_sample_weight():
rng = np.random.RandomState(0)
alphas = (0.1, 1.0, 10.0)
# There are different algorithms for n_samples > n_features
# and the opposite, so test them both.
for n_samples, n_features in ((6, 5), (5, 10)):
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
sample_weight = 1 + rng.rand(n_samples)
cv = KFold(n_samples, 5)
ridgecv = RidgeCV(alphas=alphas, cv=cv)
ridgecv.fit(X, y, sample_weight=sample_weight)
# Check using GridSearchCV directly
parameters = {'alpha': alphas}
fit_params = {'sample_weight': sample_weight}
gs = GridSearchCV(Ridge(), parameters, fit_params=fit_params,
cv=cv)
gs.fit(X, y)
assert_equal(ridgecv.alpha_, gs.best_estimator_.alpha)
assert_array_almost_equal(ridgecv.coef_, gs.best_estimator_.coef_)
def test_raises_value_error_if_sample_weights_greater_than_1d():
# Sample weights must be either scalar or 1D
n_sampless = [2, 3]
n_featuress = [3, 2]
rng = np.random.RandomState(42)
for n_samples, n_features in zip(n_sampless, n_featuress):
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
sample_weights_OK = rng.randn(n_samples) ** 2 + 1
sample_weights_OK_1 = 1.
sample_weights_OK_2 = 2.
sample_weights_not_OK = sample_weights_OK[:, np.newaxis]
sample_weights_not_OK_2 = sample_weights_OK[np.newaxis, :]
ridge = Ridge(alpha=1)
# make sure the "OK" sample weights actually work
ridge.fit(X, y, sample_weights_OK)
ridge.fit(X, y, sample_weights_OK_1)
ridge.fit(X, y, sample_weights_OK_2)
def fit_ridge_not_ok():
ridge.fit(X, y, sample_weights_not_OK)
def fit_ridge_not_ok_2():
ridge.fit(X, y, sample_weights_not_OK_2)
assert_raise_message(ValueError,
"Sample weights must be 1D array or scalar",
fit_ridge_not_ok)
assert_raise_message(ValueError,
"Sample weights must be 1D array or scalar",
fit_ridge_not_ok_2)
def test_sparse_design_with_sample_weights():
# Sample weights must work with sparse matrices
n_sampless = [2, 3]
n_featuress = [3, 2]
rng = np.random.RandomState(42)
sparse_matrix_converters = [sp.coo_matrix,
sp.csr_matrix,
sp.csc_matrix,
sp.lil_matrix,
sp.dok_matrix
]
sparse_ridge = Ridge(alpha=1., fit_intercept=False)
dense_ridge = Ridge(alpha=1., fit_intercept=False)
for n_samples, n_features in zip(n_sampless, n_featuress):
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
sample_weights = rng.randn(n_samples) ** 2 + 1
for sparse_converter in sparse_matrix_converters:
X_sparse = sparse_converter(X)
sparse_ridge.fit(X_sparse, y, sample_weight=sample_weights)
dense_ridge.fit(X, y, sample_weight=sample_weights)
assert_array_almost_equal(sparse_ridge.coef_, dense_ridge.coef_,
decimal=6)
def test_raises_value_error_if_solver_not_supported():
# Tests whether a ValueError is raised if a non-identified solver
# is passed to ridge_regression
wrong_solver = "This is not a solver (MagritteSolveCV QuantumBitcoin)"
exception = ValueError
message = "Solver %s not understood" % wrong_solver
def func():
X = np.eye(3)
y = np.ones(3)
ridge_regression(X, y, alpha=1., solver=wrong_solver)
assert_raise_message(exception, message, func)
def test_sparse_cg_max_iter():
reg = Ridge(solver="sparse_cg", max_iter=1)
reg.fit(X_diabetes, y_diabetes)
assert_equal(reg.coef_.shape[0], X_diabetes.shape[1])
@ignore_warnings
def test_n_iter():
# Test that self.n_iter_ is correct.
n_targets = 2
X, y = X_diabetes, y_diabetes
y_n = np.tile(y, (n_targets, 1)).T
for max_iter in range(1, 4):
for solver in ('sag', 'lsqr'):
reg = Ridge(solver=solver, max_iter=max_iter, tol=1e-12)
reg.fit(X, y_n)
assert_array_equal(reg.n_iter_, np.tile(max_iter, n_targets))
for solver in ('sparse_cg', 'svd', 'cholesky'):
reg = Ridge(solver=solver, max_iter=1, tol=1e-1)
reg.fit(X, y_n)
assert_equal(reg.n_iter_, None)
| bsd-3-clause |
RondaStrauch/landlab | landlab/plot/drainage_plot.py | 4 | 3499 | """Plot drainage network.
"""
# KRB, FEB 2017.
import six
from landlab import CORE_NODE, FIXED_VALUE_BOUNDARY, FIXED_GRADIENT_BOUNDARY, CLOSED_BOUNDARY
import matplotlib.pylab as plt
from landlab.plot.imshow import imshow_node_grid
import numpy as np
def drainage_plot(mg,
surface='topographic__elevation',
receivers=None,
proportions=None,
surf_cmap='gray',
quiver_cmap='viridis',
title = 'Drainage Plot'):
if isinstance(surface, six.string_types):
colorbar_label = surface
else:
colorbar_label = 'topographic_elevation'
imshow_node_grid(mg, surface, cmap=surf_cmap, colorbar_label=colorbar_label)
if receivers is None:
try:
receivers = mg.at_node['flow__receiver_nodes']
if proportions is None:
try:
proportions = mg.at_node['flow__receiver_proportions']
except:
pass
except:
receivers = np.reshape(mg.at_node['flow__receiver_node'],(mg.number_of_nodes,1))
nreceivers = int(receivers.size/receivers.shape[0])
propColor=plt.get_cmap(quiver_cmap)
for j in range(nreceivers):
rec = receivers[:,j]
is_bad = rec == -1
xdist = -0.8*(mg.node_x-mg.node_x[rec])
ydist = -0.8*(mg.node_y-mg.node_y[rec])
if proportions is None:
proportions = np.ones_like(receivers, dtype=float)
is_bad[proportions[:,j]==0.]=True
xdist[is_bad] = np.nan
ydist[is_bad] = np.nan
prop = proportions[:,j]*256.
lu = np.floor(prop)
colors = propColor(lu.astype(int))
shape = (mg.number_of_nodes, 1)
plt.quiver(mg.node_x.reshape(shape), mg.node_y.reshape(shape),
xdist.reshape(shape), ydist.reshape(shape),
color=colors,
angles='xy',
scale_units='xy',
scale=1,
zorder=3)
# Plot different types of nodes:
o, = plt.plot(mg.node_x[mg.status_at_node == CORE_NODE], mg.node_y[mg.status_at_node == CORE_NODE], 'b.', label='Core Nodes', zorder=4)
fv, = plt.plot(mg.node_x[mg.status_at_node == FIXED_VALUE_BOUNDARY], mg.node_y[mg.status_at_node == FIXED_VALUE_BOUNDARY], 'c.', label='Fixed Value Nodes', zorder=5)
fg, = plt.plot(mg.node_x[mg.status_at_node == FIXED_GRADIENT_BOUNDARY], mg.node_y[mg.status_at_node == FIXED_GRADIENT_BOUNDARY], 'g.', label='Fixed Gradient Nodes', zorder=6)
c, = plt.plot(mg.node_x[mg.status_at_node == CLOSED_BOUNDARY], mg.node_y[mg.status_at_node ==CLOSED_BOUNDARY], 'r.', label='Closed Nodes', zorder=7)
node_id = np.arange(mg.number_of_nodes)
flow_to_self = receivers[:,0]==node_id
fts, = plt.plot(mg.node_x[flow_to_self], mg.node_y[flow_to_self], 'kx', markersize=6, label = 'Flows To Self', zorder=8)
ax = plt.gca()
ax.legend(labels = ['Core Nodes', 'Fixed Gradient Nodes', 'Fixed Value Nodes', 'Closed Nodes', 'Flows To Self'],
handles = [o, fg, fv, c, fts], numpoints=1, loc='center left', bbox_to_anchor=(1.7, 0.5))
sm = plt.cm.ScalarMappable(cmap=propColor, norm=plt.Normalize(vmin=0, vmax=1))
sm._A = []
cx = plt.colorbar(sm)
cx.set_label('Proportion of Flow')
plt.title(title)
plt.show() | mit |
ashhher3/scikit-learn | examples/decomposition/plot_pca_vs_fa_model_selection.py | 30 | 4516 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=================================================================
Model selection with Probabilistic (PCA) and Factor Analysis (FA)
=================================================================
Probabilistic PCA and Factor Analysis are probabilistic models.
The consequence is that the likelihood of new data can be used
for model selection and covariance estimation.
Here we compare PCA and FA with cross-validation on low rank data corrupted
with homoscedastic noise (noise variance
is the same for each feature) or heteroscedastic noise (noise variance
is different for each feature). In a second step we compare the model
likelihood to the likelihoods obtained from shrinkage covariance estimators.
One can observe that with homoscedastic noise both FA and PCA succeed
in recovering the size of the low rank subspace. The likelihood with PCA
is higher than FA in this case. However PCA fails and overestimates
the rank when heteroscedastic noise is present. Under appropriate
circumstances the low rank models are more likely than shrinkage models.
The automatic estimation from
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604
by Thomas P. Minka is also compared.
"""
print(__doc__)
# Authors: Alexandre Gramfort
# Denis A. Engemann
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg
from sklearn.decomposition import PCA, FactorAnalysis
from sklearn.covariance import ShrunkCovariance, LedoitWolf
from sklearn.cross_validation import cross_val_score
from sklearn.grid_search import GridSearchCV
###############################################################################
# Create the data
n_samples, n_features, rank = 1000, 50, 10
sigma = 1.
rng = np.random.RandomState(42)
U, _, _ = linalg.svd(rng.randn(n_features, n_features))
X = np.dot(rng.randn(n_samples, rank), U[:, :rank].T)
# Adding homoscedastic noise
X_homo = X + sigma * rng.randn(n_samples, n_features)
# Adding heteroscedastic noise
sigmas = sigma * rng.rand(n_features) + sigma / 2.
X_hetero = X + rng.randn(n_samples, n_features) * sigmas
###############################################################################
# Fit the models
n_components = np.arange(0, n_features, 5) # options for n_components
def compute_scores(X):
pca = PCA()
fa = FactorAnalysis()
pca_scores, fa_scores = [], []
for n in n_components:
pca.n_components = n
fa.n_components = n
pca_scores.append(np.mean(cross_val_score(pca, X)))
fa_scores.append(np.mean(cross_val_score(fa, X)))
return pca_scores, fa_scores
def shrunk_cov_score(X):
shrinkages = np.logspace(-2, 0, 30)
cv = GridSearchCV(ShrunkCovariance(), {'shrinkage': shrinkages})
return np.mean(cross_val_score(cv.fit(X).best_estimator_, X))
def lw_score(X):
return np.mean(cross_val_score(LedoitWolf(), X))
for X, title in [(X_homo, 'Homoscedastic Noise'),
(X_hetero, 'Heteroscedastic Noise')]:
pca_scores, fa_scores = compute_scores(X)
n_components_pca = n_components[np.argmax(pca_scores)]
n_components_fa = n_components[np.argmax(fa_scores)]
pca = PCA(n_components='mle')
pca.fit(X)
n_components_pca_mle = pca.n_components_
print("best n_components by PCA CV = %d" % n_components_pca)
print("best n_components by FactorAnalysis CV = %d" % n_components_fa)
print("best n_components by PCA MLE = %d" % n_components_pca_mle)
plt.figure()
plt.plot(n_components, pca_scores, 'b', label='PCA scores')
plt.plot(n_components, fa_scores, 'r', label='FA scores')
plt.axvline(rank, color='g', label='TRUTH: %d' % rank, linestyle='-')
plt.axvline(n_components_pca, color='b',
label='PCA CV: %d' % n_components_pca, linestyle='--')
plt.axvline(n_components_fa, color='r',
label='FactorAnalysis CV: %d' % n_components_fa, linestyle='--')
plt.axvline(n_components_pca_mle, color='k',
label='PCA MLE: %d' % n_components_pca_mle, linestyle='--')
# compare with other covariance estimators
plt.axhline(shrunk_cov_score(X), color='violet',
label='Shrunk Covariance MLE', linestyle='-.')
plt.axhline(lw_score(X), color='orange',
label='LedoitWolf MLE', linestyle='-.')
plt.xlabel('nb of components')
plt.ylabel('CV scores')
plt.legend(loc='lower right')
plt.title(title)
plt.show()
| bsd-3-clause |
glennq/scikit-learn | examples/linear_model/plot_lasso_lars.py | 363 | 1080 | #!/usr/bin/env python
"""
=====================
Lasso path using LARS
=====================
Computes Lasso Path along the regularization parameter using the LARS
algorithm on the diabetes dataset. Each color represents a different
feature of the coefficient vector, and this is displayed as a function
of the regularization parameter.
"""
print(__doc__)
# Author: Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn import datasets
diabetes = datasets.load_diabetes()
X = diabetes.data
y = diabetes.target
print("Computing regularization path using the LARS ...")
alphas, _, coefs = linear_model.lars_path(X, y, method='lasso', verbose=True)
xx = np.sum(np.abs(coefs.T), axis=1)
xx /= xx[-1]
plt.plot(xx, coefs.T)
ymin, ymax = plt.ylim()
plt.vlines(xx, ymin, ymax, linestyle='dashed')
plt.xlabel('|coef| / max|coef|')
plt.ylabel('Coefficients')
plt.title('LASSO Path')
plt.axis('tight')
plt.show()
| bsd-3-clause |
jblackburne/scikit-learn | sklearn/datasets/samples_generator.py | 26 | 56554 | """
Generate samples of synthetic data sets.
"""
# Authors: B. Thirion, G. Varoquaux, A. Gramfort, V. Michel, O. Grisel,
# G. Louppe, J. Nothman
# License: BSD 3 clause
import numbers
import array
import numpy as np
from scipy import linalg
import scipy.sparse as sp
from ..preprocessing import MultiLabelBinarizer
from ..utils import check_array, check_random_state
from ..utils import shuffle as util_shuffle
from ..utils.fixes import astype
from ..utils.random import sample_without_replacement
from ..externals import six
map = six.moves.map
zip = six.moves.zip
def _generate_hypercube(samples, dimensions, rng):
"""Returns distinct binary samples of length dimensions
"""
if dimensions > 30:
return np.hstack([_generate_hypercube(samples, dimensions - 30, rng),
_generate_hypercube(samples, 30, rng)])
out = astype(sample_without_replacement(2 ** dimensions, samples,
random_state=rng),
dtype='>u4', copy=False)
out = np.unpackbits(out.view('>u1')).reshape((-1, 32))[:, -dimensions:]
return out
def make_classification(n_samples=100, n_features=20, n_informative=2,
n_redundant=2, n_repeated=0, n_classes=2,
n_clusters_per_class=2, weights=None, flip_y=0.01,
class_sep=1.0, hypercube=True, shift=0.0, scale=1.0,
shuffle=True, random_state=None):
"""Generate a random n-class classification problem.
This initially creates clusters of points normally distributed (std=1)
about vertices of a `2 * class_sep`-sided hypercube, and assigns an equal
number of clusters to each class. It introduces interdependence between
these features and adds various types of further noise to the data.
Prior to shuffling, `X` stacks a number of these primary "informative"
features, "redundant" linear combinations of these, "repeated" duplicates
of sampled features, and arbitrary noise for and remaining features.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=20)
The total number of features. These comprise `n_informative`
informative features, `n_redundant` redundant features, `n_repeated`
duplicated features and `n_features-n_informative-n_redundant-
n_repeated` useless features drawn at random.
n_informative : int, optional (default=2)
The number of informative features. Each class is composed of a number
of gaussian clusters each located around the vertices of a hypercube
in a subspace of dimension `n_informative`. For each cluster,
informative features are drawn independently from N(0, 1) and then
randomly linearly combined within each cluster in order to add
covariance. The clusters are then placed on the vertices of the
hypercube.
n_redundant : int, optional (default=2)
The number of redundant features. These features are generated as
random linear combinations of the informative features.
n_repeated : int, optional (default=0)
The number of duplicated features, drawn randomly from the informative
and the redundant features.
n_classes : int, optional (default=2)
The number of classes (or labels) of the classification problem.
n_clusters_per_class : int, optional (default=2)
The number of clusters per class.
weights : list of floats or None (default=None)
The proportions of samples assigned to each class. If None, then
classes are balanced. Note that if `len(weights) == n_classes - 1`,
then the last class weight is automatically inferred.
More than `n_samples` samples may be returned if the sum of `weights`
exceeds 1.
flip_y : float, optional (default=0.01)
The fraction of samples whose class are randomly exchanged.
class_sep : float, optional (default=1.0)
The factor multiplying the hypercube dimension.
hypercube : boolean, optional (default=True)
If True, the clusters are put on the vertices of a hypercube. If
False, the clusters are put on the vertices of a random polytope.
shift : float, array of shape [n_features] or None, optional (default=0.0)
Shift features by the specified value. If None, then features
are shifted by a random value drawn in [-class_sep, class_sep].
scale : float, array of shape [n_features] or None, optional (default=1.0)
Multiply features by the specified value. If None, then features
are scaled by a random value drawn in [1, 100]. Note that scaling
happens after shifting.
shuffle : boolean, optional (default=True)
Shuffle the samples and the features.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for class membership of each sample.
Notes
-----
The algorithm is adapted from Guyon [1] and was designed to generate
the "Madelon" dataset.
References
----------
.. [1] I. Guyon, "Design of experiments for the NIPS 2003 variable
selection benchmark", 2003.
See also
--------
make_blobs: simplified variant
make_multilabel_classification: unrelated generator for multilabel tasks
"""
generator = check_random_state(random_state)
# Count features, clusters and samples
if n_informative + n_redundant + n_repeated > n_features:
raise ValueError("Number of informative, redundant and repeated "
"features must sum to less than the number of total"
" features")
if 2 ** n_informative < n_classes * n_clusters_per_class:
raise ValueError("n_classes * n_clusters_per_class must"
" be smaller or equal 2 ** n_informative")
if weights and len(weights) not in [n_classes, n_classes - 1]:
raise ValueError("Weights specified but incompatible with number "
"of classes.")
n_useless = n_features - n_informative - n_redundant - n_repeated
n_clusters = n_classes * n_clusters_per_class
if weights and len(weights) == (n_classes - 1):
weights.append(1.0 - sum(weights))
if weights is None:
weights = [1.0 / n_classes] * n_classes
weights[-1] = 1.0 - sum(weights[:-1])
# Distribute samples among clusters by weight
n_samples_per_cluster = []
for k in range(n_clusters):
n_samples_per_cluster.append(int(n_samples * weights[k % n_classes]
/ n_clusters_per_class))
for i in range(n_samples - sum(n_samples_per_cluster)):
n_samples_per_cluster[i % n_clusters] += 1
# Initialize X and y
X = np.zeros((n_samples, n_features))
y = np.zeros(n_samples, dtype=np.int)
# Build the polytope whose vertices become cluster centroids
centroids = _generate_hypercube(n_clusters, n_informative,
generator).astype(float)
centroids *= 2 * class_sep
centroids -= class_sep
if not hypercube:
centroids *= generator.rand(n_clusters, 1)
centroids *= generator.rand(1, n_informative)
# Initially draw informative features from the standard normal
X[:, :n_informative] = generator.randn(n_samples, n_informative)
# Create each cluster; a variant of make_blobs
stop = 0
for k, centroid in enumerate(centroids):
start, stop = stop, stop + n_samples_per_cluster[k]
y[start:stop] = k % n_classes # assign labels
X_k = X[start:stop, :n_informative] # slice a view of the cluster
A = 2 * generator.rand(n_informative, n_informative) - 1
X_k[...] = np.dot(X_k, A) # introduce random covariance
X_k += centroid # shift the cluster to a vertex
# Create redundant features
if n_redundant > 0:
B = 2 * generator.rand(n_informative, n_redundant) - 1
X[:, n_informative:n_informative + n_redundant] = \
np.dot(X[:, :n_informative], B)
# Repeat some features
if n_repeated > 0:
n = n_informative + n_redundant
indices = ((n - 1) * generator.rand(n_repeated) + 0.5).astype(np.intp)
X[:, n:n + n_repeated] = X[:, indices]
# Fill useless features
if n_useless > 0:
X[:, -n_useless:] = generator.randn(n_samples, n_useless)
# Randomly replace labels
if flip_y >= 0.0:
flip_mask = generator.rand(n_samples) < flip_y
y[flip_mask] = generator.randint(n_classes, size=flip_mask.sum())
# Randomly shift and scale
if shift is None:
shift = (2 * generator.rand(n_features) - 1) * class_sep
X += shift
if scale is None:
scale = 1 + 100 * generator.rand(n_features)
X *= scale
if shuffle:
# Randomly permute samples
X, y = util_shuffle(X, y, random_state=generator)
# Randomly permute features
indices = np.arange(n_features)
generator.shuffle(indices)
X[:, :] = X[:, indices]
return X, y
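# Hedged usage sketch (illustrative only, not part of the public module): the
# shapes and class balance produced by the defaults documented above.
def _demo_make_classification():
    X, y = make_classification(n_samples=200, n_features=20, n_informative=2,
                               n_redundant=2, n_classes=2, random_state=0)
    assert X.shape == (200, 20) and y.shape == (200,)
    assert set(np.unique(y)) == {0, 1}          # two roughly balanced classes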
def make_multilabel_classification(n_samples=100, n_features=20, n_classes=5,
n_labels=2, length=50, allow_unlabeled=True,
sparse=False, return_indicator='dense',
return_distributions=False,
random_state=None):
"""Generate a random multilabel classification problem.
For each sample, the generative process is:
- pick the number of labels: n ~ Poisson(n_labels)
- n times, choose a class c: c ~ Multinomial(theta)
- pick the document length: k ~ Poisson(length)
- k times, choose a word: w ~ Multinomial(theta_c)
In the above process, rejection sampling is used to make sure that
n is never zero or more than `n_classes`, and that the document length
is never zero. Likewise, we reject classes which have already been chosen.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=20)
The total number of features.
n_classes : int, optional (default=5)
The number of classes of the classification problem.
n_labels : int, optional (default=2)
The average number of labels per instance. More precisely, the number
of labels per sample is drawn from a Poisson distribution with
``n_labels`` as its expected value, but samples are bounded (using
rejection sampling) by ``n_classes``, and must be nonzero if
``allow_unlabeled`` is False.
length : int, optional (default=50)
The sum of the features (number of words if documents) is drawn from
a Poisson distribution with this expected value.
allow_unlabeled : bool, optional (default=True)
If ``True``, some instances might not belong to any class.
sparse : bool, optional (default=False)
If ``True``, return a sparse feature matrix
.. versionadded:: 0.17
parameter to allow *sparse* output.
return_indicator : 'dense' (default) | 'sparse' | False
If ``dense`` return ``Y`` in the dense binary indicator format. If
``'sparse'`` return ``Y`` in the sparse binary indicator format.
``False`` returns a list of lists of labels.
return_distributions : bool, optional (default=False)
If ``True``, return the prior class probability and conditional
probabilities of features given classes, from which the data was
drawn.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
Y : array or sparse CSR matrix of shape [n_samples, n_classes]
The label sets.
p_c : array, shape [n_classes]
The probability of each class being drawn. Only returned if
``return_distributions=True``.
p_w_c : array, shape [n_features, n_classes]
The probability of each feature being drawn given each class.
Only returned if ``return_distributions=True``.
"""
generator = check_random_state(random_state)
p_c = generator.rand(n_classes)
p_c /= p_c.sum()
cumulative_p_c = np.cumsum(p_c)
p_w_c = generator.rand(n_features, n_classes)
p_w_c /= np.sum(p_w_c, axis=0)
def sample_example():
_, n_classes = p_w_c.shape
# pick a nonzero number of labels per document by rejection sampling
y_size = n_classes + 1
while (not allow_unlabeled and y_size == 0) or y_size > n_classes:
y_size = generator.poisson(n_labels)
# pick n classes
y = set()
while len(y) != y_size:
# pick a class with probability P(c)
c = np.searchsorted(cumulative_p_c,
generator.rand(y_size - len(y)))
y.update(c)
y = list(y)
# pick a non-zero document length by rejection sampling
n_words = 0
while n_words == 0:
n_words = generator.poisson(length)
# generate a document of length n_words
if len(y) == 0:
# if sample does not belong to any class, generate noise word
words = generator.randint(n_features, size=n_words)
return words, y
# sample words with replacement from selected classes
cumulative_p_w_sample = p_w_c.take(y, axis=1).sum(axis=1).cumsum()
cumulative_p_w_sample /= cumulative_p_w_sample[-1]
words = np.searchsorted(cumulative_p_w_sample, generator.rand(n_words))
return words, y
X_indices = array.array('i')
X_indptr = array.array('i', [0])
Y = []
for i in range(n_samples):
words, y = sample_example()
X_indices.extend(words)
X_indptr.append(len(X_indices))
Y.append(y)
X_data = np.ones(len(X_indices), dtype=np.float64)
X = sp.csr_matrix((X_data, X_indices, X_indptr),
shape=(n_samples, n_features))
X.sum_duplicates()
if not sparse:
X = X.toarray()
# return_indicator can be True due to backward compatibility
if return_indicator in (True, 'sparse', 'dense'):
lb = MultiLabelBinarizer(sparse_output=(return_indicator == 'sparse'))
Y = lb.fit([range(n_classes)]).transform(Y)
elif return_indicator is not False:
raise ValueError("return_indicator must be either 'sparse', 'dense' "
'or False.')
if return_distributions:
return X, Y, p_c, p_w_c
return X, Y
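# Hedged usage sketch (illustrative only): the dense indicator output documented
# above, plus the optional generative distributions.
def _demo_make_multilabel_classification():
    X, Y, p_c, p_w_c = make_multilabel_classification(
        n_samples=50, n_features=10, n_classes=4, n_labels=2,
        return_distributions=True, random_state=0)
    assert X.shape == (50, 10) and Y.shape == (50, 4)    # dense binary indicator
    assert p_c.shape == (4,) and p_w_c.shape == (10, 4)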
def make_hastie_10_2(n_samples=12000, random_state=None):
"""Generates data for binary classification used in
Hastie et al. 2009, Example 10.2.
The ten features are standard independent Gaussian and
the target ``y`` is defined by::
y[i] = 1 if np.sum(X[i] ** 2) > 9.34 else -1
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=12000)
The number of samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 10]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
See also
--------
make_gaussian_quantiles: a generalization of this dataset approach
"""
rs = check_random_state(random_state)
shape = (n_samples, 10)
X = rs.normal(size=shape).reshape(shape)
y = ((X ** 2.0).sum(axis=1) > 9.34).astype(np.float64)
y[y == 0.0] = -1.0
return X, y
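# Hedged aside (illustrative only): 9.34 is roughly the median of a chi-squared
# variable with 10 degrees of freedom, so the two classes come out near 50/50.
def _demo_make_hastie_10_2():
    X, y = make_hastie_10_2(n_samples=2000, random_state=0)
    assert X.shape == (2000, 10) and set(np.unique(y)) == {-1.0, 1.0}
    assert abs(np.mean(y == 1.0) - 0.5) < 0.05           # near-balanced classes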
def make_regression(n_samples=100, n_features=100, n_informative=10,
n_targets=1, bias=0.0, effective_rank=None,
tail_strength=0.5, noise=0.0, shuffle=True, coef=False,
random_state=None):
"""Generate a random regression problem.
The input set can either be well conditioned (by default) or have a low
rank-fat tail singular profile. See :func:`make_low_rank_matrix` for
more details.
The output is generated by applying a (potentially biased) random linear
regression model with `n_informative` nonzero regressors to the previously
generated input and some gaussian centered noise with some adjustable
scale.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=100)
The number of features.
n_informative : int, optional (default=10)
The number of informative features, i.e., the number of features used
to build the linear model used to generate the output.
n_targets : int, optional (default=1)
The number of regression targets, i.e., the dimension of the y output
vector associated with a sample. By default, the output is a scalar.
bias : float, optional (default=0.0)
The bias term in the underlying linear model.
effective_rank : int or None, optional (default=None)
if not None:
The approximate number of singular vectors required to explain most
of the input data by linear combinations. Using this kind of
singular spectrum in the input allows the generator to reproduce
the correlations often observed in practice.
if None:
The input set is well conditioned, centered and gaussian with
unit variance.
tail_strength : float between 0.0 and 1.0, optional (default=0.5)
The relative importance of the fat noisy tail of the singular values
profile if `effective_rank` is not None.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
shuffle : boolean, optional (default=True)
Shuffle the samples and the features.
coef : boolean, optional (default=False)
If True, the coefficients of the underlying linear model are returned.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples] or [n_samples, n_targets]
The output values.
coef : array of shape [n_features] or [n_features, n_targets], optional
The coefficient of the underlying linear model. It is returned only if
coef is True.
"""
n_informative = min(n_features, n_informative)
generator = check_random_state(random_state)
if effective_rank is None:
# Randomly generate a well conditioned input set
X = generator.randn(n_samples, n_features)
else:
# Randomly generate a low rank, fat tail input set
X = make_low_rank_matrix(n_samples=n_samples,
n_features=n_features,
effective_rank=effective_rank,
tail_strength=tail_strength,
random_state=generator)
# Generate a ground-truth model with only n_informative non-zero features
# (the other features are uncorrelated with y and should be ignored by
# sparsifying regularizers such as L1 or elastic net)
ground_truth = np.zeros((n_features, n_targets))
ground_truth[:n_informative, :] = 100 * generator.rand(n_informative,
n_targets)
y = np.dot(X, ground_truth) + bias
# Add noise
if noise > 0.0:
y += generator.normal(scale=noise, size=y.shape)
# Randomly permute samples and features
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
indices = np.arange(n_features)
generator.shuffle(indices)
X[:, :] = X[:, indices]
ground_truth = ground_truth[indices]
y = np.squeeze(y)
if coef:
return X, y, np.squeeze(ground_truth)
else:
return X, y
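# A minimal usage sketch of ``make_regression`` with the ground-truth
# coefficients returned (parameter values are illustrative only):
#
#   >>> from sklearn.datasets import make_regression
#   >>> X, y, w = make_regression(n_samples=50, n_features=20,
#   ...                           n_informative=5, coef=True, random_state=0)
#   >>> X.shape, y.shape, w.shape
#   ((50, 20), (50,), (20,))
#   >>> (w != 0).sum()  # at most n_informative coefficients are non-zero
#   5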
def make_circles(n_samples=100, shuffle=True, noise=None, random_state=None,
factor=.8):
"""Make a large circle containing a smaller circle in 2d.
A simple toy dataset to visualize clustering and classification
algorithms.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points generated.
shuffle : bool, optional (default=True)
Whether to shuffle the samples.
noise : double or None (default=None)
Standard deviation of Gaussian noise added to the data.
factor : double < 1 (default=.8)
Scale factor between inner and outer circle.
Returns
-------
X : array of shape [n_samples, 2]
The generated samples.
y : array of shape [n_samples]
The integer labels (0 or 1) for class membership of each sample.
"""
if factor > 1 or factor < 0:
raise ValueError("'factor' has to be between 0 and 1.")
generator = check_random_state(random_state)
# so as not to have the first point = last point, we add one and then
# remove it.
linspace = np.linspace(0, 2 * np.pi, n_samples // 2 + 1)[:-1]
outer_circ_x = np.cos(linspace)
outer_circ_y = np.sin(linspace)
inner_circ_x = outer_circ_x * factor
inner_circ_y = outer_circ_y * factor
X = np.vstack((np.append(outer_circ_x, inner_circ_x),
np.append(outer_circ_y, inner_circ_y))).T
y = np.hstack([np.zeros(n_samples // 2, dtype=np.intp),
np.ones(n_samples // 2, dtype=np.intp)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
if noise is not None:
X += generator.normal(scale=noise, size=X.shape)
return X, y
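# A minimal usage sketch of ``make_circles``; the two classes are balanced
# because ``n_samples // 2`` points are drawn on each circle (values below
# are illustrative only):
#
#   >>> import numpy as np
#   >>> from sklearn.datasets import make_circles
#   >>> X, y = make_circles(n_samples=100, factor=0.5, noise=0.05,
#   ...                     random_state=0)
#   >>> X.shape, np.bincount(y)
#   ((100, 2), array([50, 50]))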
def make_moons(n_samples=100, shuffle=True, noise=None, random_state=None):
"""Make two interleaving half circles
A simple toy dataset to visualize clustering and classification
algorithms.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points generated.
shuffle : bool, optional (default=True)
Whether to shuffle the samples.
noise : double or None (default=None)
Standard deviation of Gaussian noise added to the data.
Read more in the :ref:`User Guide <sample_generators>`.
Returns
-------
X : array of shape [n_samples, 2]
The generated samples.
y : array of shape [n_samples]
The integer labels (0 or 1) for class membership of each sample.
"""
n_samples_out = n_samples // 2
n_samples_in = n_samples - n_samples_out
generator = check_random_state(random_state)
outer_circ_x = np.cos(np.linspace(0, np.pi, n_samples_out))
outer_circ_y = np.sin(np.linspace(0, np.pi, n_samples_out))
inner_circ_x = 1 - np.cos(np.linspace(0, np.pi, n_samples_in))
inner_circ_y = 1 - np.sin(np.linspace(0, np.pi, n_samples_in)) - .5
X = np.vstack((np.append(outer_circ_x, inner_circ_x),
np.append(outer_circ_y, inner_circ_y))).T
# the first n_samples_out points are the outer moon (class 0), the rest
# the inner moon (class 1)
y = np.hstack([np.zeros(n_samples_out, dtype=np.intp),
np.ones(n_samples_in, dtype=np.intp)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
if noise is not None:
X += generator.normal(scale=noise, size=X.shape)
return X, y
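# A minimal usage sketch of ``make_moons`` (illustrative values only):
#
#   >>> import numpy as np
#   >>> from sklearn.datasets import make_moons
#   >>> X, y = make_moons(n_samples=100, noise=0.1, random_state=0)
#   >>> X.shape, np.bincount(y)
#   ((100, 2), array([50, 50]))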
def make_blobs(n_samples=100, n_features=2, centers=3, cluster_std=1.0,
center_box=(-10.0, 10.0), shuffle=True, random_state=None):
"""Generate isotropic Gaussian blobs for clustering.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points equally divided among clusters.
n_features : int, optional (default=2)
The number of features for each sample.
centers : int or array of shape [n_centers, n_features], optional
(default=3)
The number of centers to generate, or the fixed center locations.
cluster_std : float or sequence of floats, optional (default=1.0)
The standard deviation of the clusters.
center_box : pair of floats (min, max), optional (default=(-10.0, 10.0))
The bounding box for each cluster center when centers are
generated at random.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for cluster membership of each sample.
Examples
--------
>>> from sklearn.datasets.samples_generator import make_blobs
>>> X, y = make_blobs(n_samples=10, centers=3, n_features=2,
... random_state=0)
>>> print(X.shape)
(10, 2)
>>> y
array([0, 0, 1, 0, 2, 2, 2, 1, 1, 0])
See also
--------
make_classification: a more intricate variant
"""
generator = check_random_state(random_state)
if isinstance(centers, numbers.Integral):
centers = generator.uniform(center_box[0], center_box[1],
size=(centers, n_features))
else:
centers = check_array(centers)
n_features = centers.shape[1]
if isinstance(cluster_std, numbers.Real):
cluster_std = np.ones(len(centers)) * cluster_std
X = []
y = []
n_centers = centers.shape[0]
n_samples_per_center = [int(n_samples // n_centers)] * n_centers
for i in range(n_samples % n_centers):
n_samples_per_center[i] += 1
for i, (n, std) in enumerate(zip(n_samples_per_center, cluster_std)):
X.append(centers[i] + generator.normal(scale=std,
size=(n, n_features)))
y += [i] * n
X = np.concatenate(X)
y = np.array(y)
if shuffle:
indices = np.arange(n_samples)
generator.shuffle(indices)
X = X[indices]
y = y[indices]
return X, y
def make_friedman1(n_samples=100, n_features=10, noise=0.0, random_state=None):
"""Generate the "Friedman \#1" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are independent features uniformly distributed on the interval
[0, 1]. The output `y` is created according to the formula::
y(X) = 10 * sin(pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \
+ 10 * X[:, 3] + 5 * X[:, 4] + noise * N(0, 1).
Out of the `n_features` features, only 5 are actually used to compute
`y`. The remaining features are independent of `y`.
The number of features has to be >= 5.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=10)
The number of features. Should be at least 5.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
if n_features < 5:
raise ValueError("n_features must be at least five.")
generator = check_random_state(random_state)
X = generator.rand(n_samples, n_features)
y = 10 * np.sin(np.pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \
+ 10 * X[:, 3] + 5 * X[:, 4] + noise * generator.randn(n_samples)
return X, y
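# A minimal check of the documented Friedman #1 formula with ``noise=0.0``;
# only the first five features enter the target (values are illustrative):
#
#   >>> import numpy as np
#   >>> from sklearn.datasets import make_friedman1
#   >>> X, y = make_friedman1(n_samples=200, n_features=10, noise=0.0,
#   ...                       random_state=0)
#   >>> expected = (10 * np.sin(np.pi * X[:, 0] * X[:, 1])
#   ...             + 20 * (X[:, 2] - 0.5) ** 2 + 10 * X[:, 3] + 5 * X[:, 4])
#   >>> np.allclose(y, expected)
#   True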
def make_friedman2(n_samples=100, noise=0.0, random_state=None):
"""Generate the "Friedman \#2" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are 4 independent features uniformly distributed on the
intervals::
0 <= X[:, 0] <= 100,
40 * pi <= X[:, 1] <= 560 * pi,
0 <= X[:, 2] <= 1,
1 <= X[:, 3] <= 11.
The output `y` is created according to the formula::
y(X) = (X[:, 0] ** 2 + (X[:, 1] * X[:, 2] \
- 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 + noise * N(0, 1).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 4]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
generator = check_random_state(random_state)
X = generator.rand(n_samples, 4)
X[:, 0] *= 100
X[:, 1] *= 520 * np.pi
X[:, 1] += 40 * np.pi
X[:, 3] *= 10
X[:, 3] += 1
y = (X[:, 0] ** 2
+ (X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 \
+ noise * generator.randn(n_samples)
return X, y
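# A minimal check of the documented Friedman #2 formula with ``noise=0.0``
# (values are illustrative only):
#
#   >>> import numpy as np
#   >>> from sklearn.datasets import make_friedman2
#   >>> X, y = make_friedman2(n_samples=100, noise=0.0, random_state=0)
#   >>> expected = (X[:, 0] ** 2
#   ...             + (X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5
#   >>> np.allclose(y, expected)
#   True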
def make_friedman3(n_samples=100, noise=0.0, random_state=None):
"""Generate the "Friedman \#3" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are 4 independent features uniformly distributed on the
intervals::
0 <= X[:, 0] <= 100,
40 * pi <= X[:, 1] <= 560 * pi,
0 <= X[:, 2] <= 1,
1 <= X[:, 3] <= 11.
The output `y` is created according to the formula::
y(X) = arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) \
/ X[:, 0]) + noise * N(0, 1).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 4]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
generator = check_random_state(random_state)
X = generator.rand(n_samples, 4)
X[:, 0] *= 100
X[:, 1] *= 520 * np.pi
X[:, 1] += 40 * np.pi
X[:, 3] *= 10
X[:, 3] += 1
y = np.arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) / X[:, 0]) \
+ noise * generator.randn(n_samples)
return X, y
def make_low_rank_matrix(n_samples=100, n_features=100, effective_rank=10,
tail_strength=0.5, random_state=None):
"""Generate a mostly low rank matrix with bell-shaped singular values
Most of the variance can be explained by a bell-shaped curve of width
effective_rank: the low rank part of the singular values profile is::
(1 - tail_strength) * exp(-1.0 * (i / effective_rank) ** 2)
The remaining singular values' tail is fat, decreasing as::
tail_strength * exp(-0.1 * i / effective_rank).
The low rank part of the profile can be considered the structured
signal part of the data while the tail can be considered the noisy
part of the data that cannot be summarized by a low number of linear
components (singular vectors).
This kind of singular profile is often seen in practice, for instance:
- gray level pictures of faces
- TF-IDF vectors of text documents crawled from the web
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=100)
The number of features.
effective_rank : int, optional (default=10)
The approximate number of singular vectors required to explain most of
the data by linear combinations.
tail_strength : float between 0.0 and 1.0, optional (default=0.5)
The relative importance of the fat noisy tail of the singular values
profile.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The matrix.
"""
generator = check_random_state(random_state)
n = min(n_samples, n_features)
# Random (ortho normal) vectors
u, _ = linalg.qr(generator.randn(n_samples, n), mode='economic')
v, _ = linalg.qr(generator.randn(n_features, n), mode='economic')
# Index of the singular values
singular_ind = np.arange(n, dtype=np.float64)
# Build the singular profile by assembling signal and noise components
low_rank = ((1 - tail_strength) *
np.exp(-1.0 * (singular_ind / effective_rank) ** 2))
tail = tail_strength * np.exp(-0.1 * singular_ind / effective_rank)
s = np.identity(n) * (low_rank + tail)
return np.dot(np.dot(u, s), v.T)
def make_sparse_coded_signal(n_samples, n_components, n_features,
n_nonzero_coefs, random_state=None):
"""Generate a signal as a sparse combination of dictionary elements.
Returns a matrix Y = DX, such that D is (n_features, n_components),
X is (n_components, n_samples) and each column of X has exactly
n_nonzero_coefs non-zero elements.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int
number of samples to generate
n_components : int
number of components in the dictionary
n_features : int
number of features of the dataset to generate
n_nonzero_coefs : int
number of active (non-zero) coefficients in each sample
random_state : int or RandomState instance, optional (default=None)
seed used by the pseudo random number generator
Returns
-------
data : array of shape [n_features, n_samples]
The encoded signal (Y).
dictionary : array of shape [n_features, n_components]
The dictionary with normalized components (D).
code : array of shape [n_components, n_samples]
The sparse code such that each column of this matrix has exactly
n_nonzero_coefs non-zero items (X).
"""
generator = check_random_state(random_state)
# generate dictionary
D = generator.randn(n_features, n_components)
D /= np.sqrt(np.sum((D ** 2), axis=0))
# generate code
X = np.zeros((n_components, n_samples))
for i in range(n_samples):
idx = np.arange(n_components)
generator.shuffle(idx)
idx = idx[:n_nonzero_coefs]
X[idx, i] = generator.randn(n_nonzero_coefs)
# encode signal
Y = np.dot(D, X)
return tuple(map(np.squeeze, (Y, D, X)))
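# A minimal usage sketch of ``make_sparse_coded_signal``; every column of the
# returned code has ``n_nonzero_coefs`` active entries (values illustrative):
#
#   >>> from sklearn.datasets import make_sparse_coded_signal
#   >>> Y, D, code = make_sparse_coded_signal(n_samples=10, n_components=30,
#   ...                                       n_features=20, n_nonzero_coefs=3,
#   ...                                       random_state=0)
#   >>> Y.shape, D.shape, code.shape
#   ((20, 10), (20, 30), (30, 10))
#   >>> (code[:, 0] != 0).sum()  # n_nonzero_coefs per column (almost surely)
#   3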
def make_sparse_uncorrelated(n_samples=100, n_features=10, random_state=None):
"""Generate a random regression problem with sparse uncorrelated design
This dataset is described in Celeux et al. [1] as::
X ~ N(0, 1)
y(X) = X[:, 0] + 2 * X[:, 1] - 2 * X[:, 2] - 1.5 * X[:, 3]
Only the first 4 features are informative. The remaining features are
useless.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=10)
The number of features.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] G. Celeux, M. El Anbari, J.-M. Marin, C. P. Robert,
"Regularization in regression: comparing Bayesian and frequentist
methods in a poorly informative situation", 2009.
"""
generator = check_random_state(random_state)
X = generator.normal(loc=0, scale=1, size=(n_samples, n_features))
y = generator.normal(loc=(X[:, 0] +
2 * X[:, 1] -
2 * X[:, 2] -
1.5 * X[:, 3]), scale=np.ones(n_samples))
return X, y
def make_spd_matrix(n_dim, random_state=None):
"""Generate a random symmetric, positive-definite matrix.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_dim : int
The matrix dimension.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_dim, n_dim]
The random symmetric, positive-definite matrix.
See also
--------
make_sparse_spd_matrix
"""
generator = check_random_state(random_state)
A = generator.rand(n_dim, n_dim)
U, s, V = linalg.svd(np.dot(A.T, A))
X = np.dot(np.dot(U, 1.0 + np.diag(generator.rand(n_dim))), V)
return X
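# A minimal check that ``make_spd_matrix`` yields a symmetric matrix with
# strictly positive eigenvalues, up to floating-point error (illustrative):
#
#   >>> import numpy as np
#   >>> from sklearn.datasets import make_spd_matrix
#   >>> A = make_spd_matrix(n_dim=4, random_state=0)
#   >>> np.allclose(A, A.T)
#   True
#   >>> np.all(np.linalg.eigvalsh(A) > 0)
#   True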
def make_sparse_spd_matrix(dim=1, alpha=0.95, norm_diag=False,
smallest_coef=.1, largest_coef=.9,
random_state=None):
"""Generate a sparse symmetric definite positive matrix.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
dim : integer, optional (default=1)
The size of the random matrix to generate.
alpha : float between 0 and 1, optional (default=0.95)
The probability that a coefficient is zero (see notes). Larger values
enforce more sparsity.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
largest_coef : float between 0 and 1, optional (default=0.9)
The value of the largest coefficient.
smallest_coef : float between 0 and 1, optional (default=0.1)
The value of the smallest coefficient.
norm_diag : boolean, optional (default=False)
Whether to normalize the output matrix to make the leading diagonal
elements all 1
Returns
-------
prec : sparse matrix of shape (dim, dim)
The generated matrix.
Notes
-----
The sparsity is actually imposed on the Cholesky factor of the matrix.
Thus alpha does not translate directly into the filling fraction of
the matrix itself.
See also
--------
make_spd_matrix
"""
random_state = check_random_state(random_state)
chol = -np.eye(dim)
aux = random_state.rand(dim, dim)
aux[aux < alpha] = 0
aux[aux > alpha] = (smallest_coef
+ (largest_coef - smallest_coef)
* random_state.rand(np.sum(aux > alpha)))
aux = np.tril(aux, k=-1)
# Permute the lines: we don't want to have asymmetries in the final
# SPD matrix
permutation = random_state.permutation(dim)
aux = aux[permutation].T[permutation]
chol += aux
prec = np.dot(chol.T, chol)
if norm_diag:
# Form the diagonal vector into a row matrix
d = np.diag(prec).reshape(1, prec.shape[0])
d = 1. / np.sqrt(d)
prec *= d
prec *= d.T
return prec
def make_swiss_roll(n_samples=100, noise=0.0, random_state=None):
"""Generate a swiss roll dataset.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of sample points on the S curve.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 3]
The points.
t : array of shape [n_samples]
The univariate position of the sample according to the main dimension
of the points in the manifold.
Notes
-----
The algorithm is from Marsland [1].
References
----------
.. [1] S. Marsland, "Machine Learning: An Algorithmic Perspective",
Chapter 10, 2009.
http://seat.massey.ac.nz/personal/s.r.marsland/Code/10/lle.py
"""
generator = check_random_state(random_state)
t = 1.5 * np.pi * (1 + 2 * generator.rand(1, n_samples))
x = t * np.cos(t)
y = 21 * generator.rand(1, n_samples)
z = t * np.sin(t)
X = np.concatenate((x, y, z))
X += noise * generator.randn(3, n_samples)
X = X.T
t = np.squeeze(t)
return X, t
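# A minimal usage sketch of ``make_swiss_roll``; ``t`` is the position along
# the roll and is commonly used to colour the points (illustrative values):
#
#   >>> from sklearn.datasets import make_swiss_roll
#   >>> X, t = make_swiss_roll(n_samples=100, noise=0.0, random_state=0)
#   >>> X.shape, t.shape
#   ((100, 3), (100,))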
def make_s_curve(n_samples=100, noise=0.0, random_state=None):
"""Generate an S curve dataset.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of sample points on the S curve.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 3]
The points.
t : array of shape [n_samples]
The univariate position of the sample according to the main dimension
of the points in the manifold.
"""
generator = check_random_state(random_state)
t = 3 * np.pi * (generator.rand(1, n_samples) - 0.5)
x = np.sin(t)
y = 2.0 * generator.rand(1, n_samples)
z = np.sign(t) * (np.cos(t) - 1)
X = np.concatenate((x, y, z))
X += noise * generator.randn(3, n_samples)
X = X.T
t = np.squeeze(t)
return X, t
def make_gaussian_quantiles(mean=None, cov=1., n_samples=100,
n_features=2, n_classes=3,
shuffle=True, random_state=None):
"""Generate isotropic Gaussian and label samples by quantile
This classification dataset is constructed by taking a multi-dimensional
standard normal distribution and defining classes separated by nested
concentric multi-dimensional spheres such that roughly equal numbers of
samples are in each class (quantiles of the :math:`\chi^2` distribution).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
mean : array of shape [n_features], optional (default=None)
The mean of the multi-dimensional normal distribution.
If None then use the origin (0, 0, ...).
cov : float, optional (default=1.)
The covariance matrix will be this value times the unit matrix. This
dataset only produces symmetric normal distributions.
n_samples : int, optional (default=100)
The total number of points equally divided among classes.
n_features : int, optional (default=2)
The number of features for each sample.
n_classes : int, optional (default=3)
The number of classes
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for quantile membership of each sample.
Notes
-----
The dataset is from Zhu et al. [1].
References
----------
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
if n_samples < n_classes:
raise ValueError("n_samples must be at least n_classes")
generator = check_random_state(random_state)
if mean is None:
mean = np.zeros(n_features)
else:
mean = np.array(mean)
# Build multivariate normal distribution
X = generator.multivariate_normal(mean, cov * np.identity(n_features),
(n_samples,))
# Sort by distance from origin
idx = np.argsort(np.sum((X - mean[np.newaxis, :]) ** 2, axis=1))
X = X[idx, :]
# Label by quantile
step = n_samples // n_classes
y = np.hstack([np.repeat(np.arange(n_classes), step),
np.repeat(n_classes - 1, n_samples - step * n_classes)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
return X, y
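# A minimal usage sketch of ``make_gaussian_quantiles``; the classes are
# (near) equally populated because labels are assigned by quantile
# (illustrative values only):
#
#   >>> import numpy as np
#   >>> from sklearn.datasets import make_gaussian_quantiles
#   >>> X, y = make_gaussian_quantiles(n_samples=90, n_features=2,
#   ...                                n_classes=3, random_state=0)
#   >>> X.shape, np.bincount(y)
#   ((90, 2), array([30, 30, 30]))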
def _shuffle(data, random_state=None):
generator = check_random_state(random_state)
n_rows, n_cols = data.shape
row_idx = generator.permutation(n_rows)
col_idx = generator.permutation(n_cols)
result = data[row_idx][:, col_idx]
return result, row_idx, col_idx
def make_biclusters(shape, n_clusters, noise=0.0, minval=10,
maxval=100, shuffle=True, random_state=None):
"""Generate an array with constant block diagonal structure for
biclustering.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
shape : iterable (n_rows, n_cols)
The shape of the result.
n_clusters : integer
The number of biclusters.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
minval : int, optional (default=10)
Minimum value of a bicluster.
maxval : int, optional (default=100)
Maximum value of a bicluster.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape `shape`
The generated array.
rows : array of shape (n_clusters, X.shape[0],)
The indicators for cluster membership of each row.
cols : array of shape (n_clusters, X.shape[1],)
The indicators for cluster membership of each column.
References
----------
.. [1] Dhillon, I. S. (2001, August). Co-clustering documents and
words using bipartite spectral graph partitioning. In Proceedings
of the seventh ACM SIGKDD international conference on Knowledge
discovery and data mining (pp. 269-274). ACM.
See also
--------
make_checkerboard
"""
generator = check_random_state(random_state)
n_rows, n_cols = shape
consts = generator.uniform(minval, maxval, n_clusters)
# row and column clusters of approximately equal sizes
row_sizes = generator.multinomial(n_rows,
np.repeat(1.0 / n_clusters,
n_clusters))
col_sizes = generator.multinomial(n_cols,
np.repeat(1.0 / n_clusters,
n_clusters))
row_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_clusters), row_sizes)))
col_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_clusters), col_sizes)))
result = np.zeros(shape, dtype=np.float64)
for i in range(n_clusters):
selector = np.outer(row_labels == i, col_labels == i)
result[selector] += consts[i]
if noise > 0:
result += generator.normal(scale=noise, size=result.shape)
if shuffle:
result, row_idx, col_idx = _shuffle(result, random_state)
row_labels = row_labels[row_idx]
col_labels = col_labels[col_idx]
rows = np.vstack([row_labels == c for c in range(n_clusters)])
cols = np.vstack([col_labels == c for c in range(n_clusters)])
return result, rows, cols
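# A minimal usage sketch of ``make_biclusters``; ``rows`` and ``cols`` are
# boolean indicator arrays, one row per bicluster (illustrative values only):
#
#   >>> from sklearn.datasets import make_biclusters
#   >>> data, rows, cols = make_biclusters(shape=(30, 20), n_clusters=3,
#   ...                                    random_state=0)
#   >>> data.shape, rows.shape, cols.shape
#   ((30, 20), (3, 30), (3, 20))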
def make_checkerboard(shape, n_clusters, noise=0.0, minval=10,
maxval=100, shuffle=True, random_state=None):
"""Generate an array with block checkerboard structure for
biclustering.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
shape : iterable (n_rows, n_cols)
The shape of the result.
n_clusters : integer or iterable (n_row_clusters, n_column_clusters)
The number of row and column clusters.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
minval : int, optional (default=10)
Minimum value of a bicluster.
maxval : int, optional (default=100)
Maximum value of a bicluster.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape `shape`
The generated array.
rows : array of shape (n_clusters, X.shape[0],)
The indicators for cluster membership of each row.
cols : array of shape (n_clusters, X.shape[1],)
The indicators for cluster membership of each column.
References
----------
.. [1] Kluger, Y., Basri, R., Chang, J. T., & Gerstein, M. (2003).
Spectral biclustering of microarray data: coclustering genes
and conditions. Genome research, 13(4), 703-716.
See also
--------
make_biclusters
"""
generator = check_random_state(random_state)
if hasattr(n_clusters, "__len__"):
n_row_clusters, n_col_clusters = n_clusters
else:
n_row_clusters = n_col_clusters = n_clusters
# row and column clusters of approximately equal sizes
n_rows, n_cols = shape
row_sizes = generator.multinomial(n_rows,
np.repeat(1.0 / n_row_clusters,
n_row_clusters))
col_sizes = generator.multinomial(n_cols,
np.repeat(1.0 / n_col_clusters,
n_col_clusters))
row_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_row_clusters), row_sizes)))
col_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_col_clusters), col_sizes)))
result = np.zeros(shape, dtype=np.float64)
for i in range(n_row_clusters):
for j in range(n_col_clusters):
selector = np.outer(row_labels == i, col_labels == j)
result[selector] += generator.uniform(minval, maxval)
if noise > 0:
result += generator.normal(scale=noise, size=result.shape)
if shuffle:
result, row_idx, col_idx = _shuffle(result, random_state)
row_labels = row_labels[row_idx]
col_labels = col_labels[col_idx]
rows = np.vstack([row_labels == label
for label in range(n_row_clusters)
for _ in range(n_col_clusters)])
cols = np.vstack([col_labels == label
for _ in range(n_row_clusters)
for label in range(n_col_clusters)])
return result, rows, cols
| bsd-3-clause |
nesterione/scikit-learn | sklearn/feature_selection/rfe.py | 137 | 17066 | # Authors: Alexandre Gramfort <[email protected]>
# Vincent Michel <[email protected]>
# Gilles Louppe <[email protected]>
#
# License: BSD 3 clause
"""Recursive feature elimination for feature ranking"""
import warnings
import numpy as np
from ..utils import check_X_y, safe_sqr
from ..utils.metaestimators import if_delegate_has_method
from ..base import BaseEstimator
from ..base import MetaEstimatorMixin
from ..base import clone
from ..base import is_classifier
from ..cross_validation import check_cv
from ..cross_validation import _safe_split, _score
from ..metrics.scorer import check_scoring
from .base import SelectorMixin
class RFE(BaseEstimator, MetaEstimatorMixin, SelectorMixin):
"""Feature ranking with recursive feature elimination.
Given an external estimator that assigns weights to features (e.g., the
coefficients of a linear model), the goal of recursive feature elimination
(RFE) is to select features by recursively considering smaller and smaller
sets of features. First, the estimator is trained on the initial set of
features and weights are assigned to each one of them. Then, features whose
absolute weights are the smallest are pruned from the current set of features.
That procedure is recursively repeated on the pruned set until the desired
number of features to select is eventually reached.
Read more in the :ref:`User Guide <rfe>`.
Parameters
----------
estimator : object
A supervised learning estimator with a `fit` method that provides
information about feature importance either through a `coef_`
attribute or through a `feature_importances_` attribute. Important
features must correspond to high absolute values in that array.
For instance, this is the case for most supervised learning
algorithms such as Support Vector Classifiers and Generalized
Linear Models from the `svm` and `linear_model` modules.
n_features_to_select : int or None (default=None)
The number of features to select. If `None`, half of the features
are selected.
step : int or float, optional (default=1)
If greater than or equal to 1, then `step` corresponds to the (integer)
number of features to remove at each iteration.
If within (0.0, 1.0), then `step` corresponds to the percentage
(rounded down) of features to remove at each iteration.
estimator_params : dict
Parameters for the external estimator.
This attribute is deprecated as of version 0.16 and will be removed in
0.18. Use estimator initialisation or set_params method instead.
verbose : int, default=0
Controls verbosity of output.
Attributes
----------
n_features_ : int
The number of selected features.
support_ : array of shape [n_features]
The mask of selected features.
ranking_ : array of shape [n_features]
The feature ranking, such that ``ranking_[i]`` corresponds to the
ranking position of the i-th feature. Selected (i.e., estimated
best) features are assigned rank 1.
estimator_ : object
The external estimator fit on the reduced dataset.
Examples
--------
The following example shows how to retrieve the 5 most informative
features in the Friedman #1 dataset.
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.feature_selection import RFE
>>> from sklearn.svm import SVR
>>> X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
>>> estimator = SVR(kernel="linear")
>>> selector = RFE(estimator, 5, step=1)
>>> selector = selector.fit(X, y)
>>> selector.support_ # doctest: +NORMALIZE_WHITESPACE
array([ True, True, True, True, True,
False, False, False, False, False], dtype=bool)
>>> selector.ranking_
array([1, 1, 1, 1, 1, 6, 4, 3, 2, 5])
References
----------
.. [1] Guyon, I., Weston, J., Barnhill, S., & Vapnik, V., "Gene selection
for cancer classification using support vector machines",
Mach. Learn., 46(1-3), 389--422, 2002.
"""
def __init__(self, estimator, n_features_to_select=None, step=1,
estimator_params=None, verbose=0):
self.estimator = estimator
self.n_features_to_select = n_features_to_select
self.step = step
self.estimator_params = estimator_params
self.verbose = verbose
@property
def _estimator_type(self):
return self.estimator._estimator_type
def fit(self, X, y):
"""Fit the RFE model and then the underlying estimator on the selected
features.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
The training input samples.
y : array-like, shape = [n_samples]
The target values.
"""
return self._fit(X, y)
def _fit(self, X, y, step_score=None):
X, y = check_X_y(X, y, "csc")
# Initialization
n_features = X.shape[1]
if self.n_features_to_select is None:
n_features_to_select = n_features // 2
else:
n_features_to_select = self.n_features_to_select
if 0.0 < self.step < 1.0:
step = int(max(1, self.step * n_features))
else:
step = int(self.step)
if step <= 0:
raise ValueError("Step must be >0")
if self.estimator_params is not None:
warnings.warn("The parameter 'estimator_params' is deprecated as "
"of version 0.16 and will be removed in 0.18. The "
"parameter is no longer necessary because the value "
"is set via the estimator initialisation or "
"set_params method.", DeprecationWarning)
support_ = np.ones(n_features, dtype=np.bool)
ranking_ = np.ones(n_features, dtype=np.int)
if step_score:
self.scores_ = []
# Elimination
while np.sum(support_) > n_features_to_select:
# Remaining features
features = np.arange(n_features)[support_]
# Rank the remaining features
estimator = clone(self.estimator)
if self.estimator_params:
estimator.set_params(**self.estimator_params)
if self.verbose > 0:
print("Fitting estimator with %d features." % np.sum(support_))
estimator.fit(X[:, features], y)
# Get coefs
if hasattr(estimator, 'coef_'):
coefs = estimator.coef_
elif hasattr(estimator, 'feature_importances_'):
coefs = estimator.feature_importances_
else:
raise RuntimeError('The classifier does not expose '
'"coef_" or "feature_importances_" '
'attributes')
# Get ranks
if coefs.ndim > 1:
ranks = np.argsort(safe_sqr(coefs).sum(axis=0))
else:
ranks = np.argsort(safe_sqr(coefs))
# for the sparse case, ranks is a matrix
ranks = np.ravel(ranks)
# Eliminate the worst features
threshold = min(step, np.sum(support_) - n_features_to_select)
# Compute step score on the previous selection iteration
# because 'estimator' must use features
# that have not been eliminated yet
if step_score:
self.scores_.append(step_score(estimator, features))
support_[features[ranks][:threshold]] = False
ranking_[np.logical_not(support_)] += 1
# Set final attributes
features = np.arange(n_features)[support_]
self.estimator_ = clone(self.estimator)
if self.estimator_params:
self.estimator_.set_params(**self.estimator_params)
self.estimator_.fit(X[:, features], y)
# Compute step score when only n_features_to_select features left
if step_score:
self.scores_.append(step_score(self.estimator_, features))
self.n_features_ = support_.sum()
self.support_ = support_
self.ranking_ = ranking_
return self
@if_delegate_has_method(delegate='estimator')
def predict(self, X):
"""Reduce X to the selected features and then predict using the
underlying estimator.
Parameters
----------
X : array of shape [n_samples, n_features]
The input samples.
Returns
-------
y : array of shape [n_samples]
The predicted target values.
"""
return self.estimator_.predict(self.transform(X))
@if_delegate_has_method(delegate='estimator')
def score(self, X, y):
"""Reduce X to the selected features and then return the score of the
underlying estimator.
Parameters
----------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The target values.
"""
return self.estimator_.score(self.transform(X), y)
def _get_support_mask(self):
return self.support_
@if_delegate_has_method(delegate='estimator')
def decision_function(self, X):
return self.estimator_.decision_function(self.transform(X))
@if_delegate_has_method(delegate='estimator')
def predict_proba(self, X):
return self.estimator_.predict_proba(self.transform(X))
@if_delegate_has_method(delegate='estimator')
def predict_log_proba(self, X):
return self.estimator_.predict_log_proba(self.transform(X))
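# A minimal sketch of RFE with an estimator that exposes
# ``feature_importances_`` rather than ``coef_`` (see the fallback in
# ``_fit`` above); dataset and parameter choices are illustrative only:
#
#   >>> from sklearn.datasets import make_classification
#   >>> from sklearn.ensemble import RandomForestClassifier
#   >>> X, y = make_classification(n_samples=100, n_features=8,
#   ...                            n_informative=3, random_state=0)
#   >>> rfe = RFE(RandomForestClassifier(random_state=0),
#   ...           n_features_to_select=3, step=1).fit(X, y)
#   >>> int(rfe.support_.sum())
#   3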
class RFECV(RFE, MetaEstimatorMixin):
"""Feature ranking with recursive feature elimination and cross-validated
selection of the best number of features.
Read more in the :ref:`User Guide <rfe>`.
Parameters
----------
estimator : object
A supervised learning estimator with a `fit` method that provides
information about feature importance either through a `coef_`
attribute or through a `feature_importances_` attribute. Important
features must correspond to high absolute values in that array.
For instance, this is the case for most supervised learning
algorithms such as Support Vector Classifiers and Generalized
Linear Models from the `svm` and `linear_model` modules.
step : int or float, optional (default=1)
If greater than or equal to 1, then `step` corresponds to the (integer)
number of features to remove at each iteration.
If within (0.0, 1.0), then `step` corresponds to the percentage
(rounded down) of features to remove at each iteration.
cv : int or cross-validation generator, optional (default=None)
If int, it is the number of folds.
If None, 3-fold cross-validation is performed by default.
Specific cross-validation objects can also be passed, see
`sklearn.cross_validation module` for details.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
estimator_params : dict
Parameters for the external estimator.
This attribute is deprecated as of version 0.16 and will be removed in
0.18. Use estimator initialisation or set_params method instead.
verbose : int, default=0
Controls verbosity of output.
Attributes
----------
n_features_ : int
The number of selected features with cross-validation.
support_ : array of shape [n_features]
The mask of selected features.
ranking_ : array of shape [n_features]
The feature ranking, such that `ranking_[i]`
corresponds to the ranking
position of the i-th feature.
Selected (i.e., estimated best)
features are assigned rank 1.
grid_scores_ : array of shape [n_subsets_of_features]
The cross-validation scores such that
``grid_scores_[i]`` corresponds to
the CV score of the i-th subset of features.
estimator_ : object
The external estimator fit on the reduced dataset.
Notes
-----
The size of ``grid_scores_`` is equal to ceil((n_features - 1) / step) + 1,
where step is the number of features removed at each iteration.
Examples
--------
The following example shows how to retrieve the 5 informative features
in the Friedman #1 dataset, which are not known a priori.
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.feature_selection import RFECV
>>> from sklearn.svm import SVR
>>> X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
>>> estimator = SVR(kernel="linear")
>>> selector = RFECV(estimator, step=1, cv=5)
>>> selector = selector.fit(X, y)
>>> selector.support_ # doctest: +NORMALIZE_WHITESPACE
array([ True, True, True, True, True,
False, False, False, False, False], dtype=bool)
>>> selector.ranking_
array([1, 1, 1, 1, 1, 6, 4, 3, 2, 5])
References
----------
.. [1] Guyon, I., Weston, J., Barnhill, S., & Vapnik, V., "Gene selection
for cancer classification using support vector machines",
Mach. Learn., 46(1-3), 389--422, 2002.
"""
def __init__(self, estimator, step=1, cv=None, scoring=None,
estimator_params=None, verbose=0):
self.estimator = estimator
self.step = step
self.cv = cv
self.scoring = scoring
self.estimator_params = estimator_params
self.verbose = verbose
def fit(self, X, y):
"""Fit the RFE model and automatically tune the number of selected
features.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vector, where `n_samples` is the number of samples and
`n_features` is the total number of features.
y : array-like, shape = [n_samples]
Target values (integers for classification, real numbers for
regression).
"""
X, y = check_X_y(X, y, "csr")
if self.estimator_params is not None:
warnings.warn("The parameter 'estimator_params' is deprecated as "
"of version 0.16 and will be removed in 0.18. "
"The parameter is no longer necessary because the "
"value is set via the estimator initialisation or "
"set_params method.", DeprecationWarning)
# Initialization
cv = check_cv(self.cv, X, y, is_classifier(self.estimator))
scorer = check_scoring(self.estimator, scoring=self.scoring)
n_features = X.shape[1]
n_features_to_select = 1
# Determine the number of subsets of features
scores = []
# Cross-validation
for n, (train, test) in enumerate(cv):
X_train, y_train = _safe_split(self.estimator, X, y, train)
X_test, y_test = _safe_split(self.estimator, X, y, test, train)
rfe = RFE(estimator=self.estimator,
n_features_to_select=n_features_to_select,
step=self.step, estimator_params=self.estimator_params,
verbose=self.verbose - 1)
rfe._fit(X_train, y_train, lambda estimator, features:
_score(estimator, X_test[:, features], y_test, scorer))
scores.append(np.array(rfe.scores_[::-1]).reshape(1, -1))
scores = np.sum(np.concatenate(scores, 0), 0)
# The index in 'scores' when 'n_features' features are selected
n_feature_index = np.ceil((n_features - n_features_to_select) /
float(self.step))
n_features_to_select = max(n_features_to_select,
n_features - ((n_feature_index -
np.argmax(scores)) *
self.step))
# Re-execute an elimination with best_k over the whole set
rfe = RFE(estimator=self.estimator,
n_features_to_select=n_features_to_select,
step=self.step, estimator_params=self.estimator_params)
rfe.fit(X, y)
# Set final attributes
self.support_ = rfe.support_
self.n_features_ = rfe.n_features_
self.ranking_ = rfe.ranking_
self.estimator_ = clone(self.estimator)
if self.estimator_params:
self.estimator_.set_params(**self.estimator_params)
self.estimator_.fit(self.transform(X), y)
# The loop index n ends at len(cv) - 1, so divide the summed scores
# by len(cv) to obtain the mean cross-validation score per subset
self.grid_scores_ = scores / len(cv)
return self
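# A minimal sketch of inspecting the cross-validated score curve; the data
# and estimator mirror the class docstring example (illustrative only):
#
#   >>> from sklearn.datasets import make_friedman1
#   >>> from sklearn.svm import SVR
#   >>> X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
#   >>> selector = RFECV(SVR(kernel="linear"), step=1, cv=5).fit(X, y)
#   >>> len(selector.grid_scores_)  # ceil((n_features - 1) / step) + 1
#   10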
| bsd-3-clause |
zorroblue/scikit-learn | sklearn/linear_model/bayes.py | 5 | 19797 | """
Various Bayesian regression models
"""
from __future__ import print_function
# Authors: V. Michel, F. Pedregosa, A. Gramfort
# License: BSD 3 clause
from math import log
import numpy as np
from scipy import linalg
from scipy.linalg import pinvh
from .base import LinearModel
from ..base import RegressorMixin
from ..utils.extmath import fast_logdet
from ..utils import check_X_y
###############################################################################
# BayesianRidge regression
class BayesianRidge(LinearModel, RegressorMixin):
"""Bayesian ridge regression
Fit a Bayesian ridge model and optimize the regularization parameters
lambda (precision of the weights) and alpha (precision of the noise).
Read more in the :ref:`User Guide <bayesian_regression>`.
Parameters
----------
n_iter : int, optional
Maximum number of iterations. Default is 300.
tol : float, optional
Stop the algorithm if w has converged. Default is 1.e-3.
alpha_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the alpha parameter. Default is 1.e-6
alpha_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the alpha parameter.
Default is 1.e-6.
lambda_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the lambda parameter. Default is 1.e-6.
lambda_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the lambda parameter.
Default is 1.e-6
compute_score : boolean, optional
If True, compute the objective function at each step of the model.
Default is False
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
Default is True.
normalize : boolean, optional, default False
This parameter is ignored when ``fit_intercept`` is set to False.
If True, the regressors X will be normalized before regression by
subtracting the mean and dividing by the l2-norm.
If you wish to standardize, please use
:class:`sklearn.preprocessing.StandardScaler` before calling ``fit``
on an estimator with ``normalize=False``.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
verbose : boolean, optional, default False
Verbose mode when fitting the model.
Attributes
----------
coef_ : array, shape = (n_features)
Coefficients of the regression model (mean of distribution)
alpha_ : float
estimated precision of the noise.
lambda_ : float
estimated precision of the weights.
sigma_ : array, shape = (n_features, n_features)
estimated variance-covariance matrix of the weights
scores_ : float
if computed, value of the objective function (to be maximized)
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.BayesianRidge()
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
... # doctest: +NORMALIZE_WHITESPACE
BayesianRidge(alpha_1=1e-06, alpha_2=1e-06, compute_score=False,
copy_X=True, fit_intercept=True, lambda_1=1e-06, lambda_2=1e-06,
n_iter=300, normalize=False, tol=0.001, verbose=False)
>>> clf.predict([[1, 1]])
array([ 1.])
Notes
-----
For an example, see :ref:`examples/linear_model/plot_bayesian_ridge.py
<sphx_glr_auto_examples_linear_model_plot_bayesian_ridge.py>`.
References
----------
D. J. C. MacKay, Bayesian Interpolation, Computation and Neural Systems,
Vol. 4, No. 3, 1992.
R. Salakhutdinov, Lecture notes on Statistical Machine Learning,
http://www.utstat.toronto.edu/~rsalakhu/sta4273/notes/Lecture2.pdf#page=15
Their beta is our ``self.alpha_``
Their alpha is our ``self.lambda_``
"""
def __init__(self, n_iter=300, tol=1.e-3, alpha_1=1.e-6, alpha_2=1.e-6,
lambda_1=1.e-6, lambda_2=1.e-6, compute_score=False,
fit_intercept=True, normalize=False, copy_X=True,
verbose=False):
self.n_iter = n_iter
self.tol = tol
self.alpha_1 = alpha_1
self.alpha_2 = alpha_2
self.lambda_1 = lambda_1
self.lambda_2 = lambda_2
self.compute_score = compute_score
self.fit_intercept = fit_intercept
self.normalize = normalize
self.copy_X = copy_X
self.verbose = verbose
def fit(self, X, y):
"""Fit the model
Parameters
----------
X : numpy array of shape [n_samples, n_features]
Training data
y : numpy array of shape [n_samples]
Target values. Will be cast to X's dtype if necessary
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y, dtype=np.float64, y_numeric=True)
X, y, X_offset_, y_offset_, X_scale_ = self._preprocess_data(
X, y, self.fit_intercept, self.normalize, self.copy_X)
self.X_offset_ = X_offset_
self.X_scale_ = X_scale_
n_samples, n_features = X.shape
# Initialization of the values of the parameters
alpha_ = 1. / np.var(y)
lambda_ = 1.
verbose = self.verbose
lambda_1 = self.lambda_1
lambda_2 = self.lambda_2
alpha_1 = self.alpha_1
alpha_2 = self.alpha_2
self.scores_ = list()
coef_old_ = None
XT_y = np.dot(X.T, y)
U, S, Vh = linalg.svd(X, full_matrices=False)
eigen_vals_ = S ** 2
# Convergence loop of the bayesian ridge regression
for iter_ in range(self.n_iter):
# Compute mu and sigma
# sigma_ = lambda_ / alpha_ * np.eye(n_features) + np.dot(X.T, X)
# coef_ = sigma_^-1 * XT * y
if n_samples > n_features:
coef_ = np.dot(Vh.T,
Vh / (eigen_vals_ +
lambda_ / alpha_)[:, np.newaxis])
coef_ = np.dot(coef_, XT_y)
if self.compute_score:
logdet_sigma_ = - np.sum(
np.log(lambda_ + alpha_ * eigen_vals_))
else:
coef_ = np.dot(X.T, np.dot(
U / (eigen_vals_ + lambda_ / alpha_)[None, :], U.T))
coef_ = np.dot(coef_, y)
if self.compute_score:
logdet_sigma_ = lambda_ * np.ones(n_features)
logdet_sigma_[:n_samples] += alpha_ * eigen_vals_
logdet_sigma_ = - np.sum(np.log(logdet_sigma_))
# Preserve the alpha and lambda values that were used to
# calculate the final coefficients
self.alpha_ = alpha_
self.lambda_ = lambda_
# Update alpha and lambda
rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)
gamma_ = (np.sum((alpha_ * eigen_vals_) /
(lambda_ + alpha_ * eigen_vals_)))
lambda_ = ((gamma_ + 2 * lambda_1) /
(np.sum(coef_ ** 2) + 2 * lambda_2))
alpha_ = ((n_samples - gamma_ + 2 * alpha_1) /
(rmse_ + 2 * alpha_2))
# Compute the objective function
if self.compute_score:
s = lambda_1 * log(lambda_) - lambda_2 * lambda_
s += alpha_1 * log(alpha_) - alpha_2 * alpha_
s += 0.5 * (n_features * log(lambda_) +
n_samples * log(alpha_) -
alpha_ * rmse_ -
(lambda_ * np.sum(coef_ ** 2)) -
logdet_sigma_ -
n_samples * log(2 * np.pi))
self.scores_.append(s)
# Check for convergence
if iter_ != 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:
if verbose:
print("Convergence after ", str(iter_), " iterations")
break
coef_old_ = np.copy(coef_)
self.coef_ = coef_
sigma_ = np.dot(Vh.T,
Vh / (eigen_vals_ + lambda_ / alpha_)[:, np.newaxis])
self.sigma_ = (1. / alpha_) * sigma_
self._set_intercept(X_offset_, y_offset_, X_scale_)
return self
def predict(self, X, return_std=False):
"""Predict using the linear model.
In addition to the mean of the predictive distribution, also its
standard deviation can be returned.
Parameters
----------
X : {array-like, sparse matrix}, shape = (n_samples, n_features)
Samples.
return_std : boolean, optional
Whether to return the standard deviation of posterior prediction.
Returns
-------
y_mean : array, shape = (n_samples,)
Mean of predictive distribution of query points.
y_std : array, shape = (n_samples,)
Standard deviation of predictive distribution of query points.
"""
y_mean = self._decision_function(X)
if return_std is False:
return y_mean
else:
if self.normalize:
X = (X - self.X_offset_) / self.X_scale_
sigmas_squared_data = (np.dot(X, self.sigma_) * X).sum(axis=1)
y_std = np.sqrt(sigmas_squared_data + (1. / self.alpha_))
return y_mean, y_std
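# A minimal sketch of the ``return_std`` option of ``predict``; the toy data
# below is illustrative only:
#
#   >>> import numpy as np
#   >>> X = np.array([[0., 0.], [1., 1.], [2., 2.], [3., 3.]])
#   >>> y = np.array([0., 1., 2., 3.])
#   >>> reg = BayesianRidge().fit(X, y)
#   >>> y_mean, y_std = reg.predict([[4., 4.]], return_std=True)
#   >>> y_mean.shape, y_std.shape
#   ((1,), (1,))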
###############################################################################
# ARD (Automatic Relevance Determination) regression
class ARDRegression(LinearModel, RegressorMixin):
"""Bayesian ARD regression.
Fit the weights of a regression model, using an ARD prior. The weights of
the regression model are assumed to follow Gaussian distributions.
Also estimate the parameters lambda (precisions of the distributions of the
weights) and alpha (precision of the distribution of the noise).
The estimation is done by an iterative procedure (Evidence Maximization).
Read more in the :ref:`User Guide <bayesian_regression>`.
Parameters
----------
n_iter : int, optional
Maximum number of iterations. Default is 300
tol : float, optional
Stop the algorithm if w has converged. Default is 1.e-3.
alpha_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the alpha parameter. Default is 1.e-6.
alpha_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the alpha parameter. Default is 1.e-6.
lambda_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the lambda parameter. Default is 1.e-6.
lambda_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the lambda parameter. Default is 1.e-6.
compute_score : boolean, optional
If True, compute the objective function at each step of the model.
Default is False.
threshold_lambda : float, optional
threshold for removing (pruning) weights with high precision from
the computation. Default is 1.e+4.
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
Default is True.
normalize : boolean, optional, default False
This parameter is ignored when ``fit_intercept`` is set to False.
If True, the regressors X will be normalized before regression by
subtracting the mean and dividing by the l2-norm.
If you wish to standardize, please use
:class:`sklearn.preprocessing.StandardScaler` before calling ``fit``
on an estimator with ``normalize=False``.
copy_X : boolean, optional, default True.
If True, X will be copied; else, it may be overwritten.
verbose : boolean, optional, default False
Verbose mode when fitting the model.
Attributes
----------
coef_ : array, shape = (n_features)
Coefficients of the regression model (mean of distribution)
alpha_ : float
estimated precision of the noise.
lambda_ : array, shape = (n_features)
estimated precisions of the weights.
sigma_ : array, shape = (n_features, n_features)
estimated variance-covariance matrix of the weights
scores_ : float
if computed, value of the objective function (to be maximized)
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.ARDRegression()
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
... # doctest: +NORMALIZE_WHITESPACE
ARDRegression(alpha_1=1e-06, alpha_2=1e-06, compute_score=False,
copy_X=True, fit_intercept=True, lambda_1=1e-06, lambda_2=1e-06,
n_iter=300, normalize=False, threshold_lambda=10000.0, tol=0.001,
verbose=False)
>>> clf.predict([[1, 1]])
array([ 1.])
Notes
-----
For an example, see :ref:`examples/linear_model/plot_ard.py
<sphx_glr_auto_examples_linear_model_plot_ard.py>`.
References
----------
D. J. C. MacKay, Bayesian nonlinear modeling for the prediction
competition, ASHRAE Transactions, 1994.
R. Salakhutdinov, Lecture notes on Statistical Machine Learning,
http://www.utstat.toronto.edu/~rsalakhu/sta4273/notes/Lecture2.pdf#page=15
Their beta is our ``self.alpha_``
Their alpha is our ``self.lambda_``
    ARD is a little different from the slides: only dimensions/features for
which ``self.lambda_ < self.threshold_lambda`` are kept and the rest are
discarded.
"""
def __init__(self, n_iter=300, tol=1.e-3, alpha_1=1.e-6, alpha_2=1.e-6,
lambda_1=1.e-6, lambda_2=1.e-6, compute_score=False,
threshold_lambda=1.e+4, fit_intercept=True, normalize=False,
copy_X=True, verbose=False):
self.n_iter = n_iter
self.tol = tol
self.fit_intercept = fit_intercept
self.normalize = normalize
self.alpha_1 = alpha_1
self.alpha_2 = alpha_2
self.lambda_1 = lambda_1
self.lambda_2 = lambda_2
self.compute_score = compute_score
self.threshold_lambda = threshold_lambda
self.copy_X = copy_X
self.verbose = verbose
def fit(self, X, y):
"""Fit the ARDRegression model according to the given training data
and parameters.
Iterative procedure to maximize the evidence
Parameters
----------
X : array-like, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array, shape = [n_samples]
            Target values. Will be cast to X's dtype if necessary.
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y, dtype=np.float64, y_numeric=True,
ensure_min_samples=2)
n_samples, n_features = X.shape
coef_ = np.zeros(n_features)
X, y, X_offset_, y_offset_, X_scale_ = self._preprocess_data(
X, y, self.fit_intercept, self.normalize, self.copy_X)
# Launch the convergence loop
keep_lambda = np.ones(n_features, dtype=bool)
lambda_1 = self.lambda_1
lambda_2 = self.lambda_2
alpha_1 = self.alpha_1
alpha_2 = self.alpha_2
verbose = self.verbose
# Initialization of the values of the parameters
alpha_ = 1. / np.var(y)
lambda_ = np.ones(n_features)
self.scores_ = list()
coef_old_ = None
# Iterative procedure of ARDRegression
for iter_ in range(self.n_iter):
# Compute mu and sigma (using Woodbury matrix identity)
sigma_ = pinvh(np.eye(n_samples) / alpha_ +
np.dot(X[:, keep_lambda] *
np.reshape(1. / lambda_[keep_lambda], [1, -1]),
X[:, keep_lambda].T))
sigma_ = np.dot(sigma_, X[:, keep_lambda] *
np.reshape(1. / lambda_[keep_lambda], [1, -1]))
sigma_ = - np.dot(np.reshape(1. / lambda_[keep_lambda], [-1, 1]) *
X[:, keep_lambda].T, sigma_)
sigma_.flat[::(sigma_.shape[1] + 1)] += 1. / lambda_[keep_lambda]
coef_[keep_lambda] = alpha_ * np.dot(
sigma_, np.dot(X[:, keep_lambda].T, y))
# Update alpha and lambda
rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)
gamma_ = 1. - lambda_[keep_lambda] * np.diag(sigma_)
lambda_[keep_lambda] = ((gamma_ + 2. * lambda_1) /
((coef_[keep_lambda]) ** 2 +
2. * lambda_2))
alpha_ = ((n_samples - gamma_.sum() + 2. * alpha_1) /
(rmse_ + 2. * alpha_2))
# Prune the weights with a precision over a threshold
keep_lambda = lambda_ < self.threshold_lambda
coef_[~keep_lambda] = 0
# Compute the objective function
if self.compute_score:
s = (lambda_1 * np.log(lambda_) - lambda_2 * lambda_).sum()
s += alpha_1 * log(alpha_) - alpha_2 * alpha_
s += 0.5 * (fast_logdet(sigma_) + n_samples * log(alpha_) +
np.sum(np.log(lambda_)))
s -= 0.5 * (alpha_ * rmse_ + (lambda_ * coef_ ** 2).sum())
self.scores_.append(s)
# Check for convergence
if iter_ > 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:
if verbose:
print("Converged after %s iterations" % iter_)
break
coef_old_ = np.copy(coef_)
self.coef_ = coef_
self.alpha_ = alpha_
self.sigma_ = sigma_
self.lambda_ = lambda_
self._set_intercept(X_offset_, y_offset_, X_scale_)
return self
def predict(self, X, return_std=False):
"""Predict using the linear model.
In addition to the mean of the predictive distribution, also its
standard deviation can be returned.
Parameters
----------
X : {array-like, sparse matrix}, shape = (n_samples, n_features)
Samples.
return_std : boolean, optional
Whether to return the standard deviation of posterior prediction.
Returns
-------
y_mean : array, shape = (n_samples,)
Mean of predictive distribution of query points.
y_std : array, shape = (n_samples,)
Standard deviation of predictive distribution of query points.
"""
y_mean = self._decision_function(X)
if return_std is False:
return y_mean
else:
if self.normalize:
X = (X - self.X_offset_) / self.X_scale_
X = X[:, self.lambda_ < self.threshold_lambda]
sigmas_squared_data = (np.dot(X, self.sigma_) * X).sum(axis=1)
y_std = np.sqrt(sigmas_squared_data + (1. / self.alpha_))
return y_mean, y_std
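# A minimal usage sketch (not part of the library code above); the toy data
# below is hypothetical and only illustrates how ``threshold_lambda`` pruning
# and ``return_std`` interact.
#
#     import numpy as np
#     from sklearn.linear_model import ARDRegression
#
#     rng = np.random.RandomState(0)
#     X = rng.randn(50, 10)
#     w = np.zeros(10)
#     w[:3] = [1., 2., -1.]                  # only 3 informative features
#     y = np.dot(X, w) + 0.1 * rng.randn(50)
#
#     clf = ARDRegression(threshold_lambda=1e4).fit(X, y)
#     # features whose lambda_ exceeds the threshold are pruned (coef_ == 0)
#     y_mean, y_std = clf.predict(X[:5], return_std=True)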
| bsd-3-clause |
yyjiang/scikit-learn | sklearn/ensemble/tests/test_voting_classifier.py | 40 | 6991 | """Testing for the VotingClassifier (sklearn.ensemble.voting_classifier)."""
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import VotingClassifier
from sklearn.grid_search import GridSearchCV
from sklearn import datasets
from sklearn import cross_validation
from sklearn.datasets import make_multilabel_classification
from sklearn.svm import SVC
from sklearn.multiclass import OneVsRestClassifier
# Load the iris dataset and randomly permute it
iris = datasets.load_iris()
X, y = iris.data[:, 1:3], iris.target
def test_majority_label_iris():
"""Check classification by majority label on dataset iris."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='hard')
scores = cross_validation.cross_val_score(eclf,
X,
y,
cv=5,
scoring='accuracy')
assert_almost_equal(scores.mean(), 0.95, decimal=2)
def test_tie_situation():
"""Check voting classifier selects smaller class label in tie situation."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
eclf = VotingClassifier(estimators=[('lr', clf1), ('rf', clf2)],
voting='hard')
assert_equal(clf1.fit(X, y).predict(X)[73], 2)
assert_equal(clf2.fit(X, y).predict(X)[73], 1)
assert_equal(eclf.fit(X, y).predict(X)[73], 1)
def test_weights_iris():
"""Check classification by average probabilities on dataset iris."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft',
weights=[1, 2, 10])
scores = cross_validation.cross_val_score(eclf,
X,
y,
cv=5,
scoring='accuracy')
assert_almost_equal(scores.mean(), 0.93, decimal=2)
def test_predict_on_toy_problem():
"""Manually check predicted class labels for toy dataset."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
X = np.array([[-1.1, -1.5],
[-1.2, -1.4],
[-3.4, -2.2],
[1.1, 1.2],
[2.1, 1.4],
[3.1, 2.3]])
y = np.array([1, 1, 1, 2, 2, 2])
    assert_equal(clf1.fit(X, y).predict(X).tolist(), [1, 1, 1, 2, 2, 2])
    assert_equal(clf2.fit(X, y).predict(X).tolist(), [1, 1, 1, 2, 2, 2])
    assert_equal(clf3.fit(X, y).predict(X).tolist(), [1, 1, 1, 2, 2, 2])
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='hard',
weights=[1, 1, 1])
    assert_equal(eclf.fit(X, y).predict(X).tolist(), [1, 1, 1, 2, 2, 2])
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft',
weights=[1, 1, 1])
    assert_equal(eclf.fit(X, y).predict(X).tolist(), [1, 1, 1, 2, 2, 2])
def test_predict_proba_on_toy_problem():
"""Calculate predicted probabilities on toy dataset."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
X = np.array([[-1.1, -1.5], [-1.2, -1.4], [-3.4, -2.2], [1.1, 1.2]])
y = np.array([1, 1, 2, 2])
clf1_res = np.array([[0.59790391, 0.40209609],
[0.57622162, 0.42377838],
[0.50728456, 0.49271544],
[0.40241774, 0.59758226]])
clf2_res = np.array([[0.8, 0.2],
[0.8, 0.2],
[0.2, 0.8],
[0.3, 0.7]])
clf3_res = np.array([[0.9985082, 0.0014918],
[0.99845843, 0.00154157],
[0., 1.],
[0., 1.]])
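    # The expected soft-vote probabilities below are the weighted average of
    # the individual predict_proba outputs, with weights [2, 1, 1] (sum = 4).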
t00 = (2*clf1_res[0][0] + clf2_res[0][0] + clf3_res[0][0]) / 4
t11 = (2*clf1_res[1][1] + clf2_res[1][1] + clf3_res[1][1]) / 4
t21 = (2*clf1_res[2][1] + clf2_res[2][1] + clf3_res[2][1]) / 4
t31 = (2*clf1_res[3][1] + clf2_res[3][1] + clf3_res[3][1]) / 4
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft',
weights=[2, 1, 1])
eclf_res = eclf.fit(X, y).predict_proba(X)
assert_almost_equal(t00, eclf_res[0][0], decimal=1)
assert_almost_equal(t11, eclf_res[1][1], decimal=1)
assert_almost_equal(t21, eclf_res[2][1], decimal=1)
assert_almost_equal(t31, eclf_res[3][1], decimal=1)
try:
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='hard')
eclf.fit(X, y).predict_proba(X)
except AttributeError:
pass
else:
raise AssertionError('AttributeError for voting == "hard"'
' and with predict_proba not raised')
def test_multilabel():
"""Check if error is raised for multilabel classification."""
X, y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=False,
return_indicator=True,
random_state=123)
clf = OneVsRestClassifier(SVC(kernel='linear'))
eclf = VotingClassifier(estimators=[('ovr', clf)], voting='hard')
try:
eclf.fit(X, y)
except NotImplementedError:
return
def test_gridsearch():
"""Check GridSearch support."""
clf1 = LogisticRegression(random_state=1)
clf2 = RandomForestClassifier(random_state=1)
clf3 = GaussianNB()
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft')
params = {'lr__C': [1.0, 100.0],
'voting': ['soft', 'hard'],
'weights': [[0.5, 0.5, 0.5], [1.0, 0.5, 0.5]]}
grid = GridSearchCV(estimator=eclf, param_grid=params, cv=5)
grid.fit(iris.data, iris.target)
| bsd-3-clause |
sannecottaar/burnman | burnman/anisotropy.py | 5 | 17005 | # This file is part of BurnMan - a thermoelastic and thermodynamic toolkit for the Earth and Planetary Sciences
# Copyright (C) 2012 - 2017 by the BurnMan team, released under the GNU
# GPL v2 or later.
from __future__ import absolute_import
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from .tools import unit_normalize
from .material import Material, material_property
class AnisotropicMaterial(Material):
"""
A class that represents an anisotropic elastic material. This class
is initialised with a set of elastic constants and a density. It can
then be interrogated to find the values of different properties,
such as bounds on seismic velocities. There are also several functions
which can be called to calculate properties along directions oriented
with respect to the elastic tensor. Initialization is via a density
and a full stiffness tensor in Voigt notation
See :cite:`Mainprice2011` Geological Society of London Special Publication
and https://materialsproject.org/wiki/index.php/Elasticity_calculations
for mathematical descriptions of each function.
"""
def __init__(self, rho, cijs):
self.params = {'rho_0': rho,
'stiffness_tensor_0': cijs}
assert self.params['stiffness_tensor_0'].shape == (6, 6), 'stiffness_tensor must be in Voigt notation (6x6)'
assert np.allclose(self.params['stiffness_tensor_0'].T,
self.params['stiffness_tensor_0']), 'stiffness_tensor must be symmetric'
Material.__init__(self)
def _voigt_index_to_ij(self, m):
"""
Returns the ij (or kl) indices of the
stiffness tensor which correspond to those
of the Voigt notation m (or n).
"""
if m == 3:
return 1, 2
elif m == 4:
return 0, 2
elif m == 5:
return 0, 1
else:
return m, m
def _voigt_notation_to_stiffness_tensor(self, voigt_notation):
"""
Converts a stiffness tensor in Voigt notation (6x6 matrix)
to the full fourth rank tensor (3x3x3x3 matrix).
"""
stiffness_tensor = np.zeros([3, 3, 3, 3])
for m in range(6):
i, j = self._voigt_index_to_ij(m)
for n in range(6):
k, l = self._voigt_index_to_ij(n)
stiffness_tensor[i][j][k][l] = voigt_notation[m][n]
stiffness_tensor[j][i][k][l] = voigt_notation[m][n]
stiffness_tensor[i][j][l][k] = voigt_notation[m][n]
stiffness_tensor[j][i][l][k] = voigt_notation[m][n]
return stiffness_tensor
@material_property
def stiffness_tensor(self):
return self.params['stiffness_tensor_0']
@material_property
def full_stiffness_tensor(self):
return self._voigt_notation_to_stiffness_tensor(self.stiffness_tensor)
@material_property
def compliance_tensor(self):
return np.linalg.inv(self.stiffness_tensor)
@material_property
def full_compliance_tensor(self):
block = np.array(np.bmat( [[[[1.]*3]*3, [[2.]*3]*3], [[[2.]*3]*3, [[4.]*3]*3]] ))
return self._voigt_notation_to_stiffness_tensor(np.divide(self.compliance_tensor, block))
@material_property
def density(self):
return self.params['rho_0']
@material_property
def bulk_modulus_voigt(self):
"""
Computes the bulk modulus (Voigt bound)
"""
K = np.sum([[self.stiffness_tensor[i][k] for k in range(3)] for i in range(3)])/9.
return K
@material_property
def bulk_modulus_reuss(self):
"""
Computes the bulk modulus (Reuss bound)
"""
beta = np.sum([[self.compliance_tensor[i][k] for k in range(3)] for i in range(3)])
return 1./beta
@material_property
def bulk_modulus_vrh(self):
"""
Computes the bulk modulus (Voigt-Reuss-Hill average)
"""
return 0.5*(self.bulk_modulus_voigt + self.bulk_modulus_reuss)
@material_property
def shear_modulus_voigt(self):
"""
Computes the shear modulus (Voigt bound)
"""
G = ( np.sum([self.stiffness_tensor[i][i] for i in [0, 1, 2]]) +
np.sum([self.stiffness_tensor[i][i] for i in [3, 4, 5]])*3. -
( self.stiffness_tensor[0][1] +
self.stiffness_tensor[1][2] +
self.stiffness_tensor[2][0] )) / 15.
return G
@material_property
def shear_modulus_reuss(self):
"""
Computes the shear modulus (Reuss bound)
"""
beta = ( np.sum([self.compliance_tensor[i][i] for i in [0, 1, 2]])*4. +
np.sum([self.compliance_tensor[i][i] for i in [3, 4, 5]])*3. -
( self.compliance_tensor[0][1] +
self.compliance_tensor[1][2] +
self.compliance_tensor[2][0])*4. ) / 15.
return 1./beta
@material_property
def shear_modulus_vrh(self):
"""
Computes the shear modulus (Voigt-Reuss-Hill average)
"""
return 0.5*(self.shear_modulus_voigt + self.shear_modulus_reuss)
@material_property
def universal_elastic_anisotropy(self):
"""
Compute the universal elastic anisotropy
"""
return ( 5.*(self.shear_modulus_voigt/self.shear_modulus_reuss) +
(self.bulk_modulus_voigt/self.bulk_modulus_reuss) - 6. )
@material_property
def isotropic_poisson_ratio(self):
"""
        Compute nu, the isotropic Poisson ratio
        (a description of the lateral response to loading)
"""
return ( (3.*self.bulk_modulus_vrh - 2.*self.shear_modulus_vrh) /
(6.*self.bulk_modulus_vrh + 2.*self.shear_modulus_vrh) )
def christoffel_tensor(self, propagation_direction):
"""
Computes the Christoffel tensor from an elastic stiffness
tensor and a propagation direction for a seismic wave
relative to the stiffness tensor
T_ik = C_ijkl n_j n_l
"""
propagation_direction = unit_normalize(propagation_direction)
Tik = np.tensordot(np.tensordot(self.full_stiffness_tensor,
propagation_direction,
axes=([1],[0])),
propagation_direction,
axes=([2],[0]))
return Tik
def linear_compressibility(self, direction):
"""
Computes the linear compressibility in a given direction
relative to the stiffness tensor
"""
direction = unit_normalize(direction)
Sijkk = np.einsum('ijkk', self.full_compliance_tensor)
beta = Sijkk.dot(direction).dot(direction)
return beta
def youngs_modulus(self, direction):
"""
Computes the Youngs modulus in a given direction
relative to the stiffness tensor
"""
direction = unit_normalize(direction)
Sijkl = self.full_compliance_tensor
S = Sijkl.dot(direction).dot(direction).dot(direction).dot(direction)
return 1./S
def shear_modulus(self, plane_normal, shear_direction):
"""
Computes the shear modulus on a plane in a given
shear direction relative to the stiffness tensor
"""
plane_normal = unit_normalize(plane_normal)
shear_direction = unit_normalize(shear_direction)
assert np.abs(plane_normal.dot(shear_direction)) < np.finfo(np.float).eps, 'plane_normal and shear_direction must be orthogonal'
Sijkl = self.full_compliance_tensor
G = Sijkl.dot(shear_direction).dot(plane_normal).dot(shear_direction).dot(plane_normal)
return 0.25/G
def poissons_ratio(self,
axial_direction,
lateral_direction):
"""
Computes the poisson ratio given loading and response
directions relative to the stiffness tensor
"""
axial_direction = unit_normalize(axial_direction)
lateral_direction = unit_normalize(lateral_direction)
assert np.abs(axial_direction.dot(lateral_direction)) < np.finfo(np.float).eps, 'axial_direction and lateral_direction must be orthogonal'
Sijkl = self.full_compliance_tensor
x = axial_direction
y = lateral_direction
nu = -(Sijkl.dot(y).dot(y).dot(x).dot(x) /
Sijkl.dot(x).dot(x).dot(x).dot(x) )
return nu
def wave_velocities(self, propagation_direction):
"""
Computes the compressional wave velocity, and two
shear wave velocities in a given propagation direction
Returns two lists, containing the wave speeds and
directions of particle motion relative to the stiffness tensor
"""
propagation_direction = unit_normalize(propagation_direction)
Tik = self.christoffel_tensor(propagation_direction)
eigenvalues, eigenvectors = np.linalg.eig(Tik)
idx = eigenvalues.argsort()[::-1]
eigenvalues = np.real(eigenvalues[idx])
eigenvectors = eigenvectors[:,idx]
velocities = np.sqrt(eigenvalues/self.rho)
return velocities, eigenvectors
def voigt_array_from_cijs(cijs, index_lists):
C = np.zeros([6, 6])
for i, index_list in enumerate(index_lists):
for indices in index_list:
C[indices] = cijs[i]
C[indices[::-1]] = cijs[i]
return C
class IsotropicMaterial(AnisotropicMaterial):
"""
A class derived from the AnisotropicMaterial base class
Initialization takes two input parameters; rho and
[C12, C44] (i.e. lambda and mu, the Lame parameters)
"""
def __init__(self, rho, cijs):
assert len(cijs) == 2
cijs = list(cijs)
cijs.insert(0, cijs[0] + 2.*cijs[1]) # C11 = C12 + 2C44
index_lists = [[(0, 0), (1, 1), (2, 2)], # C11
[(0, 1), (0, 2), (1, 2)], # C12
[(3, 3), (4, 4), (5, 5)]] # C44
AnisotropicMaterial.__init__(
self, rho, voigt_array_from_cijs(cijs, index_lists))
class CubicMaterial(AnisotropicMaterial):
"""
A class derived from the AnisotropicMaterial base class
Initialization takes two input parameters; rho and
[C11, C12, C44]
"""
def __init__(self, rho, cijs):
assert len(cijs) == 3
index_lists = [[(0, 0), (1, 1), (2, 2)], # C11
[(0, 1), (0, 2), (1, 2)], # C12
[(3, 3), (4, 4), (5, 5)]] # C44
AnisotropicMaterial.__init__(
self, rho, voigt_array_from_cijs(cijs, index_lists))
class HexagonalMaterial(AnisotropicMaterial):
"""
A class derived from the AnisotropicMaterial base class
Initialization takes two input parameters; rho and
[C11, C12, C13, C33, C44]
"""
def __init__(self, rho, cijs):
assert len(cijs) == 5
cijs = list(cijs)
cijs.append((cijs[0] - cijs[1])/2.) # C66 = (C11-C12)/2.
index_lists = [[(0, 0), (1, 1)], # C11
[(0, 1)], # C12
[(0, 2), (1, 2)], # C13
[(2, 2)], # C33
[(3, 3), (4, 4)], # C44
[(5, 5)]] # C66
AnisotropicMaterial.__init__(
self, rho, voigt_array_from_cijs(cijs, index_lists))
class TetragonalMaterial(AnisotropicMaterial):
"""
A class derived from the AnisotropicMaterial base class
Initialization takes two input parameters; rho and
[C11, C12, C13, C33, C44, C66] or
[C11, C12, C13, C16, C33, C44, C66]
"""
def __init__(self, rho, cijs):
if len(cijs) == 6:
# Tetragonal I / Laue class 4/mmm
index_lists = [[(0, 0), (1, 1)], # C11
[(0, 1)], # C12
[(0, 2), (1, 2)], # C13
[(2, 2)], # C33
[(3, 3), (4, 4)], # C44
[(5, 5)]] # C66
elif len(cijs) == 7:
# Tetragonal II / Laue class 4/m
cijs = list(cijs)
cijs.insert(4, -cijs[3]) # C26 = -C16
index_lists = [[(0, 0), (1, 1)], # C11
[(0, 1)], # C12
[(0, 2), (1, 2)], # C13
[(0, 5)], # C16
[(1, 5)], # C26
[(2, 2)], # C33
[(3, 3), (4, 4)], # C44
[(5, 5)]] # C66
else:
raise Exception('Tetragonal materials should have '
'either 6 or 7 independent Cijs')
AnisotropicMaterial.__init__(
self, rho, voigt_array_from_cijs(cijs, index_lists))
class RhombohedralMaterial(AnisotropicMaterial):
"""
A class derived from the AnisotropicMaterial base class
Initialization takes two input parameters; rho and
[C11, C12, C13, C14, C33, C44, C66] or
[C11, C12, C13, C14, C15, C33, C44, C66]
"""
def __init__(self, rho, cijs):
cijs = list(cijs)
if len(cijs) == 7:
# Rhombohedral I / Laue class \bar{3}m
cijs.insert(4, -cijs[3]) # C24 = -C14
index_lists = [[(0, 0), (1, 1)], # C11
[(0, 1)], # C12
[(0, 2), (1, 2)], # C13
[(0, 3), (4, 5)], # C14
[(1, 3)], # C24
[(2, 2)], # C33
[(3, 3), (4, 4)], # C44
[(5, 5)]] # C66
elif len(cijs) == 8:
# Rhombohedral II / Laue class \bar{3}
cijs.insert(4, -cijs[3]) # C24 = -C14
cijs.insert(6, -cijs[5]) # C25 = -C15
index_lists = [[(0, 0), (1, 1)], # C11
[(0, 1)], # C12
[(0, 2), (1, 2)], # C13
[(0, 3), (4, 5)], # C14
[(1, 3)], # C24
[(0, 4)], # C15
[(1, 4), (3, 5)], # C25
[(2, 2)], # C33
[(3, 3), (4, 4)], # C44
[(5, 5)]] # C66
else:
raise Exception('Rhombohedral materials should have '
'either 7 or 8 independent Cijs')
AnisotropicMaterial.__init__(
self, rho, voigt_array_from_cijs(cijs, index_lists))
class OrthorhombicMaterial(AnisotropicMaterial):
"""
A class derived from the AnisotropicMaterial base class
Initialization takes two input parameters; rho and
[C11, C12, C13, C22, C23, C33, C44, C55, C66]
"""
def __init__(self, rho, cijs):
assert len(cijs) == 9
index_lists = [[(0, 0)], # C11
[(0, 1)], # C12
[(0, 2)], # C13
[(1, 1)], # C22
[(1, 2)], # C23
[(2, 2)], # C33
[(3, 3)], # C44
[(4, 4)], # C55
[(5, 5)]] # C66
AnisotropicMaterial.__init__(
self, rho, voigt_array_from_cijs(cijs, index_lists))
class MonoclinicMaterial(AnisotropicMaterial):
"""
A class derived from the AnisotropicMaterial base class
Initialization takes two input parameters; rho and
[C11, C12, C13, C15, C22, C23, C25, C33, C35, C44, C46, C55, C66]
"""
def __init__(self, rho, cijs):
assert len(cijs) == 13
index_lists = [[(0, 0)], # C11
[(0, 1)], # C12
[(0, 2)], # C13
[(0, 4)], # C15
[(1, 1)], # C22
[(1, 2)], # C23
[(1, 4)], # C25
[(2, 2)], # C33
[(2, 4)], # C35
[(3, 3)], # C44
[(3, 5)], # C46
[(4, 4)], # C55
[(5, 5)]] # C66
AnisotropicMaterial.__init__(
self, rho, voigt_array_from_cijs(cijs, index_lists))
class TriclinicMaterial(AnisotropicMaterial):
"""
A class derived from the AnisotropicMaterial base class
Initialization takes two input parameters; rho and
[Cij, where 1<=i<=6 and i<=j<=6]
"""
def __init__(self, rho, cijs):
assert len(cijs) == 21
index_lists=[[(i, j)] for i in range(6) for j in range(i, 6)]
AnisotropicMaterial.__init__(
self, rho, voigt_array_from_cijs(cijs, index_lists))
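# A minimal usage sketch (not part of the module); the MgO-like values below
# are illustrative only (SI units: kg/m^3 and Pa).
#
#     rho = 3600.
#     cijs = [300.e9, 100.e9, 150.e9]        # [C11, C12, C44]
#     mgo = CubicMaterial(rho, cijs)
#     K_vrh = mgo.bulk_modulus_vrh
#     G_vrh = mgo.shear_modulus_vrh
#     velocities, polarisations = mgo.wave_velocities([1., 0., 0.])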
| gpl-2.0 |
karstenw/nodebox-pyobjc | examples/Extended Application/matplotlib/examples/lines_bars_and_markers/multicolored_line.py | 1 | 2345 | '''
==================
Multicolored lines
==================
This example shows how to make a multi-colored line. In this example, the line
is colored based on its derivative.
'''
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
from matplotlib.colors import ListedColormap, BoundaryNorm
# nodebox section
if __name__ == '__builtin__':
# were in nodebox
import os
import tempfile
W = 800
inset = 20
size(W, 600)
plt.cla()
plt.clf()
plt.close('all')
def tempimage():
fob = tempfile.NamedTemporaryFile(mode='w+b', suffix='.png', delete=False)
fname = fob.name
fob.close()
return fname
imgx = 20
imgy = 0
def pltshow(plt, dpi=150):
global imgx, imgy
temppath = tempimage()
plt.savefig(temppath, dpi=dpi)
dx,dy = imagesize(temppath)
w = min(W,dx)
image(temppath,imgx,imgy,width=w)
imgy = imgy + dy + 20
os.remove(temppath)
size(W, HEIGHT+dy+40)
else:
def pltshow(mplpyplot):
mplpyplot.show()
# nodebox section end
x = np.linspace(0, 3 * np.pi, 500)
y = np.sin(x)
dydx = np.cos(0.5 * (x[:-1] + x[1:])) # first derivative
# Create a set of line segments so that we can color them individually
# This creates the points as a N x 1 x 2 array so that we can stack points
# together easily to get the segments. The segments array for line collection
# needs to be (numlines) x (points per line) x 2 (for x and y)
points = np.array([x, y]).T.reshape(-1, 1, 2)
segments = np.concatenate([points[:-1], points[1:]], axis=1)
fig, axs = plt.subplots(2, 1, sharex=True, sharey=True)
# Create a continuous norm to map from data points to colors
norm = plt.Normalize(dydx.min(), dydx.max())
lc = LineCollection(segments, cmap='viridis', norm=norm)
# Set the values used for colormapping
lc.set_array(dydx)
lc.set_linewidth(2)
line = axs[0].add_collection(lc)
fig.colorbar(line, ax=axs[0])
# Use a boundary norm instead
cmap = ListedColormap(['r', 'g', 'b'])
norm = BoundaryNorm([-1, -0.5, 0.5, 1], cmap.N)
lc = LineCollection(segments, cmap=cmap, norm=norm)
lc.set_array(dydx)
lc.set_linewidth(2)
line = axs[1].add_collection(lc)
fig.colorbar(line, ax=axs[1])
axs[0].set_xlim(x.min(), x.max())
axs[0].set_ylim(-1.1, 1.1)
pltshow(plt)
| mit |
davidsamu/seal | seal/plot/putil.py | 1 | 26402 | # -*- coding: utf-8 -*-
"""
Collection of plotting-related utility functions.
@author: David Samu
"""
import warnings
from itertools import cycle
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.ticker as plticker
from matplotlib import pyplot as plt
from matplotlib import gridspec as gs
from matplotlib import collections as mc
import seaborn as sns
from seal.util import util, constants
# %% Matplotlib setup and some plotting constants.
mpl.rc('figure', autolayout=True) # to prevent object being cut off
ColConv = mpl.colors.ColorConverter()
t_lbl = 'Time since {} (ms)'
FR_lbl = 'Firing rate (sp/s)'
# Period (frequency) of tick marks and tick labels for time axes. In ms!
t_tick_mrks_prd = 500
t_tick_lbls_prd = 1000 # this has to be multiple of marks period!
my_color_list = ['b', 'r', 'm', 'g', 'c', 'y']
# Stimulus and cue colors.
stim_colors = pd.Series(['m', 'g'], index=['S1', 'S2'])
cue_colors = pd.Series(['grey', 'red', 'blue'],
index=['all', 'loc', 'dir'])
# Default Matplotlib RC params.
tick_pad = 3
tick_size = 4
tick_minor_fac = 0.75
seal_rc_params = {'xtick.major.pad': tick_pad,
'xtick.minor.pad': tick_minor_fac*tick_pad,
'ytick.major.pad': tick_pad,
'ytick.minor.pad': tick_minor_fac*tick_pad,
'xtick.major.size': tick_size,
'xtick.minor.size': tick_minor_fac*tick_size,
'ytick.major.size': tick_size,
'ytick.minor.size': tick_minor_fac*tick_size}
# Default decorator height levels.
ypos_marker = 0.92
ypos_lbl = 0.96
# %% Info plots.
def get_unit_info_title(u, fullname=False):
"""Plot unit info as text labels."""
# Init dict of info labels to plot.
upars = u.get_unit_params()
# Init formatted parameter values.
fpars = [('isolation', '{}'),
('SNR', 'SNR: {:.2f}'),
('ISIvr', 'ISIvr: {:.2f}%'),
('TrueSpikes', 'TrSpRt: {:.0f}%'),
('BS/NS', '{}'),
('mWfDur', 'Wf dur: {:.0f} $\mu$s'),
('Fac/Sup', '{}'),
('mFR', 'mean rate: {:.1f} sp/s'),
('baseline', 'baseline rate: {:.1f} sp/s'),
('included', 'included')]
fvals = [(meas, f.format(upars[meas]) if meas in upars else 'N/A')
for meas, f in fpars]
fvals = util.series_from_tuple_list(fvals)
# Create info lines.
# Start with unit name and 'excluded' tag if unit is excluded from task.
header = upars.Name if fullname else upars.task
header += ' [excluded]' if u.is_excluded() else ''
info_lines = '\n\n{}\n\n\n\n'.format(header)
# Add stimulus parameters.
s1locs, s2locs = [', '.join(['({:.1f}, {:.1f})'.format(x, y)
for (x,y) in u.TrData[(stim, 'Loc')].unique()])
for stim in ('S1', 'S2')]
info_lines += 'S1 locations: {} | S2 locations: {}\n\n'.format(s1locs,
s2locs)
# Unit type.
info_lines += '{} ({}, {}, {})\n\n'.format(fvals['isolation'],
fvals['SNR'], fvals['ISIvr'],
fvals['TrueSpikes'])
# Waveform duration.
info_lines += '{}\n\n'.format(fvals['mWfDur'])
#info_lines += '{} ({})\n\n'.format(fvals['BS/NS'], fvals['mWfDur'])
# Firing rate.
# Facilitatory or suppressive?
info_lines += '{}\n\n'.format(fvals['baseline'])
#info_lines += '{}, {}, {}\n\n'.format(fvals['Fac/Sup'], fvals['mFR'],
# fvals['baseline'])
return info_lines
# %% Generic plot decorator functions.
def add_unit_labels(ax, uids, x, y, sep='/', color='grey'):
"""Add unit labels to plot."""
if (len(uids) != len(x)) or (len(uids) != len(y)):
warnings.warn('Number of unit labels and data points differ.')
unames = [sep.join([str(ni) for ni in uid]) for uid in list(uids)]
for xi, yi, uni in zip(x, y, unames):
ax.annotate(uni, [xi, yi], color=color)
return
def plot_signif_prds(sign_prds, ypos=None, color='c', linewidth=4, ax=None):
"""Add significant intervals to axes."""
ax = axes(ax)
if ypos is None:
ymin, ymax = ax.get_ylim()
ypos = ymin + 0.01 * (ymax - ymin)
    # Assemble line segments and add them to axes.
line_segments = [[(t1, ypos), (t2, ypos)] for t1, t2 in sign_prds]
lc = mc.LineCollection(line_segments, colors=ColConv.to_rgba(color),
linewidth=linewidth)
lc.sign_prd = True # add label to find these artists later
ax.add_collection(lc)
def highlight_axes(ax=None, color='red', alpha=0.5, **kwargs):
"""Highlight axes."""
ax = axes(ax)
rect = mpl.patches.Rectangle(xy=(0, 0), width=1, height=1, color=color,
transform=ax.transAxes, alpha=alpha, **kwargs)
ax.add_artist(rect)
def plot_periods(prds=None, alpha=0.20, color='grey', ax=None, **kwargs):
"""Highlight segments (periods)."""
if prds is None:
# Both periods with default timing.
prds = [[stim] + list(constants.fixed_tr_prds.loc[stim])
for stim in constants.stim_dur.index]
ax = axes(ax)
xmin, xmax = ax.get_xlim()
for name, t_start, t_stop in prds:
if t_start is None:
t_start = xmin
if t_stop is None:
t_stop = xmax
ax.axvspan(t_start, t_stop, alpha=alpha, color=color, **kwargs)
def plot_events(events, add_names=True, color='black', alpha=1.0,
ls='--', lw=1, lbl_rotation=90, y_lbl=ypos_lbl,
lbl_ha='center', ax=None, **kwargs):
"""Plot all events of unit."""
if events is None:
return
ax = axes(ax)
# Init y extents of lines and positions of labels.
ylim = ax.get_ylim()
yloc = ylim[0] + y_lbl * (ylim[1] - ylim[0])
ymax = y_lbl-0.02 if add_names else 1
# Add each event to plot as a vertical line.
for ev_name, (time, label) in events.iterrows():
ax.axvline(time, color=color, alpha=alpha, lw=lw, ls=ls,
ymax=ymax, **kwargs)
# Add event label if requested
if add_names:
txt = ax.text(time, yloc, label, rotation=lbl_rotation,
fontsize='small', va='bottom', ha=lbl_ha)
txt.event_lbl = True # add label to find these artists later
def plot_event_markers(events, ypos=ypos_marker, marker='o', ms=6, mew=1,
mec='orange', mfc='None', ax=None, **kwargs):
"""Add event markers to plot."""
if events is None:
return
ax = axes(ax)
# Init y position.
ylim = ax.get_ylim()
y = ylim[0] + ypos * (ylim[1] - ylim[0])
for event_data in events:
ev_time = event_data['time']
ev_mec = event_data['color'] if 'color' in event_data else mec
ev_mfc = event_data['color'] if 'color' in event_data else mfc
marker = ax.plot(ev_time, y, marker, ms=ms, mew=mew, mec=ev_mec,
mfc=ev_mfc, **kwargs)[0]
marker.set_clip_on(False) # disable clipping
marker.event_marker = True # add label to find these artists later
def add_chance_level(ylevel=0.5, color='grey', ls='--', alpha=1, lw=1,
zorder=0, ax=None):
"""Add horizontal line denoting chance level for decoder accuracy plot."""
ax = axes(ax)
ax.axhline(ylevel, color=color, ls=ls, alpha=alpha, zorder=zorder, lw=lw)
def add_baseline(baseline=0, color='grey', ls='--', lw=1,
zorder=0, ax=None, **kwargs):
"""Add baseline rate to plot."""
ax = axes(ax)
if is_polar(ax): # polar plot
theta, radius = np.linspace(0, 2*np.pi, 100), baseline*np.ones(100)
ax.plot(theta, radius, color=color, ls=ls, lw=lw,
zorder=zorder, **kwargs)
else:
ax.axhline(baseline, color=color, ls=ls, lw=lw,
zorder=zorder, **kwargs)
def add_zero_line(axis='both', color='grey', ls='--', alpha=0.5,
zorder=0, ax=None):
"""Add zero line to x and/or y axes."""
ax = axes(ax)
if axis in ('x', 'both'):
ax.axhline(0, color=color, ls=ls, zorder=zorder, alpha=alpha)
if axis in ('y', 'both'):
ax.axvline(0, color=color, ls=ls, zorder=zorder, alpha=alpha)
def add_identity_line(equal_xy=False, color='grey', ls='--',
zorder=0, ax=None, lw=1):
"""Add identity (x=y) line to axes."""
ax = axes(ax)
if equal_xy: # Safer to use this option, if x and y axes are equalised.
xymin, xymax = 0, 1
transform = ax.transAxes
else: # Less safe because it breaks if axes limits are changed afterwards.
[xmin, xmax] = ax.get_xlim()
[ymin, ymax] = ax.get_ylim()
xymin = max(xmin, ymin)
xymax = min(xmax, ymax)
transform = None
xy = [xymin, xymax]
ax.plot(xy, xy, color=color, ls=ls, zorder=zorder, transform=transform)
def add_bar_height_label(ax, ndigit=2, vpos='top', bar_patches=None):
"""Put heights of bars as labels to top or botton of pars."""
ax = axes(ax)
# Use all patches by default.
if bar_patches is None:
bar_patches = ax.patches
frm_str = '{:.' + str(ndigit) + 'f}'
for p in bar_patches:
height = p.get_height()
x = p.get_x() + p.get_width()/2.
y = height if vpos == 'top' else 0
lbl = frm_str.format(height)
ax.text(x, y, lbl, ha='center', va='bottom')
def add_downward_arrow(ax, x, ystart, length, head_width=1,
head_length=1, fc='k', ec='k', **kwargs):
"""Add downward pointing arrow."""
ax = axes(ax)
ax.arrow(x, ystart, 0, -length, head_width=head_width,
head_length=head_length, fc=fc, ec=ec, **kwargs)
# %% Functions to adjust position of plot decorators
# (significance lines, event labels and markers, etc.).
def adjust_decorators(ax=None, ypos=None, y_lbl=ypos_lbl, y_mkr=ypos_marker):
"""
Meta function to adjust position of all plot decorators,
typically after resetting y-limit, e.g. to match across set of axes.
"""
move_signif_lines(ax, ypos)
move_event_lbls(ax, y_lbl)
move_event_markers(ax, y_mkr)
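# Usage sketch (illustrative only): after synchronising y-limits across a set
# of axes, re-anchor the decorators on each one.
#
#     sync_axes(axs, sync_y=True)
#     for ax in axs:
#         adjust_decorators(ax)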
def move_signif_lines(ax=None, ypos=None):
"""Move significance line segments to top of plot."""
# Init.
ax = axes(ax)
if ypos is None:
ypos = ax.get_ylim()[1] # move them to current top
# Find line segments in axes representing significant periods.
for c in ax.collections:
if isinstance(c, mc.LineCollection) and hasattr(c, 'sign_prd'):
segments = [np.array((seg[:, 0], seg.shape[0]*[ypos])).T
for seg in c.get_segments()]
c.set_segments(segments)
def move_event_lbls(ax=None, y_lbl=ypos_lbl):
"""Move event labels to top of plot."""
# Init.
ax = axes(ax)
ylim = ax.get_ylim()
y = ylim[0] + y_lbl * (ylim[1] - ylim[0])
    # Find text objects in axes representing event labels.
for txt in ax.texts:
if hasattr(txt, 'event_lbl'):
txt.set_y(y)
def move_event_markers(ax=None, y_mkr=ypos_marker):
"""Move event markers to top of plot."""
# Init.
ax = axes(ax)
ylim = ax.get_ylim()
y = ylim[0] + y_mkr * (ylim[1] - ylim[0])
    # Find line objects in axes representing event markers.
for line in ax.lines:
if hasattr(line, 'event_marker'):
line.set_ydata(y)
# %% Functions to adjust plot limits and aspect.
def set_limits(ax=None, xlim=None, ylim=None):
"""Generic function to set limits on axes."""
ax = axes(ax)
if xlim is not None:
ax.set_xlim(xlim)
if ylim is not None:
ax.set_ylim(ylim)
def match_xy_limits(ax=None, tick_interval=10):
"""Match aspect (limits) of x and y axes."""
ax = axes(ax)
xlim, ylim = ax.get_xlim(), ax.get_ylim()
lim = [min(xlim[0], ylim[0]), max(xlim[1], ylim[1])]
ax.set_xlim(lim)
ax.set_ylim(lim)
synch_ticks(ax, tick_interval)
def set_aspect(ax=None, aspect=1, adjustable='datalim', anchor=None):
"""Matches aspect ratio of axes."""
ax = axes(ax)
ax.set_aspect(aspect, adjustable, anchor)
def sync_axes(axs, sync_x=False, sync_y=False):
"""Synchronize x and/or y axis across list of axes."""
if not len(axs):
return
# Synchronise x-axis limits across plots.
if sync_x:
all_xlims = np.array([ax.get_xlim() for ax in axs])
xlim = (all_xlims[:, 0].min(), all_xlims[:, 1].max())
[ax.set_xlim(xlim) for ax in axs]
# Synchronise y-axis limits across plots.
if sync_y:
all_ylims = np.array([ax.get_ylim() for ax in axs])
ylim = (all_ylims[:, 0].min(), all_ylims[:, 1].max())
[ax.set_ylim(ylim) for ax in axs]
return axs
# %% Functions to set axes labels, title and legend.
def set_labels(ax=None, xlab=None, ylab=None, title=None, ytitle=None,
xlab_kws=dict(), ylab_kws=dict(), title_kws=dict()):
"""Generic function to set title, labels and ticks on axes."""
if ytitle is None:
ytitle = 1.04
ax = axes(ax)
if title is not None:
ax.set_title(title, y=ytitle, **title_kws)
if xlab is not None:
ax.set_xlabel(xlab, **xlab_kws)
if ylab is not None:
ax.set_ylabel(ylab, **ylab_kws)
ax.tick_params(axis='both', which='major')
def set_legend(ax, loc=0, frameon=False, **kwargs):
"""Add legend to axes."""
ax = axes(ax)
legend = ax.legend(loc=loc, frameon=frameon, **kwargs)
return legend
def hide_legend(ax=None):
"""Hide axes legend."""
ax = axes(ax)
if ax.legend_ is not None:
ax.legend().set_visible(False)
def hide_legend_title(ax=None):
"""Hide title of legend."""
ax = axes(ax)
if ax.legend_ is not None:
ax.legend_.set_title(None)
# %% Functions to set/hide ticks and spines.
def set_spines(ax=None, bottom=True, left=True, top=False, right=False):
"""Remove selected spines (axis lines) from axes."""
ax = axes(ax)
if is_polar(ax): # polar plot
ax.spines['polar'].set_visible(bottom)
else: # Cartesian plot
ax.spines['bottom'].set_visible(bottom)
ax.spines['left'].set_visible(left)
ax.spines['top'].set_visible(top)
ax.spines['right'].set_visible(right)
def hide_spines(ax=None):
"""Hides all spines of axes."""
set_spines(ax, False, False, False, False)
def set_ticks_side(ax=None, xtick_pos='bottom', ytick_pos='left'):
"""Remove selected tick marks on axes.
xtick_pos: [ 'bottom' | 'top' | 'both' | 'default' | 'none' ]
ytick_pos: [ 'left' | 'right' | 'both' | 'default' | 'none' ]
"""
ax = axes(ax)
ax.xaxis.set_ticks_position(xtick_pos)
ax.yaxis.set_ticks_position(ytick_pos)
def hide_ticks(ax=None, show_x_ticks=False, show_y_ticks=False):
"""Hide ticks (both marks and labels) on either or both axes."""
ax = axes(ax)
if not show_x_ticks:
ax.get_xaxis().set_ticks([])
if not show_y_ticks:
ax.get_yaxis().set_ticks([])
def hide_tick_marks(ax=None, show_x_tick_mrks=False, show_y_tick_mrks=False):
"""Hide ticks marks (but not tick labels) on either or both axes."""
ax = axes(ax)
if not show_x_tick_mrks:
ax.tick_params(axis='x', which='both', length=0)
if not show_y_tick_mrks:
ax.tick_params(axis='y', which='both', length=0)
def hide_tick_labels(ax=None, show_x_tick_lbls=False, show_y_tick_lbls=False):
"""Hide tick labels (but not tick marks) on either or both axes."""
ax = axes(ax)
if not show_x_tick_lbls:
ax.tick_params(labelbottom='off')
if not show_y_tick_lbls:
ax.tick_params(labelleft='off')
# Alternatively:
# lbls = len(ax.get_xticklabels()) * ['']
# ax.set_xticklabels(lbls)
def hide_axes(ax=None, show_x=False, show_y=False, show_polar=False):
"""Hide all ticks, labels and spines of either or both axes."""
# Hide axis ticks and labels.
ax = axes(ax)
ax.xaxis.set_visible(show_x)
ax.yaxis.set_visible(show_y)
# Hide requested spines of axes. (And don't change the others!)
to_hide = []
if is_polar(ax):
if not show_polar: # polar plot
to_hide.append('polar')
else:
if not show_x:
to_hide.extend(['bottom', 'top'])
if not show_y:
to_hide.extend(['left', 'right'])
[ax.spines[side].set_visible(False) for side in to_hide]
def get_tick_marks_and_labels(t1, t2, mrks_prd=t_tick_mrks_prd,
lbls_prd=t_tick_lbls_prd):
"""
Return tick marks and tick labels for time window.
t1 and t2 are assumed to be in ms.
"""
# Calculate tick mark positions.
t1_limit = np.ceil(t1/mrks_prd) * mrks_prd
t2_limit = t2 + 1
tick_mrks = np.arange(t1_limit, t2_limit, mrks_prd)
# Create tick labels for marks.
tick_lbls = [str(int(t)) if not t % lbls_prd else '' for t in tick_mrks]
return tick_mrks, tick_lbls
def set_xtick_labels(ax=None, pos=None, lbls=None, **kwargs):
"""Set tick labels on x axis."""
ax = axes(ax)
if pos is not None:
ax.set_xticks(pos)
if lbls is not None:
ax.set_xticklabels(lbls, **kwargs)
def set_ytick_labels(ax=None, pos=None, lbls=None, **kwargs):
"""Set tick labels on y axis."""
ax = axes(ax)
if pos is not None:
ax.set_yticks(pos)
if lbls is not None:
ax.set_yticklabels(lbls, **kwargs)
def rot_xtick_labels(ax=None, rot=45, ha='right'):
"""Rotate labels on x axis."""
ax = axes(ax)
ax.set_xticklabels(ax.get_xticklabels(), rotation=rot, ha=ha)
def rot_ytick_labels(ax=None, rot=45, va='top'):
"""Rotate labels on y axis."""
ax = axes(ax)
ax.set_yticklabels(ax.get_yticklabels(), rotation=rot, va=va)
def set_max_n_ticks(ax=None, max_n_ticks=5, axis='both'):
"""Set maximum number of ticks on axes."""
ax = axes(ax)
ax.locator_params(axis=axis, nbins=max_n_ticks-1)
def sparsify_tick_labels(fig, ax=None, axis='x', freq=10, istart=0,
reverse=False):
"""Sparsify tick labels on y axis by keeping only every n-th label."""
ax = axes(ax)
# Must draw the canvas first to position the ticks.
fig.canvas.draw()
    all_lbls = ax.get_xticklabels() if axis == 'x' else ax.get_yticklabels()
    fset_lbls = set_xtick_labels if axis == 'x' else set_ytick_labels
lbls = [lbl.get_text() for lbl in all_lbls]
if reverse:
lbls = lbls[::-1]
lbls = [lbl if i >= istart and (i-istart) % freq == 0 else ''
for i, lbl in enumerate(lbls)]
if reverse:
lbls = lbls[::-1]
fset_lbls(ax, lbls=lbls)
def synch_ticks(ax=None, tick_interval=10):
"""Synchronize tick labels between axes."""
ax = axes(ax)
# Puts ticks at regular intervals.
loc = plticker.MultipleLocator(base=tick_interval)
ax.xaxis.set_major_locator(loc)
ax.yaxis.set_major_locator(loc)
# %% Functions to create and access axes and figures.
def axes(ax=None, **kwargs):
"""Return new (or passed) axes."""
if ax is None:
ax = plt.gca(**kwargs)
return ax
def is_polar(ax=None):
"""Check if axes is polar type."""
ax = axes(ax)
im_polar = 'polar' in ax.spines
return im_polar
def add_mock_axes(fig, sps, **kwargs):
"""Add mock (empty) axes to figure."""
ax = fig.add_subplot(sps, **kwargs)
hide_axes(ax=ax)
return ax
def figure(fig=None, **kwargs):
"""Return new (or passed) figure."""
if fig is None:
fig = plt.figure(**kwargs)
return fig
def gridspec(nrow, ncol, gsp=None, **kwargs):
"""Return new GridSpec instance."""
if gsp is None:
gsp = gs.GridSpec(nrow, ncol, **kwargs)
return gsp
def sps_fig(sps=None, fig=None):
"""Return new (or passed) sps and figure."""
fig = figure(fig)
if sps is None:
sps = gridspec(1, 1)[0]
return sps, fig
def get_gs_subplots(nrow=None, ncol=None, subw=2, subh=2, ax_kws_list=None,
create_axes=False, as_array=True, fig=None, **kwargs):
"""Return list or array of GridSpec subplots."""
# If ncol not specified: get approx'ly equal number of rows and columns.
if ncol is None:
nplots = nrow
nrow = int(np.floor(np.sqrt(nplots)))
ncol = int(np.ceil(nplots / nrow))
else:
nplots = nrow * ncol
# Create figure and gridspec object.
if fig is None:
fig = figure(figsize=(ncol*subw, nrow*subh))
gsp = gridspec(nrow, ncol, **kwargs)
axes = None
# Create list (or array) of axes.
if create_axes:
# Don't pass anything by default.
if ax_kws_list is None:
ax_kws_list = nplots * [{}]
# If single dictionary has been passed, instead of list of
# plot-specific params in dict, use it for all subplots.
elif isinstance(ax_kws_list, dict):
ax_kws_list = nplots * [ax_kws_list]
# Create axes objects.
axes = [fig.add_subplot(gs, **ax_kws)
for gs, ax_kws in zip(gsp, ax_kws_list)]
# Turn off last (unrequested) axes on grid, if any.
if nplots < nrow * ncol:
[ax.axis('off') for ax in axes[nplots:]]
# Convert from list to array.
if as_array:
axes = np.array(axes).reshape(gsp.get_geometry())
return fig, gsp, axes
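# Usage sketch (illustrative only): a 2x3 grid of ready-made axes.
#
#     fig, gsp, axs = get_gs_subplots(nrow=2, ncol=3, create_axes=True)
#     for ax in axs.ravel():
#         format_plot(ax, xlab='time', ylab='rate')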
def embed_gsp(outer_gsp, nrow, ncol, **kwargs):
"""Return GridSpec embedded into outer SubplotSpec."""
sub_gsp = gs.GridSpecFromSubplotSpec(nrow, ncol, outer_gsp, **kwargs)
return sub_gsp
def sps_add_axes(fig, sps, nrow, ncol, **kwargs):
"""Add axes to SubplotSpec."""
gsp = embed_gsp(sps, nrow, ncol)
axes = [fig.add_subplot(sub_gsp, **kwargs) for sub_gsp in gsp]
return axes
# %% Functions to save figure.
def save_fig(ffig, fig=None, title=None, fs_title='xx-large', ytitle=1.01,
va_title='bottom', rect_height=None, border=0.03, pad=1.0,
h_pad=None, w_pad=None, dpi=300, bbox_extra_artists=None,
close=True, tight_layout=True, **kwargs):
"""Save composite (GridSpec) figure to file."""
# Init figure and folder to save figure into.
if ffig is None:
return
util.create_dir(ffig)
if fig is None:
fig = plt.gcf()
# Add super title to figure.
if title is not None:
fig.suptitle(title, y=ytitle, fontsize=fs_title,
va=va_title, **kwargs)
# Adjust plotting area and set tight layout.
if rect_height is None: # relative height of plotted area
rect_height = ytitle - border
rect = [border, border, 1.0-border, rect_height]
if tight_layout:
fig.tight_layout(rect=rect, pad=pad, h_pad=h_pad, w_pad=w_pad)
# Suppress warning about axes being incompatible with tight layout.
with warnings.catch_warnings():
warnings.simplefilter('ignore', category=UserWarning)
# Write figure out into file.
fig.savefig(ffig, bbox_extra_artists=bbox_extra_artists,
dpi=dpi, bbox_inches='tight')
# Finally, close figure.
if close:
plt.close(fig)
# %% Miscellanous plotting related functions.
def get_cmap(cm_name='jet', **kwargs):
"""Return colormap instance."""
cmap = plt.get_cmap(cm_name, **kwargs)
return cmap
def get_rgba_values(v, cm_name='jet', **kwargs):
"""Return RGBA values for each scalar value in vector."""
cmap = get_cmap(cm_name, **kwargs)
v_rgba = [cmap(vi) for vi in v]
return v_rgba
def get_colors(mpl_colors=False, as_cycle=True):
"""Return colour cycle."""
if mpl_colors:
        col_cycle = mpl.rcParams['axes.prop_cycle'] # mpl default color list
        cols = [d['color'] for d in col_cycle]
else:
cols = my_color_list # custom list of colors
if as_cycle: # for cyclic indexing
cols = cycle(cols)
return cols
def convert_to_rgb(cname):
"""Return RGB tuple of color name (e.g. 'red', 'r', etc)."""
rgb = ColConv.to_rgb(cname)
return rgb
def convert_to_rgba(cname):
"""Return RGBA tuple of color name (e.g. 'red', 'r', etc)."""
rgba = ColConv.to_rgba(cname)
return rgba
def get_cmat(to_color, fcol='blue', bcol='white', ncol='grey', rgba=False):
"""
Return foreground/background/non-testable RGB/RGBA color matrix
for array of points.
"""
# Init foreground and background colors.
if isinstance(fcol, str):
fcol = convert_to_rgb(fcol)
if isinstance(bcol, str):
bcol = convert_to_rgb(bcol)
if isinstance(ncol, str):
ncol = convert_to_rgb(ncol)
# Create color matrix of points.
col_mat = np.array(len(to_color) * [ncol])
col_mat[to_color == 1, :] = fcol
col_mat[to_color == 0, :] = bcol
return col_mat
def get_artist(label, color, artist_type='patch', **kwargs):
"""Return an artist. Useful for creating custom legends."""
if artist_type == 'patch':
artist = mpl.patches.Patch(color=color, label=label, **kwargs)
else: # line
artist = mpl.lines.Line2D([], [], color=color, label=label, **kwargs)
return artist
# %% Plotting related meta-functions.
def format_plot(ax=None, xlim=None, ylim=None, xlab=None, ylab=None,
title=None, ytitle=None):
"""Generic plotting function."""
# Format plot.
set_limits(ax, xlim, ylim)
set_ticks_side(ax)
set_spines(ax)
set_labels(ax, xlab, ylab, title, ytitle)
return ax
def set_style(context='notebook', style='darkgrid', palette='deep',
color_codes=True, rc=seal_rc_params):
"""Set Seaborn style, context and other matplotlib style parameters."""
# 'style': 'darkgrid', 'whitegrid', 'dark', 'white' or 'ticks'.
# 'context': 'notebook', 'paper', 'poster' or 'talk'.
sns.set(context=context, style=style, palette=palette,
color_codes=color_codes, rc=rc)
def inline_on():
"""Turn on inline plotting."""
plt.ion()
def inline_off():
"""Turn off inline plotting."""
plt.ioff()
| gpl-3.0 |
fatiando/fatiando | gallery/gridder/padding.py | 6 | 1944 | """
Pad the edges of grids using various methods
=============================================
Sometimes it is useful to add some padding points to the edges of grids, for
example during FFT-based processing to avoid edge effects.
Function :func:`fatiando.gridder.pad_array` does this using various padding
methods.
Functions
:func:`fatiando.gridder.unpad_array` (to remove padding) and
:func:`fatiando.gridder.pad_coords` (to created padded coordinate arrays)
offer support for common operations done while padding.
"""
import matplotlib.pyplot as plt
import numpy as np
from fatiando import gridder
# Generate some synthetic data
area = (-100, 100, -60, 60)
shape = (101, 172)
# The padding functions need data to be on a regular grid and represented by a
# 2D numpy array. So I'll convert the outputs to 2D.
x, y = gridder.regular(area, shape)
x = x.reshape(shape)
y = y.reshape(shape)
data = np.sin(0.1*x)*np.cos(0.09*y) + 0.001*(x**2 + y**2)
# Pad arrays with all the padding options and make a single figure with all of
# them.
fig, axes = plt.subplots(2, 4, figsize=(10, 6), sharex=True, sharey=True)
ax = axes[0, 0]
ax.set_title('Original')
# Keep all plots on the same color scale of the original data
vmin, vmax = data.min(), data.max()
ax.pcolormesh(y, x, data, cmap='RdBu_r', vmin=vmin, vmax=vmax)
padtypes = ['0', 'mean', 'edge', 'lintaper', 'reflection', 'oddreflection',
'oddreflectiontaper']
for padtype, ax in zip(padtypes, axes.ravel()[1:]):
padded_data, nps = gridder.pad_array(data, padtype=padtype)
# Get coordinate vectors
pad_x, pad_y = gridder.pad_coords([x, y], shape, nps)
padshape = padded_data.shape
ax.set_title(padtype)
ax.pcolormesh(pad_y.reshape(padshape), pad_x.reshape(padshape),
padded_data, cmap='RdBu_r', vmin=vmin, vmax=vmax)
ax.set_xlim(pad_y.min(), pad_y.max())
ax.set_ylim(pad_x.min(), pad_x.max())
plt.tight_layout(w_pad=0)
plt.show()
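# If the padding later needs to be removed (e.g. after FFT-based processing),
# gridder.unpad_array restores the original grid; a hypothetical sketch, where
# some_fft_filter is a placeholder for real processing:
#
#     filtered = some_fft_filter(padded_data)
#     unpadded = gridder.unpad_array(filtered, nps)
#     assert unpadded.shape == shape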
| bsd-3-clause |
mjgrav2001/scikit-learn | examples/cluster/plot_agglomerative_clustering_metrics.py | 402 | 4492 | """
Agglomerative clustering with different metrics
===============================================
Demonstrates the effect of different metrics on the hierarchical clustering.
The example is engineered to show the effect of the choice of different
metrics. It is applied to waveforms, which can be seen as
high-dimensional vector. Indeed, the difference between metrics is
usually more pronounced in high dimension (in particular for euclidean
and cityblock).
We generate data from three groups of waveforms. Two of the waveforms
(waveform 1 and waveform 2) are proportional one to the other. The cosine
distance is invariant to a scaling of the data, as a result, it cannot
distinguish these two waveforms. Thus even with no noise, clustering
using this distance will not separate out waveform 1 and 2.
We add observation noise to these waveforms. We generate very sparse
noise: only 6% of the time points contain noise. As a result, the
l1 norm of this noise (i.e. the "cityblock" distance) is, relative to the
signal, much smaller than its l2 norm (the "euclidean" distance). This can be
seen on the inter-class
distance matrices: the values on the diagonal, that characterize the
spread of the class, are much bigger for the Euclidean distance than for
the cityblock distance.
When we apply clustering to the data, we find that the clustering
reflects what was in the distance matrices. Indeed, for the Euclidean
distance, the classes are ill-separated because of the noise, and thus
the clustering does not separate the waveforms. For the cityblock
distance, the separation is good and the waveform classes are recovered.
Finally, the cosine distance does not separate waveforms 1 and 2 at all,
so the clustering puts them in the same cluster.
"""
# Author: Gael Varoquaux
# License: BSD 3-Clause or CC-0
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cluster import AgglomerativeClustering
from sklearn.metrics import pairwise_distances
np.random.seed(0)
# Generate waveform data
n_features = 2000
t = np.pi * np.linspace(0, 1, n_features)
def sqr(x):
return np.sign(np.cos(x))
X = list()
y = list()
for i, (phi, a) in enumerate([(.5, .15), (.5, .6), (.3, .2)]):
for _ in range(30):
phase_noise = .01 * np.random.normal()
amplitude_noise = .04 * np.random.normal()
additional_noise = 1 - 2 * np.random.rand(n_features)
# Make the noise sparse
additional_noise[np.abs(additional_noise) < .997] = 0
X.append(12 * ((a + amplitude_noise)
* (sqr(6 * (t + phi + phase_noise)))
+ additional_noise))
y.append(i)
X = np.array(X)
y = np.array(y)
n_clusters = 3
labels = ('Waveform 1', 'Waveform 2', 'Waveform 3')
# Plot the ground-truth labelling
plt.figure()
plt.axes([0, 0, 1, 1])
for l, c, n in zip(range(n_clusters), 'rgb',
labels):
lines = plt.plot(X[y == l].T, c=c, alpha=.5)
lines[0].set_label(n)
plt.legend(loc='best')
plt.axis('tight')
plt.axis('off')
plt.suptitle("Ground truth", size=20)
# Plot the distances
for index, metric in enumerate(["cosine", "euclidean", "cityblock"]):
avg_dist = np.zeros((n_clusters, n_clusters))
plt.figure(figsize=(5, 4.5))
for i in range(n_clusters):
for j in range(n_clusters):
avg_dist[i, j] = pairwise_distances(X[y == i], X[y == j],
metric=metric).mean()
avg_dist /= avg_dist.max()
for i in range(n_clusters):
for j in range(n_clusters):
plt.text(i, j, '%5.3f' % avg_dist[i, j],
verticalalignment='center',
horizontalalignment='center')
plt.imshow(avg_dist, interpolation='nearest', cmap=plt.cm.gnuplot2,
vmin=0)
plt.xticks(range(n_clusters), labels, rotation=45)
plt.yticks(range(n_clusters), labels)
plt.colorbar()
plt.suptitle("Interclass %s distances" % metric, size=18)
plt.tight_layout()
# Plot clustering results
for index, metric in enumerate(["cosine", "euclidean", "cityblock"]):
model = AgglomerativeClustering(n_clusters=n_clusters,
linkage="average", affinity=metric)
model.fit(X)
plt.figure()
plt.axes([0, 0, 1, 1])
for l, c in zip(np.arange(model.n_clusters), 'rgbk'):
plt.plot(X[model.labels_ == l].T, c=c, alpha=.5)
plt.axis('tight')
plt.axis('off')
plt.suptitle("AgglomerativeClustering(affinity=%s)" % metric, size=20)
plt.show()
| bsd-3-clause |
Yurlungur/cactus_scripts | plot_gaugewave_of_time.py | 1 | 6944 | #!/usr/bin/env python2
"""
plot_gaugewave_of_time.py
Author: Jonah Miller ([email protected])
Time-stamp: <2014-02-07 18:01:28 (jonah)>
This is a simple library to plot the norm of g_{xx} as a function of
time for a gaugewave. Several gaugewave files can be plotted at the
same time.
Example call
python2 plot_gaugewave_of_time.py *.metric.x.asc
"""
# Imports
# ----------------------------------------------------------------------
import numpy as np # For array support
import extract_tensor_data as etd # For tensor support
import plot_gaugewave as pg
# Optimization tools
from scipy.optimize import curve_fit
# Plot tools
import matplotlib as mpl
import matplotlib.pyplot as plt
# Linear algebra
from numpy.linalg import norm
# For command line
import sys
# ----------------------------------------------------------------------
# Global constants
# ----------------------------------------------------------------------
NORM_ERROR_TITLE = "{}-norm of Gaugewave {} Error as a Function of Time"
XLABEL = "Time (in wave periods)"
NORM_ERROR_Y_LABEL = "{}-norm({})"
T_INDEX = (0,0) # The indices of the element of the tensor we care about
LINEWIDTH = 5
FONTSIZE = 20
COORD = 0 # The x coordinate
ORDER = 2 # Order of norm
TENSOR_NAME = r'$g_{xx}$' # Name of the tensor we want to plot
RK_ORDER=4
# ----------------------------------------------------------------------
def gaugewave_gxx(x,t,A,D):
"""
The analytic form of the gxx component of a gaugewave as a
function of x and t. It is independent of y and z.
"""
return 1 - A*np.sin(2*np.pi*(x-t)/D)
def gaugewave_kxx(x,t,A,D):
"""
The analytic form of the kxx component of a gaugewave as a
function of x and t. It is independent of y and z.
"""
numerator = -np.pi*A*np.cos(2*np.pi*(x-t)/D)
denominator = D*np.sqrt(1-A*np.sin(2*np.pi*(x-t)/D))
return numerator/denominator
def get_Tij_data_of_index(i,j,coord,filename_list):
"""
Takes a list of filenames (strings) and generates a list of
"evolutions." Each evolution is a list containing snapshots for a
given file and contains a list of snapshots of the form
[time,positions,Tijs]
where (positions,Tijs) are sufficient data to plot the Tij element
of the tensor as a function of position along the coord-axis. time
is the time of the snapshot.
The function also returns an "h_list" containing the lattice
spacing of the grid in the file for each file. Ordering is the
same as filename list.
"""
evolutions_list = []
h_list = []
for filename in filename_list:
evolutions = []
data = etd.extract_data(filename)
for snapshot in data:
time = snapshot[0][4]
positions,Tijs = etd.element_of_position_at_snapshot(i,j,coord,
snapshot)
evolutions.append([time,positions,Tijs])
evolutions_list.append(evolutions)
h_list.append(etd.get_lattice_spacing(data))
return evolutions_list,h_list
def get_norm_error(function,position,Tij,time,order=2):
"""
Takes the position and Tij data for a given spacetime at a given
time and calculates the norm of the error at that time.
"""
return norm(pg.get_error(function,position,Tij,time),ord=order)
def get_norm_error_of_time(function,evolution,order=2):
"""
Takes an evolution and calculates its distance from function as a
function of time.
"""
times = []
errors = []
for snapshot in evolution:
times.append(snapshot[0])
errors.append(get_norm_error(function,
snapshot[1],snapshot[2],snapshot[0],
order))
times = np.array(times)
errors = np.array(errors)
return times,errors
def calculate_analytic_form(function,snapshot):
"""
Takes a snapshot extracted from get_Tij_data_of_index at a time
where it is known to be analytically correct and fits function to
it. Returns a fitted function which should be the analytic
solution.
The function is assumed to be of the form
f(x,t,A,D)
where t is the time, x is the position, A is the amplitude, and D
is the period.
"""
# Convenience variables
time = snapshot[0]
positions = snapshot[1]
Tijs = snapshot[2]
assert len(positions) == len(Tijs)
# Set the time in the function we want to fit
func1 = lambda x,A,D: function(x,time,A,D)
popt,pcov = curve_fit(func1,positions,Tijs)
return lambda x,t: function(x,t,*popt)
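# A minimal, hypothetical sketch (never called by this script) of how
# calculate_analytic_form is meant to be used: fit the amplitude and period
# of gaugewave_gxx to the first snapshot of an evolution, then evaluate the
# fitted analytic solution at a later time. The snapshot layout
# [time, positions, Tijs] follows get_Tij_data_of_index above.
def _example_fit_first_snapshot(evolution):
    first_snapshot = evolution[0]  # [time, positions, Tijs]
    fitted_gxx = calculate_analytic_form(gaugewave_gxx, first_snapshot)
    # Evaluate the fitted solution one period later (t = 1).
    return fitted_gxx(np.array(first_snapshot[1]), 1.0)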
def plot_norm_error_of_time(function_list,evolutions,filename_list,
h_list=False,
tensor_name=TENSOR_NAME,
order=ORDER,
rk_order=RK_ORDER):
"""
Plots the norm of the error of the tensor extracted using
get_Tij_data_of_index.
function_list is a list of functions for the analytic
solution---one for each filename.
"""
norm_error_title = NORM_ERROR_TITLE
xlabel = XLABEL
norm_error_y_label = NORM_ERROR_Y_LABEL
# First find the norm error as a function
times_list = [None]*len(filename_list)
errors_list = [None]*len(filename_list)
for i in range(len(filename_list)):
times,errors = get_norm_error_of_time(function_list[i],
evolutions[i],order)
times_list[i] = times
errors_list[i] = errors
# Change the font size
mpl.rcParams.update({'font.size': FONTSIZE})
    # If a list of lattice spacings is included, rescale the errors by
    # h**(-rk_order)
if h_list:
norm_error_title += '\nRescaled by Lattice Spacing'
norm_error_y_label = 'Rescaled '+norm_error_y_label
lines = [plt.loglog(times_list[i],(h_list[i]**(-rk_order))*errors_list[i],linewidth=LINEWIDTH) for i in range(len(times_list))]
else:
lines = [plt.loglog(times_list[i],errors_list[i],linewidth=LINEWIDTH)\
for i in range(len(times_list))]
plt.legend(filename_list)
# Plot parameters
plt.title(norm_error_title.format(order,tensor_name))
plt.xlabel(xlabel)
plt.ylabel(norm_error_y_label.format(order,tensor_name))
# Show plot
plt.show()
return
def main(filename_list):
analytic_function = gaugewave_gxx # analytic function we use
evolutions_list,h_list = get_Tij_data_of_index(T_INDEX[0],T_INDEX[1],
COORD,
filename_list)
analytic_func_list = [calculate_analytic_form(analytic_function,evolutions_list[i][0]) for i in range(len(filename_list))]
plot_norm_error_of_time(analytic_func_list,evolutions_list,filename_list,
False,TENSOR_NAME,ORDER,RK_ORDER)
return
if __name__ == "__main__":
main(sys.argv[1:])
| gpl-2.0 |
bfelbo/deepmoji | deepmoji/finetuning.py | 2 | 23552 | """ Finetuning functions for doing transfer learning to new datasets.
"""
from __future__ import print_function
import sys
import uuid
from time import sleep
import h5py
import math
import pickle
import numpy as np
from keras.layers.wrappers import Bidirectional, TimeDistributed
from sklearn.metrics import f1_score
from keras.callbacks import ModelCheckpoint, EarlyStopping, CSVLogger
from keras.optimizers import Adam
from keras.utils.np_utils import to_categorical
from keras.models import model_from_json
from global_variables import (
FINETUNING_METHODS,
FINETUNING_METRICS,
WEIGHTS_DIR)
from tokenizer import tokenize
from sentence_tokenizer import SentenceTokenizer
from attlayer import AttentionWeightedAverage
def load_benchmark(path, vocab, extend_with=0):
""" Loads the given benchmark dataset.
Tokenizes the texts using the provided vocabulary, extending it with
words from the training dataset if extend_with > 0. Splits them into
three lists: training, validation and testing (in that order).
Also calculates the maximum length of the texts and the
suggested batch_size.
# Arguments:
path: Path to the dataset to be loaded.
vocab: Vocabulary to be used for tokenizing texts.
extend_with: If > 0, the vocabulary will be extended with up to
extend_with tokens from the training set before tokenizing.
# Returns:
A dictionary with the following fields:
texts: List of three lists, containing tokenized inputs for
training, validation and testing (in that order).
labels: List of three lists, containing labels for training,
validation and testing (in that order).
added: Number of tokens added to the vocabulary.
batch_size: Batch size.
maxlen: Maximum length of an input.
"""
# Pre-processing dataset
with open(path) as dataset:
data = pickle.load(dataset)
# Decode data
try:
texts = [unicode(x) for x in data['texts']]
except UnicodeDecodeError:
texts = [x.decode('utf-8') for x in data['texts']]
# Extract labels
labels = [x['label'] for x in data['info']]
batch_size, maxlen = calculate_batchsize_maxlen(texts)
st = SentenceTokenizer(vocab, maxlen)
# Split up dataset. Extend the existing vocabulary with up to extend_with
# tokens from the training dataset.
texts, labels, added = st.split_train_val_test(texts,
labels,
[data['train_ind'],
data['val_ind'],
data['test_ind']],
extend_with=extend_with)
return {'texts': texts,
'labels': labels,
'added': added,
'batch_size': batch_size,
'maxlen': maxlen}
def calculate_batchsize_maxlen(texts):
""" Calculates the maximum length in the provided texts and a suitable
batch size. Rounds up maxlen to the nearest multiple of ten.
# Arguments:
texts: List of inputs.
# Returns:
Batch size,
max length
"""
def roundup(x):
return int(math.ceil(x / 10.0)) * 10
# Calculate max length of sequences considered
# Adjust batch_size accordingly to prevent GPU overflow
lengths = [len(tokenize(t)) for t in texts]
maxlen = roundup(np.percentile(lengths, 80.0))
batch_size = 250 if maxlen <= 100 else 50
return batch_size, maxlen
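# Illustrative sketch only (not used by the library): the 80th-percentile
# token count is rounded up to the nearest multiple of ten, so a handful of
# short texts ends up with a small maxlen and, since maxlen <= 100, a
# batch_size of 250.
def _example_batchsize_maxlen():
    texts = [u'this is a short example text'] * 5
    return calculate_batchsize_maxlen(texts)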
def finetuning_callbacks(checkpoint_path, patience, verbose):
""" Callbacks for model training.
# Arguments:
checkpoint_path: Where weight checkpoints should be saved.
patience: Number of epochs with no improvement after which
training will be stopped.
# Returns:
Array with training callbacks that can be passed straight into
model.fit() or similar.
"""
cb_verbose = (verbose >= 2)
checkpointer = ModelCheckpoint(monitor='val_loss', filepath=checkpoint_path,
save_best_only=True, verbose=cb_verbose)
earlystop = EarlyStopping(monitor='val_loss', patience=patience,
verbose=cb_verbose)
return [checkpointer, earlystop]
def freeze_layers(model, unfrozen_types=[], unfrozen_keyword=None):
""" Freezes all layers in the given model, except for ones that are
explicitly specified to not be frozen.
# Arguments:
model: Model whose layers should be modified.
unfrozen_types: List of layer types which shouldn't be frozen.
unfrozen_keyword: Name keywords of layers that shouldn't be frozen.
# Returns:
Model with the selected layers frozen.
"""
for l in model.layers:
if len(l.trainable_weights):
trainable = (type(l) in unfrozen_types or
(unfrozen_keyword is not None and unfrozen_keyword in l.name))
change_trainable(l, trainable, verbose=False)
return model
def change_trainable(layer, trainable, verbose=False):
""" Helper method that fixes some of Keras' issues with wrappers and
trainability. Freezes or unfreezes a given layer.
# Arguments:
layer: Layer to be modified.
trainable: Whether the layer should be frozen or unfrozen.
verbose: Verbosity flag.
"""
layer.trainable = trainable
if type(layer) == Bidirectional:
layer.backward_layer.trainable = trainable
layer.forward_layer.trainable = trainable
    if type(layer) == TimeDistributed:
        layer.layer.trainable = trainable
if verbose:
action = 'Unfroze' if trainable else 'Froze'
print("{} {}".format(action, layer.name))
def find_f1_threshold(y_val, y_pred_val, y_test, y_pred_test,
average='binary'):
""" Choose a threshold for F1 based on the validation dataset
(see https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4442797/
for details on why to find another threshold than simply 0.5)
# Arguments:
y_val: Outputs of the validation dataset.
y_pred_val: Predicted outputs of the validation dataset.
y_test: Outputs of the testing dataset.
y_pred_test: Predicted outputs of the testing dataset.
# Returns:
F1 score for the given data and
the corresponding F1 threshold
"""
thresholds = np.arange(0.01, 0.5, step=0.01)
f1_scores = []
for t in thresholds:
y_pred_val_ind = (y_pred_val > t)
f1_val = f1_score(y_val, y_pred_val_ind, average=average)
f1_scores.append(f1_val)
best_t = thresholds[np.argmax(f1_scores)]
y_pred_ind = (y_pred_test > best_t)
f1_test = f1_score(y_test, y_pred_ind, average=average)
return f1_test, best_t
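# Hypothetical illustration (not called anywhere): pick the threshold on the
# validation predictions and reuse it to score the test predictions.
def _example_find_f1_threshold():
    y_val = np.array([0, 0, 1, 1])
    y_pred_val = np.array([0.10, 0.20, 0.30, 0.40])
    y_test = np.array([0, 1, 0, 1])
    y_pred_test = np.array([0.05, 0.35, 0.15, 0.45])
    return find_f1_threshold(y_val, y_pred_val, y_test, y_pred_test)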
def relabel(y, current_label_nr, nb_classes):
""" Makes a binary classification for a specific class in a
multi-class dataset.
# Arguments:
y: Outputs to be relabelled.
current_label_nr: Current label number.
nb_classes: Total number of classes.
# Returns:
Relabelled outputs of a given multi-class dataset into a binary
classification dataset.
"""
# Handling binary classification
if nb_classes == 2 and len(y.shape) == 1:
return y
y_new = np.zeros(len(y))
y_cut = y[:, current_label_nr]
label_pos = np.where(y_cut == 1)[0]
y_new[label_pos] = 1
return y_new
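# Hypothetical illustration (not called anywhere): relabelling a one-hot
# encoded 3-class target into a binary target for class number 1.
def _example_relabel():
    y = np.array([[1, 0, 0],
                  [0, 1, 0],
                  [0, 0, 1],
                  [0, 1, 0]])
    # Expected result: array([0., 1., 0., 1.])
    return relabel(y, current_label_nr=1, nb_classes=3)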
def sampling_generator(X_in, y_in, batch_size, epoch_size=25000,
upsample=False, seed=42):
""" Returns a generator that enables larger epochs on small datasets and
has upsampling functionality.
# Arguments:
X_in: Inputs of the given dataset.
y_in: Outputs of the given dataset.
batch_size: Batch size.
epoch_size: Number of samples in an epoch.
upsample: Whether upsampling should be done. This flag should only be
set on binary class problems.
seed: Random number generator seed.
# Returns:
Sample generator.
"""
np.random.seed(seed)
if upsample:
# Should only be used on binary class problems
assert len(y_in.shape) == 1
neg = np.where(y_in == 0)[0]
pos = np.where(y_in == 1)[0]
assert epoch_size % 2 == 0
samples_pr_class = int(epoch_size / 2)
else:
ind = range(len(X_in))
# Keep looping until training halts
while True:
if not upsample:
# Randomly sample observations in a balanced way
sample_ind = np.random.choice(ind, epoch_size, replace=True)
X, y = X_in[sample_ind], y_in[sample_ind]
else:
# Randomly sample observations in a balanced way
sample_neg = np.random.choice(neg, samples_pr_class, replace=True)
sample_pos = np.random.choice(pos, samples_pr_class, replace=True)
X = np.concatenate((X_in[sample_neg], X_in[sample_pos]), axis=0)
y = np.concatenate((y_in[sample_neg], y_in[sample_pos]), axis=0)
# Shuffle to avoid labels being in specific order
# (all negative then positive)
p = np.random.permutation(len(X))
X, y = X[p], y[p]
label_dist = np.mean(y)
assert(label_dist > 0.45)
assert(label_dist < 0.55)
# Hand-off data using batch_size
for i in range(int(epoch_size / batch_size)):
start = i * batch_size
end = min(start + batch_size, epoch_size)
yield (X[start:end], y[start:end])
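# A minimal usage sketch (not executed on import): draw one upsampled batch
# from the generator for a toy, imbalanced binary dataset.
def _example_sampling_generator():
    X = np.arange(100).reshape(100, 1)
    y = np.array([0] * 90 + [1] * 10)
    gen = sampling_generator(X, y, batch_size=10, epoch_size=100,
                             upsample=True)
    X_batch, y_batch = next(gen)
    return X_batch.shape, y_batch.mean()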
def finetune(model, texts, labels, nb_classes, batch_size, method,
metric='acc', epoch_size=5000, nb_epochs=1000,
error_checking=True, verbose=1):
""" Compiles and finetunes the given model.
# Arguments:
model: Model to be finetuned
texts: List of three lists, containing tokenized inputs for training,
validation and testing (in that order).
labels: List of three lists, containing labels for training,
validation and testing (in that order).
nb_classes: Number of classes in the dataset.
batch_size: Batch size.
method: Finetuning method to be used. For available methods, see
FINETUNING_METHODS in global_variables.py.
epoch_size: Number of samples in an epoch.
nb_epochs: Number of epochs. Doesn't matter much as early stopping is used.
metric: Evaluation metric to be used. For available metrics, see
FINETUNING_METRICS in global_variables.py.
error_checking: If set to True, warnings will be printed when the label
list has the wrong dimensions.
verbose: Verbosity flag.
# Returns:
Model after finetuning,
score after finetuning using the provided metric.
"""
if method not in FINETUNING_METHODS:
raise ValueError('ERROR (finetune): Invalid method parameter. '
'Available options: {}'.format(FINETUNING_METHODS))
if metric not in FINETUNING_METRICS:
raise ValueError('ERROR (finetune): Invalid metric parameter. '
'Available options: {}'.format(FINETUNING_METRICS))
(X_train, y_train) = (texts[0], labels[0])
(X_val, y_val) = (texts[1], labels[1])
(X_test, y_test) = (texts[2], labels[2])
checkpoint_path = '{}/deepmoji-checkpoint-{}.hdf5' \
.format(WEIGHTS_DIR, str(uuid.uuid4()))
# Check dimension of labels
if error_checking:
for ls in [y_train, y_val, y_test]:
if not ls.ndim == 1:
print('WARNING (finetune): The dimension of the '
'provided label list does not match the expected '
'value. When using the \'{}\' metric, the labels '
'should be a 1-dimensional array. '
'Input shape was {}'.format(metric, ls.shape))
break
if method in ['last', 'new']:
lr = 0.001
elif method in ['full', 'chain-thaw']:
lr = 0.0001
loss = 'binary_crossentropy' if nb_classes <= 2 \
else 'categorical_crossentropy'
# Freeze layers if using last
if method == 'last':
model = freeze_layers(model, unfrozen_keyword='softmax')
# Compile model, for chain-thaw we compile it later (after freezing)
if method != 'chain-thaw':
adam = Adam(clipnorm=1, lr=lr)
model.compile(loss=loss, optimizer=adam, metrics=['accuracy'])
# Training
if verbose:
print('Method: {}'.format(method))
print('Metric: {}'.format(metric))
print('Classes: {}'.format(nb_classes))
if method == 'chain-thaw':
result = chain_thaw(model, nb_classes=nb_classes,
train=(X_train, y_train),
val=(X_val, y_val),
test=(X_test, y_test),
batch_size=batch_size, loss=loss,
epoch_size=epoch_size,
nb_epochs=nb_epochs,
checkpoint_weight_path=checkpoint_path,
evaluate=metric, verbose=verbose)
else:
result = tune_trainable(model, nb_classes=nb_classes,
train=(X_train, y_train),
val=(X_val, y_val),
test=(X_test, y_test),
epoch_size=epoch_size,
nb_epochs=nb_epochs,
batch_size=batch_size,
checkpoint_weight_path=checkpoint_path,
evaluate=metric, verbose=verbose)
return model, result
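# Hypothetical end-to-end sketch (not called anywhere): 'data' is assumed to
# be the dict returned by load_benchmark above, and 'model' an already-built
# DeepMoji-style Keras model supplied by the caller.
def _example_finetune_last_layer(model, data, nb_classes=2):
    return finetune(model, data['texts'], data['labels'], nb_classes,
                    data['batch_size'], method='last', metric='acc')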
def tune_trainable(model, nb_classes, train, val, test, epoch_size,
nb_epochs, batch_size, checkpoint_weight_path,
patience=5, evaluate='acc', verbose=1):
""" Finetunes the given model using the accuracy measure.
# Arguments:
model: Model to be finetuned.
nb_classes: Number of classes in the given dataset.
train: Training data, given as a tuple of (inputs, outputs)
val: Validation data, given as a tuple of (inputs, outputs)
test: Testing data, given as a tuple of (inputs, outputs)
epoch_size: Number of samples in an epoch.
nb_epochs: Number of epochs.
batch_size: Batch size.
checkpoint_weight_path: Filepath where weights will be checkpointed to
during training. This file will be rewritten by the function.
patience: Patience for callback methods.
evaluate: Evaluation method to use. Can be 'acc' or 'weighted_f1'.
verbose: Verbosity flag.
# Returns:
        Score of the trained model on the test set, computed with the metric
        given by 'evaluate'.
"""
# Unpack args
X_train, y_train = train
X_val, y_val = val
X_test, y_test = test
if nb_classes > 2:
y_train = to_categorical(y_train)
y_val = to_categorical(y_val)
y_test = to_categorical(y_test)
if verbose:
print("Trainable weights: {}".format(model.trainable_weights))
print("Training..")
# Use sample generator for fixed-size epoch
train_gen = sampling_generator(X_train, y_train,
batch_size, upsample=False)
callbacks = finetuning_callbacks(checkpoint_weight_path, patience, verbose)
steps = int(epoch_size / batch_size)
model.fit_generator(train_gen, steps_per_epoch=steps,
epochs=nb_epochs,
validation_data=(X_val, y_val),
validation_steps=steps,
callbacks=callbacks, verbose=(verbose >= 2))
# Reload the best weights found to avoid overfitting
# Wait a bit to allow proper closing of weights file
sleep(1)
model.load_weights(checkpoint_weight_path, by_name=False)
if verbose >= 2:
print("Loaded weights from {}".format(checkpoint_weight_path))
if evaluate == 'acc':
return evaluate_using_acc(model, X_test, y_test, batch_size=batch_size)
elif evaluate == 'weighted_f1':
return evaluate_using_weighted_f1(model, X_test, y_test, X_val, y_val,
batch_size=batch_size)
def evaluate_using_weighted_f1(model, X_test, y_test, X_val, y_val,
batch_size):
""" Evaluation function using macro weighted F1 score.
# Arguments:
model: Model to be evaluated.
X_test: Inputs of the testing set.
y_test: Outputs of the testing set.
X_val: Inputs of the validation set.
y_val: Outputs of the validation set.
batch_size: Batch size.
# Returns:
Weighted F1 score of the given model.
"""
y_pred_test = np.array(model.predict(X_test, batch_size=batch_size))
y_pred_val = np.array(model.predict(X_val, batch_size=batch_size))
f1_test, _ = find_f1_threshold(y_val, y_pred_val, y_test, y_pred_test,
                                   average='weighted')
return f1_test
def evaluate_using_acc(model, X_test, y_test, batch_size):
""" Evaluation function using accuracy.
# Arguments:
model: Model to be evaluated.
X_test: Inputs of the testing set.
y_test: Outputs of the testing set.
batch_size: Batch size.
# Returns:
Accuracy of the given model.
"""
_, acc = model.evaluate(X_test, y_test, batch_size=batch_size, verbose=0)
return acc
def chain_thaw(model, nb_classes, train, val, test, batch_size,
loss, epoch_size, nb_epochs, checkpoint_weight_path,
patience=5,
initial_lr=0.001, next_lr=0.0001, seed=None,
verbose=1, evaluate='acc'):
""" Finetunes given model using chain-thaw and evaluates using accuracy.
# Arguments:
model: Model to be finetuned.
nb_classes: Number of classes in the given dataset.
train: Training data, given as a tuple of (inputs, outputs)
val: Validation data, given as a tuple of (inputs, outputs)
test: Testing data, given as a tuple of (inputs, outputs)
batch_size: Batch size.
loss: Loss function to be used during training.
epoch_size: Number of samples in an epoch.
nb_epochs: Number of epochs.
checkpoint_weight_path: Filepath where weights will be checkpointed to
during training. This file will be rewritten by the function.
initial_lr: Initial learning rate. Will only be used for the first
training step (i.e. the softmax layer)
next_lr: Learning rate for every subsequent step.
seed: Random number generator seed.
verbose: Verbosity flag.
evaluate: Evaluation method to use. Can be 'acc' or 'weighted_f1'.
# Returns:
        Score of the finetuned model on the test set, computed with the metric
        given by 'evaluate'.
"""
# Unpack args
X_train, y_train = train
X_val, y_val = val
X_test, y_test = test
if nb_classes > 2:
y_train = to_categorical(y_train)
y_val = to_categorical(y_val)
y_test = to_categorical(y_test)
if verbose:
print('Training..')
# Use sample generator for fixed-size epoch
train_gen = sampling_generator(X_train, y_train, batch_size,
upsample=False, seed=seed)
callbacks = finetuning_callbacks(checkpoint_weight_path, patience, verbose)
# Train using chain-thaw
train_by_chain_thaw(model=model, train_gen=train_gen,
val_data=(X_val, y_val), loss=loss, callbacks=callbacks,
epoch_size=epoch_size, nb_epochs=nb_epochs,
checkpoint_weight_path=checkpoint_weight_path,
batch_size=batch_size, verbose=verbose)
if evaluate == 'acc':
return evaluate_using_acc(model, X_test, y_test, batch_size=batch_size)
elif evaluate == 'weighted_f1':
return evaluate_using_weighted_f1(model, X_test, y_test, X_val, y_val,
batch_size=batch_size)
def train_by_chain_thaw(model, train_gen, val_data, loss, callbacks, epoch_size,
nb_epochs, checkpoint_weight_path, batch_size,
initial_lr=0.001, next_lr=0.0001, verbose=1):
""" Finetunes model using the chain-thaw method.
This is done as follows:
1) Freeze every layer except the last (softmax) layer and train it.
2) Freeze every layer except the first layer and train it.
3) Freeze every layer except the second etc., until the second last layer.
4) Unfreeze all layers and train entire model.
# Arguments:
model: Model to be trained.
train_gen: Training sample generator.
val_data: Validation data.
loss: Loss function to be used.
callbacks: Training callbacks to be used.
epoch_size: Number of samples in an epoch.
nb_epochs: Number of epochs.
checkpoint_weight_path: Where weight checkpoints should be saved.
batch_size: Batch size.
initial_lr: Initial learning rate. Will only be used for the first
training step (i.e. the softmax layer)
next_lr: Learning rate for every subsequent step.
verbose: Verbosity flag.
"""
# Get trainable layers
layers = [layer for layer in model.layers
if len(layer.trainable_weights)]
# Bring last layer to front
layers.insert(0, layers.pop(len(layers) - 1))
# Add None to the end to signify finetuning all layers
layers.append(None)
lr = None
# Finetune each layer one by one and finetune all of them at once
# at the end
for layer in layers:
if lr is None:
lr = initial_lr
elif lr == initial_lr:
lr = next_lr
adam = Adam(clipnorm=1, lr=lr)
# Freeze all except current layer
for _layer in layers:
if _layer is not None:
trainable = _layer == layer or layer is None
change_trainable(_layer, trainable=trainable, verbose=False)
# Verify we froze the right layers
for _layer in model.layers:
if _layer is not None and len(_layer.trainable_weights):
assert _layer.trainable == (_layer == layer) or layer is None
model.cache = False
model.compile(loss=loss, optimizer=adam, metrics=['accuracy'])
model.cache = True
if verbose:
if layer is None:
print('Finetuning all layers')
else:
print('Finetuning {}'.format(layer.name))
steps = int(epoch_size / batch_size)
model.fit_generator(train_gen, steps_per_epoch=steps,
epochs=nb_epochs, validation_data=val_data,
callbacks=callbacks, verbose=(verbose >= 2))
# Reload the best weights found to avoid overfitting
# Wait a bit to allow proper closing of weights file
sleep(1)
model.load_weights(checkpoint_weight_path, by_name=False)
if verbose >= 2:
print("Loaded weights from {}".format(checkpoint_weight_path))
| mit |
niasand/cool-config | pandas_learn/pivot_table.py | 1 | 8064 | # -*- coding:utf-8 -*-
# @Author: Zhiwei.Yang
# @Create At: 2019-02-23 19:22:08
# @Last Modified At: 2019-02-23 19:22:08
import pandas as pd
import numpy as np
lc = pd.read_csv("phone_data.csv")
df = pd.pivot_table(lc,index=["month"],values=["duration"],aggfunc=np.sum)
df3 = pd.pivot_table(lc,index=["month"],values=["duration","date"],aggfunc=[np.sum,np.mean,len],fill_value=0)
df1 = pd.pivot_table(lc,index=["month","date"],values=["duration"],columns=["index"])
df2 = pd.pivot_table(lc,index=["month","date"],values=["duration","network_type"])
"""
Creating pivot tables with pandas
August 4, 2016, by 蓝鲸
Pivot tables are the most commonly used data summarization tool in Excel: they aggregate data along one or more specified dimensions. In Python the same functionality is available through the pandas.pivot_table function. This article describes how pandas.pivot_table relates to Excel pivot tables and how to use it in practice. The data used in the article are the public Lending Club data for 2007-2011.
The pandas pivot table function
pandas.pivot_table takes four main arguments plus a number of optional parameters. The four main arguments are the data source data, the row index index, the columns columns, and the values values. The optional parameters control, among other things, how the values are aggregated, how NaN values are handled, and whether summary (margin) rows are shown. Below is the function description from the pandas documentation.
[Figure: pandas documentation entry for pivot_table]
If you compare pandas.pivot_table with Excel's pivot table interface and connect the matching pieces, the row index index, the columns and the values of pandas.pivot_table correspond to the Rows, Columns and Values areas of an Excel pivot table. In Excel you drag fields into those areas; with pandas.pivot_table you simply pass the field names to the corresponding arguments. Let's look at how pandas.pivot_table is used in practice.
[Figure: Excel pivot table field areas]
First import the numpy and pandas libraries: numpy handles numerical computation, pandas is a scientific-computing library built on top of numpy, and pandas.pivot_table is a function in the pandas (pd) library. Then read the Lending Club data into a table named lc.
import pandas as pd
import numpy as np
lc=pd.DataFrame(pd.read_csv('LoanStats3a.csv',header=1))
Creating a simple pivot table
We use the loan term and loan amount fields of the Lending Club table to build a simple pivot table that aggregates loan amounts by loan term: the term field (term) goes into the row index and the loan amount field (loan_amnt) into values. Note that by default pandas.pivot_table aggregates values by taking the mean, so the table below shows the average loan amount for each loan term. This simple pivot table has one dimension and one measure; below we add more dimensions, more measures and more aggregation methods.
pd.pivot_table(lc,index=["term"],values=["loan_amnt"])
Adding a row dimension (index)
On top of the loan term dimension we add a borrower grade dimension to build a two-level pivot table, by adding the grade field (grade) to the row index of pandas.pivot_table. The row index now holds two dimensions: the primary dimension loan term (term) and the secondary dimension borrower grade (grade). The measure is the average loan amount broken down by grade within each loan term, so the numbers are more finely split than before.
pd.pivot_table(lc,index=["term","grade"],values=["loan_amnt"])
Swapping the order of the dimensions in pandas.pivot_table changes the hierarchy of the pivot table and how the data are displayed. Here we swap the two fields in the row index, so borrower grade (grade) becomes the primary dimension and loan term (term) the secondary one.
pd.pivot_table(lc,index=["grade","term"],values=["loan_amnt"])
Adding a value variable (values)
Besides adding secondary dimensions you can also add more columns to aggregate. On top of the previous pivot table we add total interest received as a second measure. The method is the same as before: put the extra field into values. In the code and table below, total_rec_int is the new values variable. Again, by default pandas.pivot_table aggregates by taking the mean.
pd.pivot_table(lc,index=["grade","term"],values=["loan_amnt","total_rec_int"])
Changing the aggregation method
To change how pandas.pivot_table aggregates the values, set it explicitly. Below the aggregation of loan amount and total interest is changed to a sum by adding aggfunc=np.sum, so the value columns in the new pivot table are summed instead of averaged.
pd.pivot_table(lc,index=["grade","term"],values=["loan_amnt","total_rec_int"],aggfunc=np.sum)
Adding more aggregation methods
Values can be counted as well as summed and averaged. Below we compute the sum, the mean and the count of loan amount and total interest at once by passing aggfunc=[np.sum,np.mean,len], where np.sum is the sum, np.mean the mean and len the count. The resulting pivot table contains a sum section, a mean section and a len (count) section.
pd.pivot_table(lc,index=["grade","term"],values=["loan_amnt","total_rec_int"],aggfunc=[np.sum,np.mean,len])
If the data contain NaN values that were not handled during earlier cleaning, they can be dealt with while building the pivot table. pandas.pivot_table offers two options: replace NaN values with 0, or drop them so that entries containing NaN do not take part in the calculation. Here we use the first option and replace NaN with 0 by adding fill_value=0.
pd.pivot_table(lc,index=["grade","term"],values=["loan_amnt","total_rec_int"],aggfunc=[np.sum,np.mean,len],fill_value=0)
Adding a column dimension (columns)
pandas.pivot_table also supports column dimensions. In Excel you would drag the field into the Columns area; in pandas you add the columns argument with the field name. In the code below, columns=["home_ownership"] is the new part, adding home_ownership as a column dimension.
pd.pivot_table(lc,index=["grade"],values=["loan_amnt"],columns=["home_ownership"],aggfunc=[np.sum],fill_value=0)
Adding multiple column dimensions
Like the row index, columns can hold several dimensions; the method is the same as for row dimensions and values, so it is not repeated here. In the code below, columns=["home_ownership","term"] is the changed part: the loan term (term) is added as a secondary column dimension, with home_ownership as the primary one.
pd.pivot_table(lc,index=["grade"],values=["loan_amnt"],columns=["home_ownership","term"],aggfunc=[np.sum],fill_value=0)
Adding summary totals
The margins parameter of pandas.pivot_table adds summary totals to the pivot table. By default margins is False; set it to True to show totals across the different dimensions. The totals are computed with the same aggfunc as the rest of the table, so if aggfunc sums the values, the totals are sums as well.
Read more: http://bluewhale.cc/2016-08-04/use-pandas-create-a-pivot-table.html#ixzz5gM9m31Yl
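A short illustrative call (against the phone_data.csv table loaded at the top
of this file, not the Lending Club data discussed above) showing the margins
option described in the last section:
pd.pivot_table(lc, index=["month"], values=["duration"], aggfunc=np.sum, margins=True)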
""" | mit |
FFMG/myoddweb.piger | myodd/boost/libs/numeric/odeint/performance/plot_result.py | 43 | 2225 | """
Copyright 2011-2014 Mario Mulansky
Copyright 2011-2014 Karsten Ahnert
Distributed under the Boost Software License, Version 1.0.
(See accompanying file LICENSE_1_0.txt or
copy at http://www.boost.org/LICENSE_1_0.txt)
"""
import numpy as np
from matplotlib import pyplot as plt
plt.rc("font", size=16)
def get_runtime_from_file(filename):
gcc_perf_file = open(filename, 'r')
for line in gcc_perf_file:
if "Minimal Runtime:" in line:
return float(line.split(":")[-1])
t_gcc = [get_runtime_from_file("perf_workbook/odeint_rk4_array_gcc.perf"),
get_runtime_from_file("perf_ariel/odeint_rk4_array_gcc.perf"),
get_runtime_from_file("perf_lyra/odeint_rk4_array_gcc.perf")]
t_intel = [get_runtime_from_file("perf_workbook/odeint_rk4_array_intel.perf"),
get_runtime_from_file("perf_ariel/odeint_rk4_array_intel.perf"),
get_runtime_from_file("perf_lyra/odeint_rk4_array_intel.perf")]
t_gfort = [get_runtime_from_file("perf_workbook/rk4_gfort.perf"),
get_runtime_from_file("perf_ariel/rk4_gfort.perf"),
get_runtime_from_file("perf_lyra/rk4_gfort.perf")]
t_c_intel = [get_runtime_from_file("perf_workbook/rk4_c_intel.perf"),
get_runtime_from_file("perf_ariel/rk4_c_intel.perf"),
get_runtime_from_file("perf_lyra/rk4_c_intel.perf")]
print t_c_intel
ind = np.arange(3) # the x locations for the groups
width = 0.15 # the width of the bars
fig = plt.figure()
ax = fig.add_subplot(111)
rects1 = ax.bar(ind, t_gcc, width, color='b', label="odeint gcc")
rects2 = ax.bar(ind+width, t_intel, width, color='g', label="odeint intel")
rects3 = ax.bar(ind+2*width, t_c_intel, width, color='y', label="C intel")
rects4 = ax.bar(ind+3*width, t_gfort, width, color='c', label="gfort")
ax.axis([-width, 2.0+5*width, 0.0, 0.85])
ax.set_ylabel('Runtime (s)')
ax.set_title('Performance for integrating the Lorenz system')
ax.set_xticks(ind + 1.5*width)
ax.set_xticklabels(('Core i5-3210M\n3.1 GHz',
'Xeon E5-2690\n3.8 GHz',
'Opteron 8431\n 2.4 GHz'))
ax.legend(loc='upper left', prop={'size': 16})
plt.savefig("perf.pdf")
plt.savefig("perf.png", dpi=50)
plt.show()
| gpl-2.0 |
gnu-sandhi/sandhi | modules/gr36/gnuradio-core/src/examples/volk_benchmark/volk_plot.py | 78 | 6117 | #!/usr/bin/env python
import sys, math
import argparse
from volk_test_funcs import *
try:
import matplotlib
import matplotlib.pyplot as plt
except ImportError:
sys.stderr.write("Could not import Matplotlib (http://matplotlib.sourceforge.net/)\n")
sys.exit(1)
def main():
desc='Plot Volk performance results from a SQLite database. ' + \
'Run one of the volk tests first (e.g, volk_math.py)'
parser = argparse.ArgumentParser(description=desc)
parser.add_argument('-D', '--database', type=str,
default='volk_results.db',
help='Database file to read data from [default: %(default)s]')
parser.add_argument('-E', '--errorbars',
action='store_true', default=False,
help='Show error bars (1 standard dev.)')
parser.add_argument('-P', '--plot', type=str,
choices=['mean', 'min', 'max'],
default='mean',
help='Set the type of plot to produce [default: %(default)s]')
parser.add_argument('-%', '--percent', type=str,
default=None, metavar="table",
help='Show percent difference to the given type [default: %(default)s]')
args = parser.parse_args()
# Set up global plotting properties
matplotlib.rcParams['figure.subplot.bottom'] = 0.2
matplotlib.rcParams['figure.subplot.top'] = 0.95
matplotlib.rcParams['figure.subplot.right'] = 0.98
matplotlib.rcParams['ytick.labelsize'] = 16
matplotlib.rcParams['xtick.labelsize'] = 16
matplotlib.rcParams['legend.fontsize'] = 18
# Get list of tables to compare
conn = create_connection(args.database)
tables = list_tables(conn)
M = len(tables)
# Colors to distinguish each table in the bar graph
# More than 5 tables will wrap around to the start.
colors = ['b', 'r', 'g', 'm', 'k']
# Set up figure for plotting
f0 = plt.figure(0, facecolor='w', figsize=(14,10))
s0 = f0.add_subplot(1,1,1)
# Create a register of names that exist in all tables
tmp_regs = []
for table in tables:
# Get results from the next table
res = get_results(conn, table[0])
tmp_regs.append(list())
for r in res:
try:
tmp_regs[-1].index(r['kernel'])
except ValueError:
tmp_regs[-1].append(r['kernel'])
# Get only those names that are common in all tables
name_reg = tmp_regs[0]
for t in tmp_regs[1:]:
name_reg = list(set(name_reg) & set(t))
name_reg.sort()
# Pull the data out for each table into a dictionary
    # so we can reference the table by its name and the data associated
    # with a given kernel in name_reg by its name.
# This ensures there is no sorting issue with the data in the
# dictionary, so the kernels are plotted against each other.
table_data = dict()
for i,table in enumerate(tables):
# Get results from the next table
res = get_results(conn, table[0])
data = dict()
for r in res:
data[r['kernel']] = r
table_data[table[0]] = data
if args.percent is not None:
for i,t in enumerate(table_data):
if args.percent == t:
norm_data = []
for name in name_reg:
if(args.plot == 'max'):
norm_data.append(table_data[t][name]['max'])
elif(args.plot == 'min'):
norm_data.append(table_data[t][name]['min'])
elif(args.plot == 'mean'):
norm_data.append(table_data[t][name]['avg'])
# Plot the results
x0 = xrange(len(name_reg))
i = 0
for t in (table_data):
ydata = []
stds = []
for name in name_reg:
stds.append(math.sqrt(table_data[t][name]['var']))
if(args.plot == 'max'):
ydata.append(table_data[t][name]['max'])
elif(args.plot == 'min'):
ydata.append(table_data[t][name]['min'])
elif(args.plot == 'mean'):
ydata.append(table_data[t][name]['avg'])
if args.percent is not None:
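            # Express each timing as the percent improvement relative to the
            # reference table chosen with --percent: positive values mean
            # this table ran faster than the reference.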
ydata = [-100*(y-n)/y for y,n in zip(ydata,norm_data)]
if(args.percent != t):
# makes x values for this data set placement
# width of bars depends on number of comparisons
wdth = 0.80/(M-1)
x1 = [x + i*wdth for x in x0]
i += 1
s0.bar(x1, ydata, width=wdth,
color=colors[(i-1)%M], label=t,
edgecolor='k', linewidth=2)
else:
# makes x values for this data set placement
# width of bars depends on number of comparisons
wdth = 0.80/M
x1 = [x + i*wdth for x in x0]
i += 1
if(args.errorbars is False):
s0.bar(x1, ydata, width=wdth,
color=colors[(i-1)%M], label=t,
edgecolor='k', linewidth=2)
else:
s0.bar(x1, ydata, width=wdth,
yerr=stds,
color=colors[i%M], label=t,
edgecolor='k', linewidth=2,
error_kw={"ecolor": 'k', "capsize":5,
"linewidth":2})
nitems = res[0]['nitems']
if args.percent is None:
s0.set_ylabel("Processing time (sec) [{0:G} items]".format(nitems),
fontsize=22, fontweight='bold',
horizontalalignment='center')
else:
s0.set_ylabel("% Improvement over {0} [{1:G} items]".format(
args.percent, nitems),
fontsize=22, fontweight='bold')
s0.legend()
s0.set_xticks(x0)
s0.set_xticklabels(name_reg)
for label in s0.xaxis.get_ticklabels():
label.set_rotation(45)
label.set_fontsize(16)
plt.show()
if __name__ == "__main__":
main()
| gpl-3.0 |
jpautom/scikit-learn | examples/ensemble/plot_voting_decision_regions.py | 230 | 2386 | """
==================================================
Plot the decision boundaries of a VotingClassifier
==================================================
Plot the decision boundaries of a `VotingClassifier` for
two features of the Iris dataset.
Plot the class probabilities of the first sample in a toy dataset
predicted by three different classifiers and averaged by the
`VotingClassifier`.
First, three example classifiers are initialized (`DecisionTreeClassifier`,
`KNeighborsClassifier`, and `SVC`) and used to initialize a
soft-voting `VotingClassifier` with weights `[2, 1, 2]`, which means that
the predicted probabilities of the `DecisionTreeClassifier` and `SVC`
each count 2 times as much as those of the `KNeighborsClassifier` classifier
when the averaged probability is calculated.
"""
print(__doc__)
from itertools import product
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.ensemble import VotingClassifier
# Loading some example data
iris = datasets.load_iris()
X = iris.data[:, [0, 2]]
y = iris.target
# Training classifiers
clf1 = DecisionTreeClassifier(max_depth=4)
clf2 = KNeighborsClassifier(n_neighbors=7)
clf3 = SVC(kernel='rbf', probability=True)
eclf = VotingClassifier(estimators=[('dt', clf1), ('knn', clf2),
('svc', clf3)],
voting='soft', weights=[2, 1, 2])
clf1.fit(X, y)
clf2.fit(X, y)
clf3.fit(X, y)
eclf.fit(X, y)
# Plotting decision regions
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.1),
np.arange(y_min, y_max, 0.1))
f, axarr = plt.subplots(2, 2, sharex='col', sharey='row', figsize=(10, 8))
for idx, clf, tt in zip(product([0, 1], [0, 1]),
[clf1, clf2, clf3, eclf],
['Decision Tree (depth=4)', 'KNN (k=7)',
'Kernel SVM', 'Soft Voting']):
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
axarr[idx[0], idx[1]].contourf(xx, yy, Z, alpha=0.4)
axarr[idx[0], idx[1]].scatter(X[:, 0], X[:, 1], c=y, alpha=0.8)
axarr[idx[0], idx[1]].set_title(tt)
plt.show()
| bsd-3-clause |
0asa/sparklingpandas | sparklingpandas/groupby.py | 2 | 9829 | """Provide wrapper around the grouped result from L{PRDD}"""
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from sparklingpandas.utils import add_pyspark_path
from sparklingpandas.prdd import PRDD
add_pyspark_path()
import pandas
import numpy as np
class GroupBy:
"""An RDD with key value pairs, where each value is a Panda's dataframe and
the key is the result of the group. Supports many of the same operations
as a Panda's GroupBy."""
def __init__(self, rdd, *args, **kwargs):
"""Construct a groupby object providing the functions on top of the
provided RDD. We keep the base RDD so if someone calls aggregate we
do things more intelligently.
"""
def extract_keys(groupedFrame):
for key, group in groupedFrame:
yield (key, group)
def group_and_extract(frame):
return extract_keys(frame.groupby(*args, **kwargs))
self._sort = kwargs.get("sort", True)
self._baseRDD = rdd
self._distributedRDD = rdd.flatMap(group_and_extract)
self._mergedRDD = self._sortIfNeeded(
self._group(self._distributedRDD))
self._myargs = args
self._mykwargs = kwargs
def _sortIfNeeded(self, rdd):
"""Sort by key if we need to."""
if self._sort:
return rdd.sortByKey()
else:
return rdd
def _group(self, rdd):
"""Group together the values with the same key."""
return rdd.reduceByKey(lambda x, y: x.append(y))
def _cache(self):
"""Cache the grouped RDD. This is useful if you have multiple
computations to run on the result. This is a SparklingPandas
extension.
"""
self._mergedRDD.cache()
def __len__(self):
"""Number of groups."""
return self._mergedRDD.count()
def get_group(self, name):
"""Returns a concrete DataFrame for provided group name."""
        return self._mergedRDD.lookup(name)
def __iter__(self):
"""Returns an iterator of (name, dataframe) to the local machine.
"""
return self._mergedRDD.collect().__iter__()
def collect(self):
"""Return a list of the elements. This is a SparklingPanda extension
because Spark gives us back a list we convert to an iterator in
__iter__ so it allows us to skip the round trip through iterators.
"""
return self._mergedRDD.collect()
@property
def groups(self):
"""Returns dict {group name -> group labels}."""
def extract_group_labels(frame):
return (frame[0], frame[1].index.values)
return self._mergedRDD.map(extract_group_labels).collectAsMap()
@property
def ngroups(self):
"""Number of groups."""
return self._mergedRDD.count()
@property
def indices(self):
"""Returns dict {group name -> group indices}."""
def extract_group_indices(frame):
return (frame[0], frame[1].index)
return self._mergedRDD.map(extract_group_indices).collectAsMap()
def median(self):
"""Compute median of groups, excluding missing values.
For multiple groupings, the result index will be a MultiIndex.
"""
return PRDD.fromRDD(
self._regroup_mergedRDD().values().map(
lambda x: x.median()))
def mean(self):
"""Compute mean of groups, excluding missing values.
For multiple groupings, the result index will be a MultiIndex.
"""
# TODO(holden): use stats counter
return PRDD.fromRDD(
self._regroup_mergedRDD().values().map(
lambda x: x.mean()))
def var(self, ddof=1):
"""Compute standard deviation of groups, excluding missing values.
For multiple groupings, the result index will be a MultiIndex.
"""
# TODO(holden): use stats counter
return PRDD.fromRDD(
self._regroup_mergedRDD().values().map(
lambda x: x.var(
ddof=ddof)))
def sum(self):
"""Compute the sum for each group."""
myargs = self._myargs
mykwargs = self._mykwargs
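        # combineByKey takes three functions: create_combiner builds the
        # initial aggregate from the first frame seen for a key, merge_value
        # folds another frame into that aggregate within a partition, and
        # merge_combiner merges the partial aggregates across partitions.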
def create_combiner(x):
return x.groupby(*myargs, **mykwargs).sum()
def merge_value(x, y):
return pandas.concat([x, create_combiner(y)])
def merge_combiner(x, y):
return x + y
rddOfSum = self._sortIfNeeded(self._distributedRDD.combineByKey(
create_combiner,
merge_value,
merge_combiner)).values()
return PRDD.fromRDD(rddOfSum)
def min(self):
"""Compute the min for each group."""
myargs = self._myargs
mykwargs = self._mykwargs
def create_combiner(x):
return x.groupby(*myargs, **mykwargs).min()
def merge_value(x, y):
return x.append(create_combiner(y)).min()
def merge_combiner(x, y):
return x.append(y).min(level=0)
rddOfMin = self._sortIfNeeded(self._distributedRDD.combineByKey(
create_combiner,
merge_value,
merge_combiner)).values()
return PRDD.fromRDD(rddOfMin)
def max(self):
"""Compute the max for each group."""
myargs = self._myargs
mykwargs = self._mykwargs
def create_combiner(x):
return x.groupby(*myargs, **mykwargs).max()
def merge_value(x, y):
return x.append(create_combiner(y)).max()
def merge_combiner(x, y):
return x.append(y).max(level=0)
rddOfMax = self._sortIfNeeded(self._distributedRDD.combineByKey(
create_combiner,
merge_value,
merge_combiner)).values()
return PRDD.fromRDD(rddOfMax)
def first(self):
"""
        Pull out the first element from each group. Note: this is different
        from Spark's first.
"""
myargs = self._myargs
mykwargs = self._mykwargs
def create_combiner(x):
return x.groupby(*myargs, **mykwargs).first()
def merge_value(x, y):
return create_combiner(x)
def merge_combiner(x, y):
return x
rddOfFirst = self._sortIfNeeded(self._distributedRDD.combineByKey(
create_combiner,
merge_value,
merge_combiner)).values()
return PRDD.fromRDD(rddOfFirst)
def last(self):
"""Pull out the last from each group."""
myargs = self._myargs
mykwargs = self._mykwargs
def create_combiner(x):
return x.groupby(*myargs, **mykwargs).last()
def merge_value(x, y):
return create_combiner(y)
def merge_combiner(x, y):
return y
rddOfLast = self._sortIfNeeded(self._distributedRDD.combineByKey(
create_combiner,
merge_value,
merge_combiner)).values()
return PRDD.fromRDD(rddOfLast)
def _regroup_mergedRDD(self):
"""A common pattern is we want to call groupby again on the dataframes
so we can use the groupby functions.
"""
myargs = self._myargs
mykwargs = self._mykwargs
def regroup(df):
return df.groupby(*myargs, **mykwargs)
return self._mergedRDD.mapValues(regroup)
def nth(self, n, *args, **kwargs):
"""Take the nth element of each grouby."""
# TODO: Stop collecting the entire frame for each key.
myargs = self._myargs
mykwargs = self._mykwargs
nthRDD = self._regroup_mergedRDD().mapValues(
lambda r: r.nth(
n, *args, **kwargs)).values()
return PRDD.fromRDD(nthRDD)
def aggregate(self, f):
"""Apply the aggregation function.
        Note: This implementation does not take advantage of partial
aggregation.
"""
return PRDD.fromRDD(
self._regroup_mergedRDD().values().map(
lambda g: g.aggregate(f)))
def agg(self, f):
return self.aggregate(f)
def apply(self, func, *args, **kwargs):
"""Apply the provided function and combine the results together in the
same way as apply from groupby in pandas.
This returns a PRDD.
"""
def key_by_index(data):
"""Key each row by its index.
"""
# TODO: Is there a better way to do this?
for key, row in data.iterrows():
yield (key, pandas.DataFrame.from_dict(dict([(key, row)]),
orient='index'))
myargs = self._myargs
mykwargs = self._mykwargs
regroupedRDD = self._distributedRDD.mapValues(
lambda data: data.groupby(*myargs, **mykwargs))
appliedRDD = regroupedRDD.map(
lambda key_data: key_data[1].apply(func, *args, **kwargs))
reKeyedRDD = appliedRDD.flatMap(key_by_index)
prdd = self._sortIfNeeded(reKeyedRDD).values()
return PRDD.fromRDD(prdd)
| apache-2.0 |
andaag/scikit-learn | benchmarks/bench_tree.py | 297 | 3617 | """
To run this, you'll need to have installed:
* scikit-learn
Does two benchmarks
First, we fix a training set, increase the number of
samples to classify and plot number of classified samples as a
function of time.
In the second benchmark, we increase the number of dimensions of the
training set, classify a sample and plot the time taken as a function
of the number of dimensions.
"""
import numpy as np
import pylab as pl
import gc
from datetime import datetime
# to store the results
scikit_classifier_results = []
scikit_regressor_results = []
mu_second = 0.0 + 10 ** 6 # number of microseconds in a second
def bench_scikit_tree_classifier(X, Y):
"""Benchmark with scikit-learn decision tree classifier"""
from sklearn.tree import DecisionTreeClassifier
gc.collect()
# start time
tstart = datetime.now()
clf = DecisionTreeClassifier()
clf.fit(X, Y).predict(X)
delta = (datetime.now() - tstart)
# stop time
scikit_classifier_results.append(
delta.seconds + delta.microseconds / mu_second)
def bench_scikit_tree_regressor(X, Y):
"""Benchmark with scikit-learn decision tree regressor"""
from sklearn.tree import DecisionTreeRegressor
gc.collect()
# start time
tstart = datetime.now()
clf = DecisionTreeRegressor()
clf.fit(X, Y).predict(X)
delta = (datetime.now() - tstart)
# stop time
scikit_regressor_results.append(
delta.seconds + delta.microseconds / mu_second)
if __name__ == '__main__':
print('============================================')
print('Warning: this is going to take a looong time')
print('============================================')
n = 10
step = 10000
n_samples = 10000
dim = 10
n_classes = 10
for i in range(n):
print('============================================')
print('Entering iteration %s of %s' % (i, n))
print('============================================')
n_samples += step
X = np.random.randn(n_samples, dim)
Y = np.random.randint(0, n_classes, (n_samples,))
bench_scikit_tree_classifier(X, Y)
Y = np.random.randn(n_samples)
bench_scikit_tree_regressor(X, Y)
xx = range(0, n * step, step)
pl.figure('scikit-learn tree benchmark results')
pl.subplot(211)
pl.title('Learning with varying number of samples')
pl.plot(xx, scikit_classifier_results, 'g-', label='classification')
pl.plot(xx, scikit_regressor_results, 'r-', label='regression')
pl.legend(loc='upper left')
pl.xlabel('number of samples')
pl.ylabel('Time (s)')
scikit_classifier_results = []
scikit_regressor_results = []
n = 10
step = 500
start_dim = 500
n_classes = 10
dim = start_dim
for i in range(0, n):
print('============================================')
print('Entering iteration %s of %s' % (i, n))
print('============================================')
dim += step
X = np.random.randn(100, dim)
Y = np.random.randint(0, n_classes, (100,))
bench_scikit_tree_classifier(X, Y)
Y = np.random.randn(100)
bench_scikit_tree_regressor(X, Y)
xx = np.arange(start_dim, start_dim + n * step, step)
pl.subplot(212)
pl.title('Learning in high dimensional spaces')
pl.plot(xx, scikit_classifier_results, 'g-', label='classification')
pl.plot(xx, scikit_regressor_results, 'r-', label='regression')
pl.legend(loc='upper left')
pl.xlabel('number of dimensions')
pl.ylabel('Time (s)')
pl.axis('tight')
pl.show()
| bsd-3-clause |
IndraVikas/scikit-learn | benchmarks/bench_20newsgroups.py | 377 | 3555 | from __future__ import print_function, division
from time import time
import argparse
import numpy as np
from sklearn.dummy import DummyClassifier
from sklearn.datasets import fetch_20newsgroups_vectorized
from sklearn.metrics import accuracy_score
from sklearn.utils.validation import check_array
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import MultinomialNB
ESTIMATORS = {
"dummy": DummyClassifier(),
"random_forest": RandomForestClassifier(n_estimators=100,
max_features="sqrt",
min_samples_split=10),
"extra_trees": ExtraTreesClassifier(n_estimators=100,
max_features="sqrt",
min_samples_split=10),
"logistic_regression": LogisticRegression(),
"naive_bayes": MultinomialNB(),
"adaboost": AdaBoostClassifier(n_estimators=10),
}
###############################################################################
# Data
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-e', '--estimators', nargs="+", required=True,
choices=ESTIMATORS)
args = vars(parser.parse_args())
data_train = fetch_20newsgroups_vectorized(subset="train")
data_test = fetch_20newsgroups_vectorized(subset="test")
X_train = check_array(data_train.data, dtype=np.float32,
accept_sparse="csc")
X_test = check_array(data_test.data, dtype=np.float32, accept_sparse="csr")
y_train = data_train.target
y_test = data_test.target
print("20 newsgroups")
print("=============")
print("X_train.shape = {0}".format(X_train.shape))
print("X_train.format = {0}".format(X_train.format))
print("X_train.dtype = {0}".format(X_train.dtype))
print("X_train density = {0}"
"".format(X_train.nnz / np.product(X_train.shape)))
print("y_train {0}".format(y_train.shape))
print("X_test {0}".format(X_test.shape))
print("X_test.format = {0}".format(X_test.format))
print("X_test.dtype = {0}".format(X_test.dtype))
print("y_test {0}".format(y_test.shape))
print()
print("Classifier Training")
print("===================")
accuracy, train_time, test_time = {}, {}, {}
for name in sorted(args["estimators"]):
clf = ESTIMATORS[name]
try:
clf.set_params(random_state=0)
except (TypeError, ValueError):
pass
print("Training %s ... " % name, end="")
t0 = time()
clf.fit(X_train, y_train)
train_time[name] = time() - t0
t0 = time()
y_pred = clf.predict(X_test)
test_time[name] = time() - t0
accuracy[name] = accuracy_score(y_test, y_pred)
print("done")
print()
print("Classification performance:")
print("===========================")
print()
print("%s %s %s %s" % ("Classifier ", "train-time", "test-time",
"Accuracy"))
print("-" * 44)
for name in sorted(accuracy, key=accuracy.get):
print("%s %s %s %s" % (name.ljust(16),
("%.4fs" % train_time[name]).center(10),
("%.4fs" % test_time[name]).center(10),
("%.4f" % accuracy[name]).center(10)))
print()
| bsd-3-clause |
ltiao/scikit-learn | sklearn/preprocessing/label.py | 16 | 26702 | # Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Andreas Mueller <[email protected]>
# Joel Nothman <[email protected]>
# Hamzeh Alsalhi <[email protected]>
# License: BSD 3 clause
from collections import defaultdict
import itertools
import array
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..utils.fixes import np_version
from ..utils.fixes import sparse_min_max
from ..utils.fixes import astype
from ..utils.fixes import in1d
from ..utils import column_or_1d
from ..utils.validation import check_array
from ..utils.validation import check_is_fitted
from ..utils.validation import _num_samples
from ..utils.multiclass import unique_labels
from ..utils.multiclass import type_of_target
from ..externals import six
zip = six.moves.zip
map = six.moves.map
__all__ = [
'label_binarize',
'LabelBinarizer',
'LabelEncoder',
'MultiLabelBinarizer',
]
def _check_numpy_unicode_bug(labels):
"""Check that user is not subject to an old numpy bug
Fixed in master before 1.7.0:
https://github.com/numpy/numpy/pull/243
"""
if np_version[:3] < (1, 7, 0) and labels.dtype.kind == 'U':
raise RuntimeError("NumPy < 1.7.0 does not implement searchsorted"
" on unicode data correctly. Please upgrade"
" NumPy to use LabelEncoder with unicode inputs.")
class LabelEncoder(BaseEstimator, TransformerMixin):
"""Encode labels with value between 0 and n_classes-1.
Read more in the :ref:`User Guide <preprocessing_targets>`.
Attributes
----------
classes_ : array of shape (n_class,)
Holds the label for each class.
Examples
--------
`LabelEncoder` can be used to normalize labels.
>>> from sklearn import preprocessing
>>> le = preprocessing.LabelEncoder()
>>> le.fit([1, 2, 2, 6])
LabelEncoder()
>>> le.classes_
array([1, 2, 6])
>>> le.transform([1, 1, 2, 6]) #doctest: +ELLIPSIS
array([0, 0, 1, 2]...)
>>> le.inverse_transform([0, 0, 1, 2])
array([1, 1, 2, 6])
It can also be used to transform non-numerical labels (as long as they are
hashable and comparable) to numerical labels.
>>> le = preprocessing.LabelEncoder()
>>> le.fit(["paris", "paris", "tokyo", "amsterdam"])
LabelEncoder()
>>> list(le.classes_)
['amsterdam', 'paris', 'tokyo']
>>> le.transform(["tokyo", "tokyo", "paris"]) #doctest: +ELLIPSIS
array([2, 2, 1]...)
>>> list(le.inverse_transform([2, 2, 1]))
['tokyo', 'tokyo', 'paris']
"""
def fit(self, y):
"""Fit label encoder
Parameters
----------
y : array-like of shape (n_samples,)
Target values.
Returns
-------
self : returns an instance of self.
"""
y = column_or_1d(y, warn=True)
_check_numpy_unicode_bug(y)
self.classes_ = np.unique(y)
return self
def fit_transform(self, y):
"""Fit label encoder and return encoded labels
Parameters
----------
y : array-like of shape [n_samples]
Target values.
Returns
-------
y : array-like of shape [n_samples]
"""
y = column_or_1d(y, warn=True)
_check_numpy_unicode_bug(y)
self.classes_, y = np.unique(y, return_inverse=True)
return y
def transform(self, y):
"""Transform labels to normalized encoding.
Parameters
----------
y : array-like of shape [n_samples]
Target values.
Returns
-------
y : array-like of shape [n_samples]
"""
check_is_fitted(self, 'classes_')
classes = np.unique(y)
_check_numpy_unicode_bug(classes)
if len(np.intersect1d(classes, self.classes_)) < len(classes):
diff = np.setdiff1d(classes, self.classes_)
raise ValueError("y contains new labels: %s" % str(diff))
return np.searchsorted(self.classes_, y)
def inverse_transform(self, y):
"""Transform labels back to original encoding.
Parameters
----------
y : numpy array of shape [n_samples]
Target values.
Returns
-------
y : numpy array of shape [n_samples]
"""
check_is_fitted(self, 'classes_')
diff = np.setdiff1d(y, np.arange(len(self.classes_)))
if diff:
raise ValueError("y contains new labels: %s" % str(diff))
y = np.asarray(y)
return self.classes_[y]
class LabelBinarizer(BaseEstimator, TransformerMixin):
"""Binarize labels in a one-vs-all fashion
Several regression and binary classification algorithms are
available in the scikit. A simple way to extend these algorithms
to the multi-class classification case is to use the so-called
one-vs-all scheme.
At learning time, this simply consists in learning one regressor
or binary classifier per class. In doing so, one needs to convert
multi-class labels to binary labels (belong or does not belong
to the class). LabelBinarizer makes this process easy with the
transform method.
At prediction time, one assigns the class for which the corresponding
model gave the greatest confidence. LabelBinarizer makes this easy
with the inverse_transform method.
Read more in the :ref:`User Guide <preprocessing_targets>`.
Parameters
----------
neg_label : int (default: 0)
Value with which negative labels must be encoded.
pos_label : int (default: 1)
Value with which positive labels must be encoded.
sparse_output : boolean (default: False)
True if the returned array from transform is desired to be in sparse
CSR format.
Attributes
----------
classes_ : array of shape [n_class]
Holds the label for each class.
y_type_ : str,
Represents the type of the target data as evaluated by
utils.multiclass.type_of_target. Possible type are 'continuous',
'continuous-multioutput', 'binary', 'multiclass',
        'multiclass-multioutput', 'multilabel-indicator', and 'unknown'.
sparse_input_ : boolean,
True if the input data to transform is given as a sparse matrix, False
otherwise.
Examples
--------
>>> from sklearn import preprocessing
>>> lb = preprocessing.LabelBinarizer()
>>> lb.fit([1, 2, 6, 4, 2])
LabelBinarizer(neg_label=0, pos_label=1, sparse_output=False)
>>> lb.classes_
array([1, 2, 4, 6])
>>> lb.transform([1, 6])
array([[1, 0, 0, 0],
[0, 0, 0, 1]])
Binary targets transform to a column vector
>>> lb = preprocessing.LabelBinarizer()
>>> lb.fit_transform(['yes', 'no', 'no', 'yes'])
array([[1],
[0],
[0],
[1]])
Passing a 2D matrix for multilabel classification
>>> import numpy as np
>>> lb.fit(np.array([[0, 1, 1], [1, 0, 0]]))
LabelBinarizer(neg_label=0, pos_label=1, sparse_output=False)
>>> lb.classes_
array([0, 1, 2])
>>> lb.transform([0, 1, 2, 1])
array([[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
[0, 1, 0]])
See also
--------
label_binarize : function to perform the transform operation of
LabelBinarizer with fixed classes.
"""
def __init__(self, neg_label=0, pos_label=1, sparse_output=False):
if neg_label >= pos_label:
raise ValueError("neg_label={0} must be strictly less than "
"pos_label={1}.".format(neg_label, pos_label))
if sparse_output and (pos_label == 0 or neg_label != 0):
raise ValueError("Sparse binarization is only supported with non "
"zero pos_label and zero neg_label, got "
"pos_label={0} and neg_label={1}"
"".format(pos_label, neg_label))
self.neg_label = neg_label
self.pos_label = pos_label
self.sparse_output = sparse_output
def fit(self, y):
"""Fit label binarizer
Parameters
----------
y : numpy array of shape (n_samples,) or (n_samples, n_classes)
Target values. The 2-d matrix should only contain 0 and 1,
            and represents multilabel classification.
Returns
-------
self : returns an instance of self.
"""
self.y_type_ = type_of_target(y)
if 'multioutput' in self.y_type_:
raise ValueError("Multioutput target data is not supported with "
"label binarization")
if _num_samples(y) == 0:
raise ValueError('y has 0 samples: %r' % y)
self.sparse_input_ = sp.issparse(y)
self.classes_ = unique_labels(y)
return self
def transform(self, y):
"""Transform multi-class labels to binary labels
        The output of transform is sometimes referred to as the
1-of-K coding scheme.
Parameters
----------
y : numpy array or sparse matrix of shape (n_samples,) or
(n_samples, n_classes) Target values. The 2-d matrix should only
            contain 0 and 1, and represents multilabel classification. Sparse
matrix can be CSR, CSC, COO, DOK, or LIL.
Returns
-------
Y : numpy array or CSR matrix of shape [n_samples, n_classes]
Shape will be [n_samples, 1] for binary problems.
"""
check_is_fitted(self, 'classes_')
y_is_multilabel = type_of_target(y).startswith('multilabel')
if y_is_multilabel and not self.y_type_.startswith('multilabel'):
raise ValueError("The object was not fitted with multilabel"
" input.")
return label_binarize(y, self.classes_,
pos_label=self.pos_label,
neg_label=self.neg_label,
sparse_output=self.sparse_output)
def inverse_transform(self, Y, threshold=None):
"""Transform binary labels back to multi-class labels
Parameters
----------
Y : numpy array or sparse matrix with shape [n_samples, n_classes]
Target values. All sparse matrices are converted to CSR before
inverse transformation.
threshold : float or None
Threshold used in the binary and multi-label cases.
Use 0 when:
- Y contains the output of decision_function (classifier)
Use 0.5 when:
- Y contains the output of predict_proba
If None, the threshold is assumed to be half way between
neg_label and pos_label.
Returns
-------
y : numpy array or CSR matrix of shape [n_samples] Target values.
Notes
-----
In the case when the binary labels are fractional
(probabilistic), inverse_transform chooses the class with the
        greatest value. Typically, this allows using the output of a
linear model's decision_function method directly as the input
of inverse_transform.
"""
check_is_fitted(self, 'classes_')
if threshold is None:
threshold = (self.pos_label + self.neg_label) / 2.
if self.y_type_ == "multiclass":
y_inv = _inverse_binarize_multiclass(Y, self.classes_)
else:
y_inv = _inverse_binarize_thresholding(Y, self.y_type_,
self.classes_, threshold)
if self.sparse_input_:
y_inv = sp.csr_matrix(y_inv)
elif sp.issparse(y_inv):
y_inv = y_inv.toarray()
return y_inv
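# --- Illustrative sketch (not part of the original module) ------------------
# Hedged example of the inverse_transform threshold semantics documented above:
# for a multiclass target, continuous scores (e.g. from decision_function) are
# inverted by taking the row-wise argmax. Hypothetical helper, illustration only.
def _demo_label_binarizer_inverse_scores():
    lb = LabelBinarizer().fit([1, 2, 6, 4, 2])   # lb.classes_ -> [1, 2, 4, 6]
    scores = np.array([[-1.2, 2.3, 0.1, -0.5]])  # one row of decision scores
    return lb.inverse_transform(scores)          # array([2]): class at the argmax column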
def label_binarize(y, classes, neg_label=0, pos_label=1, sparse_output=False):
"""Binarize labels in a one-vs-all fashion
Several regression and binary classification algorithms are
available in the scikit. A simple way to extend these algorithms
to the multi-class classification case is to use the so-called
one-vs-all scheme.
This function makes it possible to compute this transformation for a
fixed set of class labels known ahead of time.
Parameters
----------
y : array-like
Sequence of integer labels or multilabel data to encode.
classes : array-like of shape [n_classes]
Uniquely holds the label for each class.
neg_label : int (default: 0)
Value with which negative labels must be encoded.
pos_label : int (default: 1)
Value with which positive labels must be encoded.
sparse_output : boolean (default: False),
Set to true if output binary array is desired in CSR sparse format
Returns
-------
Y : numpy array or CSR matrix of shape [n_samples, n_classes]
Shape will be [n_samples, 1] for binary problems.
Examples
--------
>>> from sklearn.preprocessing import label_binarize
>>> label_binarize([1, 6], classes=[1, 2, 4, 6])
array([[1, 0, 0, 0],
[0, 0, 0, 1]])
The class ordering is preserved:
>>> label_binarize([1, 6], classes=[1, 6, 4, 2])
array([[1, 0, 0, 0],
[0, 1, 0, 0]])
Binary targets transform to a column vector
>>> label_binarize(['yes', 'no', 'no', 'yes'], classes=['no', 'yes'])
array([[1],
[0],
[0],
[1]])
See also
--------
LabelBinarizer : class used to wrap the functionality of label_binarize and
allow for fitting to classes independently of the transform operation
"""
if not isinstance(y, list):
# XXX Workaround that will be removed when list of list format is
# dropped
y = check_array(y, accept_sparse='csr', ensure_2d=False, dtype=None)
else:
if _num_samples(y) == 0:
raise ValueError('y has 0 samples: %r' % y)
if neg_label >= pos_label:
raise ValueError("neg_label={0} must be strictly less than "
"pos_label={1}.".format(neg_label, pos_label))
if (sparse_output and (pos_label == 0 or neg_label != 0)):
raise ValueError("Sparse binarization is only supported with non "
"zero pos_label and zero neg_label, got "
"pos_label={0} and neg_label={1}"
"".format(pos_label, neg_label))
# To account for pos_label == 0 in the dense case
pos_switch = pos_label == 0
if pos_switch:
pos_label = -neg_label
y_type = type_of_target(y)
if 'multioutput' in y_type:
raise ValueError("Multioutput target data is not supported with label "
"binarization")
if y_type == 'unknown':
raise ValueError("The type of target data is not known")
n_samples = y.shape[0] if sp.issparse(y) else len(y)
n_classes = len(classes)
classes = np.asarray(classes)
if y_type == "binary":
if len(classes) == 1:
Y = np.zeros((len(y), 1), dtype=np.int)
Y += neg_label
return Y
elif len(classes) >= 3:
y_type = "multiclass"
sorted_class = np.sort(classes)
if (y_type == "multilabel-indicator" and classes.size != y.shape[1]):
raise ValueError("classes {0} missmatch with the labels {1}"
"found in the data".format(classes, unique_labels(y)))
if y_type in ("binary", "multiclass"):
y = column_or_1d(y)
# pick out the known labels from y
y_in_classes = in1d(y, classes)
y_seen = y[y_in_classes]
indices = np.searchsorted(sorted_class, y_seen)
indptr = np.hstack((0, np.cumsum(y_in_classes)))
data = np.empty_like(indices)
data.fill(pos_label)
Y = sp.csr_matrix((data, indices, indptr),
shape=(n_samples, n_classes))
elif y_type == "multilabel-indicator":
Y = sp.csr_matrix(y)
if pos_label != 1:
data = np.empty_like(Y.data)
data.fill(pos_label)
Y.data = data
else:
raise ValueError("%s target data is not supported with label "
"binarization" % y_type)
if not sparse_output:
Y = Y.toarray()
Y = astype(Y, int, copy=False)
if neg_label != 0:
Y[Y == 0] = neg_label
if pos_switch:
Y[Y == pos_label] = 0
else:
Y.data = astype(Y.data, int, copy=False)
# preserve label ordering
if np.any(classes != sorted_class):
indices = np.searchsorted(sorted_class, classes)
Y = Y[:, indices]
if y_type == "binary":
if sparse_output:
Y = Y.getcol(-1)
else:
Y = Y[:, -1].reshape((-1, 1))
return Y
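# --- Illustrative sketch (not part of the original module) ------------------
# Hedged example of the sparse_output path above: positive entries are stored
# in a CSR matrix instead of a dense array. Hypothetical helper, illustration only.
def _demo_label_binarize_sparse():
    Y = label_binarize([1, 6, 4], classes=[1, 2, 4, 6], sparse_output=True)
    # Y is a (3, 4) scipy.sparse CSR matrix with one nonzero per row
    return Y.toarray()  # [[1, 0, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0]]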
def _inverse_binarize_multiclass(y, classes):
"""Inverse label binarization transformation for multiclass.
Multiclass uses the maximal score instead of a threshold.
"""
classes = np.asarray(classes)
if sp.issparse(y):
# Find the argmax for each row in y where y is a CSR matrix
y = y.tocsr()
n_samples, n_outputs = y.shape
outputs = np.arange(n_outputs)
row_max = sparse_min_max(y, 1)[1]
row_nnz = np.diff(y.indptr)
y_data_repeated_max = np.repeat(row_max, row_nnz)
# picks out all indices obtaining the maximum per row
y_i_all_argmax = np.flatnonzero(y_data_repeated_max == y.data)
# For corner case where last row has a max of 0
if row_max[-1] == 0:
y_i_all_argmax = np.append(y_i_all_argmax, [len(y.data)])
# Gets the index of the first argmax in each row from y_i_all_argmax
index_first_argmax = np.searchsorted(y_i_all_argmax, y.indptr[:-1])
# first argmax of each row
y_ind_ext = np.append(y.indices, [0])
y_i_argmax = y_ind_ext[y_i_all_argmax[index_first_argmax]]
# Handle rows of all 0
y_i_argmax[np.where(row_nnz == 0)[0]] = 0
# Handles rows with max of 0 that contain negative numbers
samples = np.arange(n_samples)[(row_nnz > 0) &
(row_max.ravel() == 0)]
for i in samples:
ind = y.indices[y.indptr[i]:y.indptr[i + 1]]
y_i_argmax[i] = classes[np.setdiff1d(outputs, ind)][0]
return classes[y_i_argmax]
else:
return classes.take(y.argmax(axis=1), mode="clip")
def _inverse_binarize_thresholding(y, output_type, classes, threshold):
"""Inverse label binarization transformation using thresholding."""
if output_type == "binary" and y.ndim == 2 and y.shape[1] > 2:
raise ValueError("output_type='binary', but y.shape = {0}".
format(y.shape))
if output_type != "binary" and y.shape[1] != len(classes):
raise ValueError("The number of class is not equal to the number of "
"dimension of y.")
classes = np.asarray(classes)
# Perform thresholding
if sp.issparse(y):
if threshold > 0:
if y.format not in ('csr', 'csc'):
y = y.tocsr()
y.data = np.array(y.data > threshold, dtype=np.int)
y.eliminate_zeros()
else:
y = np.array(y.toarray() > threshold, dtype=np.int)
else:
y = np.array(y > threshold, dtype=np.int)
# Inverse transform data
if output_type == "binary":
if sp.issparse(y):
y = y.toarray()
if y.ndim == 2 and y.shape[1] == 2:
return classes[y[:, 1]]
else:
if len(classes) == 1:
y = np.empty(len(y), dtype=classes.dtype)
y.fill(classes[0])
return y
else:
return classes[y.ravel()]
elif output_type == "multilabel-indicator":
return y
else:
raise ValueError("{0} format is not supported".format(output_type))
class MultiLabelBinarizer(BaseEstimator, TransformerMixin):
"""Transform between iterable of iterables and a multilabel format
Although a list of sets or tuples is a very intuitive format for multilabel
data, it is unwieldy to process. This transformer converts between this
intuitive format and the supported multilabel format: a (samples x classes)
binary matrix indicating the presence of a class label.
Parameters
----------
classes : array-like of shape [n_classes] (optional)
Indicates an ordering for the class labels
sparse_output : boolean (default: False),
Set to true if output binary array is desired in CSR sparse format
Attributes
----------
classes_ : array of labels
A copy of the `classes` parameter where provided,
or otherwise, the sorted set of classes found when fitting.
Examples
--------
>>> mlb = MultiLabelBinarizer()
>>> mlb.fit_transform([(1, 2), (3,)])
array([[1, 1, 0],
[0, 0, 1]])
>>> mlb.classes_
array([1, 2, 3])
>>> mlb.fit_transform([set(['sci-fi', 'thriller']), set(['comedy'])])
array([[0, 1, 1],
[1, 0, 0]])
>>> list(mlb.classes_)
['comedy', 'sci-fi', 'thriller']
"""
def __init__(self, classes=None, sparse_output=False):
self.classes = classes
self.sparse_output = sparse_output
def fit(self, y):
"""Fit the label sets binarizer, storing `classes_`
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
self : returns this MultiLabelBinarizer instance
"""
if self.classes is None:
classes = sorted(set(itertools.chain.from_iterable(y)))
else:
classes = self.classes
dtype = np.int if all(isinstance(c, int) for c in classes) else object
self.classes_ = np.empty(len(classes), dtype=dtype)
self.classes_[:] = classes
return self
def fit_transform(self, y):
"""Fit the label sets binarizer and transform the given label sets
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
y_indicator : array or CSR matrix, shape (n_samples, n_classes)
A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]` is in
`y[i]`, and 0 otherwise.
"""
if self.classes is not None:
return self.fit(y).transform(y)
# Automatically increment on new class
class_mapping = defaultdict(int)
class_mapping.default_factory = class_mapping.__len__
yt = self._transform(y, class_mapping)
# sort classes and reorder columns
tmp = sorted(class_mapping, key=class_mapping.get)
# (make safe for tuples)
dtype = np.int if all(isinstance(c, int) for c in tmp) else object
class_mapping = np.empty(len(tmp), dtype=dtype)
class_mapping[:] = tmp
self.classes_, inverse = np.unique(class_mapping, return_inverse=True)
yt.indices = np.take(inverse, yt.indices)
if not self.sparse_output:
yt = yt.toarray()
return yt
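    # Illustrative note (not part of the original module): the defaultdict above
    # maps each previously unseen label to the next free column index (the
    # mapping's current length), so classes are numbered in order of first
    # appearance and then re-sorted into ``classes_`` before returning.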
def transform(self, y):
"""Transform the given label sets
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
y_indicator : array or CSR matrix, shape (n_samples, n_classes)
A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]` is in
`y[i]`, and 0 otherwise.
"""
class_to_index = dict(zip(self.classes_, range(len(self.classes_))))
yt = self._transform(y, class_to_index)
if not self.sparse_output:
yt = yt.toarray()
return yt
def _transform(self, y, class_mapping):
"""Transforms the label sets with a given mapping
Parameters
----------
y : iterable of iterables
class_mapping : Mapping
Maps from label to column index in label indicator matrix
Returns
-------
y_indicator : sparse CSR matrix, shape (n_samples, n_classes)
Label indicator matrix
"""
indices = array.array('i')
indptr = array.array('i', [0])
for labels in y:
indices.extend(set(class_mapping[label] for label in labels))
indptr.append(len(indices))
data = np.ones(len(indices), dtype=int)
return sp.csr_matrix((data, indices, indptr),
shape=(len(indptr) - 1, len(class_mapping)))
def inverse_transform(self, yt):
"""Transform the given indicator matrix into label sets
Parameters
----------
yt : array or sparse matrix of shape (n_samples, n_classes)
            A matrix containing only 1s and 0s.
Returns
-------
y : list of tuples
The set of labels for each sample such that `y[i]` consists of
`classes_[j]` for each `yt[i, j] == 1`.
"""
if yt.shape[1] != len(self.classes_):
raise ValueError('Expected indicator for {0} classes, but got {1}'
.format(len(self.classes_), yt.shape[1]))
if sp.issparse(yt):
yt = yt.tocsr()
if len(yt.data) != 0 and len(np.setdiff1d(yt.data, [0, 1])) > 0:
raise ValueError('Expected only 0s and 1s in label indicator.')
return [tuple(self.classes_.take(yt.indices[start:end]))
for start, end in zip(yt.indptr[:-1], yt.indptr[1:])]
else:
unexpected = np.setdiff1d(yt, [0, 1])
if len(unexpected) > 0:
raise ValueError('Expected only 0s and 1s in label indicator. '
'Also got {0}'.format(unexpected))
return [tuple(self.classes_.compress(indicators)) for indicators
in yt]
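# --- Illustrative sketch (not part of the original module) ------------------
# Hedged round-trip example for MultiLabelBinarizer: label sets -> indicator
# matrix -> label sets. Hypothetical helper, illustration only.
def _demo_multilabel_round_trip():
    mlb = MultiLabelBinarizer()
    yt = mlb.fit_transform([{"sci-fi", "thriller"}, {"comedy"}])
    # yt -> array([[0, 1, 1], [1, 0, 0]]), classes_ -> ['comedy', 'sci-fi', 'thriller']
    return mlb.inverse_transform(yt)  # [('sci-fi', 'thriller'), ('comedy',)]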
| bsd-3-clause |
pr-omethe-us/PyKED | setup.py | 2 | 2445 | from setuptools import setup
from codecs import open
from os import path
import sys
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'pyked', '_version.py')) as version_file:
exec(version_file.read())
with open(path.join(here, 'README.md')) as readme_file:
readme = readme_file.read()
with open(path.join(here, 'CHANGELOG.md')) as changelog_file:
changelog = changelog_file.read()
with open(path.join(here, 'CITATION.md')) as citation_file:
citation = citation_file.read()
long_description = readme + '\n\n' + changelog + '\n\n' + citation
install_requires = [
'pyyaml>=3.12,<4.0',
'cerberus>=1.0.0,<1.2',
'pint>=0.7.2,<0.9',
'numpy>=1.11.0,<2.0',
'habanero>=0.6.0',
'uncertainties>=3.0.1,<3.1',
]
tests_require = [
'pytest>=3.2.0',
'pytest-cov',
]
extras_require = {
'dataframes': ['pandas >=0.22.0,<0.23'],
}
needs_pytest = {'pytest', 'test', 'ptr'}.intersection(sys.argv)
setup_requires = ['pytest-runner'] if needs_pytest else []
setup(
name='pyked',
version=__version__,
description='Package for manipulating Chemical Kinetics Experimental Data (ChemKED) files.',
long_description=long_description,
long_description_content_type='text/markdown',
author='Kyle Niemeyer',
author_email='[email protected]',
url='https://github.com/pr-omethe-us/PyKED',
packages=['pyked', 'pyked.tests'],
package_dir={'pyked': 'pyked'},
include_package_data=True,
install_requires=install_requires,
license='BSD-3-Clause',
zip_safe=False,
keywords=['chemical kinetics'],
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Scientific/Engineering :: Chemistry',
],
tests_require=tests_require,
extras_require=extras_require,
setup_requires=setup_requires,
python_requires='~=3.5',
entry_points={
'console_scripts': ['convert_ck=pyked.converters:main',
'respth2ck=pyked.converters:respth2ck',
'ck2respth=pyked.converters:ck2respth',
],
}
)
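# --- Illustrative note (not part of the original setup script) --------------
# Hedged usage sketch for the packaging hooks declared above (treat the exact
# commands as assumptions):
#   pip install pyked[dataframes]   # pulls in the optional pandas dependency
#   convert_ck <file>               # console script -> pyked.converters:main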
| bsd-3-clause |
Winand/pandas | pandas/tests/frame/test_api.py | 1 | 14736 | # -*- coding: utf-8 -*-
from __future__ import print_function
import pytest
# pylint: disable-msg=W0612,E1101
from copy import deepcopy
import sys
from distutils.version import LooseVersion
from pandas.compat import range, lrange, long
from pandas import compat
from numpy.random import randn
import numpy as np
from pandas import DataFrame, Series, date_range, timedelta_range
import pandas as pd
from pandas.util.testing import (assert_almost_equal,
assert_series_equal,
assert_frame_equal)
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
class SharedWithSparse(object):
"""
A collection of tests DataFrame and SparseDataFrame can share.
In generic tests on this class, use ``self._assert_frame_equal()`` and
``self._assert_series_equal()`` which are implemented in sub-classes
and dispatch correctly.
"""
def _assert_frame_equal(self, left, right):
"""Dispatch to frame class dependent assertion"""
raise NotImplementedError
def _assert_series_equal(self, left, right):
"""Dispatch to series class dependent assertion"""
raise NotImplementedError
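    # Illustrative note (not part of the original tests): a concrete subclass
    # wires these dispatch hooks to the matching testing helpers, e.g.
    #     _assert_frame_equal = staticmethod(assert_frame_equal)
    #     _assert_series_equal = staticmethod(assert_series_equal)
    # as done for the dense DataFrame case further below.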
def test_copy_index_name_checking(self):
# don't want to be able to modify the index stored elsewhere after
# making a copy
for attr in ('index', 'columns'):
ind = getattr(self.frame, attr)
ind.name = None
cp = self.frame.copy()
getattr(cp, attr).name = 'foo'
assert getattr(self.frame, attr).name is None
def test_getitem_pop_assign_name(self):
s = self.frame['A']
assert s.name == 'A'
s = self.frame.pop('A')
assert s.name == 'A'
s = self.frame.loc[:, 'B']
assert s.name == 'B'
s2 = s.loc[:]
assert s2.name == 'B'
def test_get_value(self):
for idx in self.frame.index:
for col in self.frame.columns:
result = self.frame.get_value(idx, col)
expected = self.frame[col][idx]
tm.assert_almost_equal(result, expected)
def test_add_prefix_suffix(self):
with_prefix = self.frame.add_prefix('foo#')
expected = pd.Index(['foo#%s' % c for c in self.frame.columns])
tm.assert_index_equal(with_prefix.columns, expected)
with_suffix = self.frame.add_suffix('#foo')
expected = pd.Index(['%s#foo' % c for c in self.frame.columns])
tm.assert_index_equal(with_suffix.columns, expected)
with_pct_prefix = self.frame.add_prefix('%')
expected = pd.Index(['%{}'.format(c) for c in self.frame.columns])
tm.assert_index_equal(with_pct_prefix.columns, expected)
with_pct_suffix = self.frame.add_suffix('%')
expected = pd.Index(['{}%'.format(c) for c in self.frame.columns])
tm.assert_index_equal(with_pct_suffix.columns, expected)
def test_get_axis(self):
f = self.frame
assert f._get_axis_number(0) == 0
assert f._get_axis_number(1) == 1
assert f._get_axis_number('index') == 0
assert f._get_axis_number('rows') == 0
assert f._get_axis_number('columns') == 1
assert f._get_axis_name(0) == 'index'
assert f._get_axis_name(1) == 'columns'
assert f._get_axis_name('index') == 'index'
assert f._get_axis_name('rows') == 'index'
assert f._get_axis_name('columns') == 'columns'
assert f._get_axis(0) is f.index
assert f._get_axis(1) is f.columns
tm.assert_raises_regex(
ValueError, 'No axis named', f._get_axis_number, 2)
tm.assert_raises_regex(
ValueError, 'No axis.*foo', f._get_axis_name, 'foo')
tm.assert_raises_regex(
ValueError, 'No axis.*None', f._get_axis_name, None)
tm.assert_raises_regex(ValueError, 'No axis named',
f._get_axis_number, None)
def test_keys(self):
getkeys = self.frame.keys
assert getkeys() is self.frame.columns
def test_column_contains_typeerror(self):
try:
self.frame.columns in self.frame
except TypeError:
pass
def test_not_hashable(self):
df = self.klass([1])
pytest.raises(TypeError, hash, df)
pytest.raises(TypeError, hash, self.empty)
def test_new_empty_index(self):
df1 = self.klass(randn(0, 3))
df2 = self.klass(randn(0, 3))
df1.index.name = 'foo'
assert df2.index.name is None
def test_array_interface(self):
with np.errstate(all='ignore'):
result = np.sqrt(self.frame)
assert isinstance(result, type(self.frame))
assert result.index is self.frame.index
assert result.columns is self.frame.columns
self._assert_frame_equal(result, self.frame.apply(np.sqrt))
def test_get_agg_axis(self):
cols = self.frame._get_agg_axis(0)
assert cols is self.frame.columns
idx = self.frame._get_agg_axis(1)
assert idx is self.frame.index
pytest.raises(ValueError, self.frame._get_agg_axis, 2)
def test_nonzero(self):
assert self.empty.empty
assert not self.frame.empty
assert not self.mixed_frame.empty
# corner case
df = DataFrame({'A': [1., 2., 3.],
'B': ['a', 'b', 'c']},
index=np.arange(3))
del df['A']
assert not df.empty
def test_iteritems(self):
df = self.klass([[1, 2, 3], [4, 5, 6]], columns=['a', 'a', 'b'])
for k, v in compat.iteritems(df):
assert isinstance(v, self.klass._constructor_sliced)
def test_items(self):
# issue #17213, #13918
cols = ['a', 'b', 'c']
df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=cols)
for c, (k, v) in zip(cols, df.items()):
assert c == k
assert isinstance(v, Series)
assert (df[k] == v).all()
def test_iter(self):
assert tm.equalContents(list(self.frame), self.frame.columns)
def test_iterrows(self):
for k, v in self.frame.iterrows():
exp = self.frame.loc[k]
self._assert_series_equal(v, exp)
for k, v in self.mixed_frame.iterrows():
exp = self.mixed_frame.loc[k]
self._assert_series_equal(v, exp)
def test_itertuples(self):
for i, tup in enumerate(self.frame.itertuples()):
s = self.klass._constructor_sliced(tup[1:])
s.name = tup[0]
expected = self.frame.iloc[i, :].reset_index(drop=True)
self._assert_series_equal(s, expected)
df = self.klass({'floats': np.random.randn(5),
'ints': lrange(5)}, columns=['floats', 'ints'])
for tup in df.itertuples(index=False):
assert isinstance(tup[1], (int, long))
df = self.klass(data={"a": [1, 2, 3], "b": [4, 5, 6]})
dfaa = df[['a', 'a']]
assert (list(dfaa.itertuples()) ==
[(0, 1, 1), (1, 2, 2), (2, 3, 3)])
        # repr will be int/long on 32-bit/windows
if not (compat.is_platform_windows() or compat.is_platform_32bit()):
assert (repr(list(df.itertuples(name=None))) ==
'[(0, 1, 4), (1, 2, 5), (2, 3, 6)]')
tup = next(df.itertuples(name='TestName'))
if sys.version >= LooseVersion('2.7'):
assert tup._fields == ('Index', 'a', 'b')
assert (tup.Index, tup.a, tup.b) == tup
assert type(tup).__name__ == 'TestName'
df.columns = ['def', 'return']
tup2 = next(df.itertuples(name='TestName'))
assert tup2 == (0, 1, 4)
if sys.version >= LooseVersion('2.7'):
assert tup2._fields == ('Index', '_1', '_2')
df3 = DataFrame(dict(('f' + str(i), [i]) for i in range(1024)))
# will raise SyntaxError if trying to create namedtuple
tup3 = next(df3.itertuples())
assert not hasattr(tup3, '_fields')
assert isinstance(tup3, tuple)
def test_len(self):
assert len(self.frame) == len(self.frame.index)
def test_as_matrix(self):
frame = self.frame
mat = frame.as_matrix()
frameCols = frame.columns
for i, row in enumerate(mat):
for j, value in enumerate(row):
col = frameCols[j]
if np.isnan(value):
assert np.isnan(frame[col][i])
else:
assert value == frame[col][i]
# mixed type
mat = self.mixed_frame.as_matrix(['foo', 'A'])
assert mat[0, 0] == 'bar'
df = self.klass({'real': [1, 2, 3], 'complex': [1j, 2j, 3j]})
mat = df.as_matrix()
assert mat[0, 0] == 1j
# single block corner case
mat = self.frame.as_matrix(['A', 'B'])
expected = self.frame.reindex(columns=['A', 'B']).values
assert_almost_equal(mat, expected)
def test_transpose(self):
frame = self.frame
dft = frame.T
for idx, series in compat.iteritems(dft):
for col, value in compat.iteritems(series):
if np.isnan(value):
assert np.isnan(frame[col][idx])
else:
assert value == frame[col][idx]
# mixed type
index, data = tm.getMixedTypeDict()
mixed = self.klass(data, index=index)
mixed_T = mixed.T
for col, s in compat.iteritems(mixed_T):
assert s.dtype == np.object_
def test_swapaxes(self):
df = self.klass(np.random.randn(10, 5))
self._assert_frame_equal(df.T, df.swapaxes(0, 1))
self._assert_frame_equal(df.T, df.swapaxes(1, 0))
self._assert_frame_equal(df, df.swapaxes(0, 0))
pytest.raises(ValueError, df.swapaxes, 2, 5)
def test_axis_aliases(self):
f = self.frame
# reg name
expected = f.sum(axis=0)
result = f.sum(axis='index')
assert_series_equal(result, expected)
expected = f.sum(axis=1)
result = f.sum(axis='columns')
assert_series_equal(result, expected)
def test_more_asMatrix(self):
values = self.mixed_frame.as_matrix()
assert values.shape[1] == len(self.mixed_frame.columns)
def test_repr_with_mi_nat(self):
df = self.klass({'X': [1, 2]},
index=[[pd.NaT, pd.Timestamp('20130101')], ['a', 'b']])
res = repr(df)
exp = ' X\nNaT a 1\n2013-01-01 b 2'
assert res == exp
def test_iteritems_names(self):
for k, v in compat.iteritems(self.mixed_frame):
assert v.name == k
def test_series_put_names(self):
series = self.mixed_frame._series
for k, v in compat.iteritems(series):
assert v.name == k
def test_empty_nonzero(self):
df = self.klass([1, 2, 3])
assert not df.empty
df = self.klass(index=[1], columns=[1])
assert not df.empty
df = self.klass(index=['a', 'b'], columns=['c', 'd']).dropna()
assert df.empty
assert df.T.empty
empty_frames = [self.klass(),
self.klass(index=[1]),
self.klass(columns=[1]),
self.klass({1: []})]
for df in empty_frames:
assert df.empty
assert df.T.empty
def test_with_datetimelikes(self):
df = self.klass({'A': date_range('20130101', periods=10),
'B': timedelta_range('1 day', periods=10)})
t = df.T
result = t.get_dtype_counts()
expected = Series({'object': 10})
tm.assert_series_equal(result, expected)
class TestDataFrameMisc(SharedWithSparse, TestData):
klass = DataFrame
# SharedWithSparse tests use generic, klass-agnostic assertion
_assert_frame_equal = staticmethod(assert_frame_equal)
_assert_series_equal = staticmethod(assert_series_equal)
def test_values(self):
self.frame.values[:, 0] = 5.
assert (self.frame.values[:, 0] == 5).all()
def test_deepcopy(self):
cp = deepcopy(self.frame)
series = cp['A']
series[:] = 10
for idx, value in compat.iteritems(series):
assert self.frame['A'][idx] != value
def test_transpose_get_view(self):
dft = self.frame.T
dft.values[:, 5:10] = 5
assert (self.frame.values[5:10] == 5).all()
def test_inplace_return_self(self):
# re #1893
data = DataFrame({'a': ['foo', 'bar', 'baz', 'qux'],
'b': [0, 0, 1, 1],
'c': [1, 2, 3, 4]})
def _check_f(base, f):
result = f(base)
assert result is None
# -----DataFrame-----
# set_index
f = lambda x: x.set_index('a', inplace=True)
_check_f(data.copy(), f)
# reset_index
f = lambda x: x.reset_index(inplace=True)
_check_f(data.set_index('a'), f)
# drop_duplicates
f = lambda x: x.drop_duplicates(inplace=True)
_check_f(data.copy(), f)
# sort
f = lambda x: x.sort_values('b', inplace=True)
_check_f(data.copy(), f)
# sort_index
f = lambda x: x.sort_index(inplace=True)
_check_f(data.copy(), f)
# fillna
f = lambda x: x.fillna(0, inplace=True)
_check_f(data.copy(), f)
# replace
f = lambda x: x.replace(1, 0, inplace=True)
_check_f(data.copy(), f)
# rename
f = lambda x: x.rename({1: 'foo'}, inplace=True)
_check_f(data.copy(), f)
# -----Series-----
d = data.copy()['c']
# reset_index
f = lambda x: x.reset_index(inplace=True, drop=True)
_check_f(data.set_index('a')['c'], f)
# fillna
f = lambda x: x.fillna(0, inplace=True)
_check_f(d.copy(), f)
# replace
f = lambda x: x.replace(1, 0, inplace=True)
_check_f(d.copy(), f)
# rename
f = lambda x: x.rename({1: 'foo'}, inplace=True)
_check_f(d.copy(), f)
def test_tab_complete_warning(self, ip):
# https://github.com/pandas-dev/pandas/issues/16409
pytest.importorskip('IPython', minversion="6.0.0")
from IPython.core.completer import provisionalcompleter
code = "import pandas as pd; df = pd.DataFrame()"
ip.run_code(code)
with tm.assert_produces_warning(None):
with provisionalcompleter('ignore'):
list(ip.Completer.completions('df.', 1))
| bsd-3-clause |
Winand/pandas | pandas/tests/groupby/test_categorical.py | 14 | 23584 | # -*- coding: utf-8 -*-
from __future__ import print_function
from datetime import datetime
import pytest
import numpy as np
from numpy import nan
import pandas as pd
from pandas import (Index, MultiIndex, CategoricalIndex,
DataFrame, Categorical, Series, Interval)
from pandas.util.testing import assert_frame_equal, assert_series_equal
import pandas.util.testing as tm
from .common import MixIn
class TestGroupByCategorical(MixIn):
def test_level_groupby_get_group(self):
# GH15155
df = DataFrame(data=np.arange(2, 22, 2),
index=MultiIndex(
levels=[pd.CategoricalIndex(["a", "b"]), range(10)],
labels=[[0] * 5 + [1] * 5, range(10)],
names=["Index1", "Index2"]))
g = df.groupby(level=["Index1"])
# expected should equal test.loc[["a"]]
# GH15166
expected = DataFrame(data=np.arange(2, 12, 2),
index=pd.MultiIndex(levels=[pd.CategoricalIndex(
["a", "b"]), range(5)],
labels=[[0] * 5, range(5)],
names=["Index1", "Index2"]))
result = g.get_group('a')
assert_frame_equal(result, expected)
def test_apply_use_categorical_name(self):
from pandas import qcut
cats = qcut(self.df.C, 4)
def get_stats(group):
return {'min': group.min(),
'max': group.max(),
'count': group.count(),
'mean': group.mean()}
result = self.df.groupby(cats).D.apply(get_stats)
assert result.index.names[0] == 'C'
def test_apply_categorical_data(self):
# GH 10138
for ordered in [True, False]:
dense = Categorical(list('abc'), ordered=ordered)
# 'b' is in the categories but not in the list
missing = Categorical(
list('aaa'), categories=['a', 'b'], ordered=ordered)
values = np.arange(len(dense))
df = DataFrame({'missing': missing,
'dense': dense,
'values': values})
grouped = df.groupby(['missing', 'dense'])
# missing category 'b' should still exist in the output index
idx = MultiIndex.from_product(
[Categorical(['a', 'b'], ordered=ordered),
Categorical(['a', 'b', 'c'], ordered=ordered)],
names=['missing', 'dense'])
expected = DataFrame([0, 1, 2, np.nan, np.nan, np.nan],
index=idx,
columns=['values'])
assert_frame_equal(grouped.apply(lambda x: np.mean(x)), expected)
assert_frame_equal(grouped.mean(), expected)
assert_frame_equal(grouped.agg(np.mean), expected)
# but for transform we should still get back the original index
idx = MultiIndex.from_product([['a'], ['a', 'b', 'c']],
names=['missing', 'dense'])
expected = Series(1, index=idx)
assert_series_equal(grouped.apply(lambda x: 1), expected)
def test_groupby_categorical(self):
levels = ['foo', 'bar', 'baz', 'qux']
codes = np.random.randint(0, 4, size=100)
cats = Categorical.from_codes(codes, levels, ordered=True)
data = DataFrame(np.random.randn(100, 4))
result = data.groupby(cats).mean()
expected = data.groupby(np.asarray(cats)).mean()
exp_idx = CategoricalIndex(levels, categories=cats.categories,
ordered=True)
expected = expected.reindex(exp_idx)
assert_frame_equal(result, expected)
grouped = data.groupby(cats)
desc_result = grouped.describe()
idx = cats.codes.argsort()
ord_labels = np.asarray(cats).take(idx)
ord_data = data.take(idx)
exp_cats = Categorical(ord_labels, ordered=True,
categories=['foo', 'bar', 'baz', 'qux'])
expected = ord_data.groupby(exp_cats, sort=False).describe()
assert_frame_equal(desc_result, expected)
# GH 10460
expc = Categorical.from_codes(np.arange(4).repeat(8),
levels, ordered=True)
exp = CategoricalIndex(expc)
tm.assert_index_equal((desc_result.stack().index
.get_level_values(0)), exp)
exp = Index(['count', 'mean', 'std', 'min', '25%', '50%',
'75%', 'max'] * 4)
tm.assert_index_equal((desc_result.stack().index
.get_level_values(1)), exp)
def test_groupby_datetime_categorical(self):
# GH9049: ensure backward compatibility
levels = pd.date_range('2014-01-01', periods=4)
codes = np.random.randint(0, 4, size=100)
cats = Categorical.from_codes(codes, levels, ordered=True)
data = DataFrame(np.random.randn(100, 4))
result = data.groupby(cats).mean()
expected = data.groupby(np.asarray(cats)).mean()
expected = expected.reindex(levels)
expected.index = CategoricalIndex(expected.index,
categories=expected.index,
ordered=True)
assert_frame_equal(result, expected)
grouped = data.groupby(cats)
desc_result = grouped.describe()
idx = cats.codes.argsort()
ord_labels = cats.take_nd(idx)
ord_data = data.take(idx)
expected = ord_data.groupby(ord_labels).describe()
assert_frame_equal(desc_result, expected)
tm.assert_index_equal(desc_result.index, expected.index)
tm.assert_index_equal(
desc_result.index.get_level_values(0),
expected.index.get_level_values(0))
# GH 10460
expc = Categorical.from_codes(
np.arange(4).repeat(8), levels, ordered=True)
exp = CategoricalIndex(expc)
tm.assert_index_equal((desc_result.stack().index
.get_level_values(0)), exp)
exp = Index(['count', 'mean', 'std', 'min', '25%', '50%',
'75%', 'max'] * 4)
tm.assert_index_equal((desc_result.stack().index
.get_level_values(1)), exp)
def test_groupby_categorical_index(self):
s = np.random.RandomState(12345)
levels = ['foo', 'bar', 'baz', 'qux']
codes = s.randint(0, 4, size=20)
cats = Categorical.from_codes(codes, levels, ordered=True)
df = DataFrame(
np.repeat(
np.arange(20), 4).reshape(-1, 4), columns=list('abcd'))
df['cats'] = cats
# with a cat index
result = df.set_index('cats').groupby(level=0).sum()
expected = df[list('abcd')].groupby(cats.codes).sum()
expected.index = CategoricalIndex(
Categorical.from_codes(
[0, 1, 2, 3], levels, ordered=True), name='cats')
assert_frame_equal(result, expected)
# with a cat column, should produce a cat index
result = df.groupby('cats').sum()
expected = df[list('abcd')].groupby(cats.codes).sum()
expected.index = CategoricalIndex(
Categorical.from_codes(
[0, 1, 2, 3], levels, ordered=True), name='cats')
assert_frame_equal(result, expected)
def test_groupby_describe_categorical_columns(self):
# GH 11558
cats = pd.CategoricalIndex(['qux', 'foo', 'baz', 'bar'],
categories=['foo', 'bar', 'baz', 'qux'],
ordered=True)
df = DataFrame(np.random.randn(20, 4), columns=cats)
result = df.groupby([1, 2, 3, 4] * 5).describe()
tm.assert_index_equal(result.stack().columns, cats)
tm.assert_categorical_equal(result.stack().columns.values, cats.values)
def test_groupby_unstack_categorical(self):
# GH11558 (example is taken from the original issue)
df = pd.DataFrame({'a': range(10),
'medium': ['A', 'B'] * 5,
'artist': list('XYXXY') * 2})
df['medium'] = df['medium'].astype('category')
gcat = df.groupby(['artist', 'medium'])['a'].count().unstack()
result = gcat.describe()
exp_columns = pd.CategoricalIndex(['A', 'B'], ordered=False,
name='medium')
tm.assert_index_equal(result.columns, exp_columns)
tm.assert_categorical_equal(result.columns.values, exp_columns.values)
result = gcat['A'] + gcat['B']
expected = pd.Series([6, 4], index=pd.Index(['X', 'Y'], name='artist'))
tm.assert_series_equal(result, expected)
def test_groupby_bins_unequal_len(self):
# GH3011
series = Series([np.nan, np.nan, 1, 1, 2, 2, 3, 3, 4, 4])
bins = pd.cut(series.dropna().values, 4)
# len(bins) != len(series) here
def f():
series.groupby(bins).mean()
pytest.raises(ValueError, f)
def test_groupby_multi_categorical_as_index(self):
# GH13204
df = DataFrame({'cat': Categorical([1, 2, 2], [1, 2, 3]),
'A': [10, 11, 11],
'B': [101, 102, 103]})
result = df.groupby(['cat', 'A'], as_index=False).sum()
expected = DataFrame({'cat': Categorical([1, 1, 2, 2, 3, 3]),
'A': [10, 11, 10, 11, 10, 11],
'B': [101.0, nan, nan, 205.0, nan, nan]},
columns=['cat', 'A', 'B'])
tm.assert_frame_equal(result, expected)
# function grouper
f = lambda r: df.loc[r, 'A']
result = df.groupby(['cat', f], as_index=False).sum()
expected = DataFrame({'cat': Categorical([1, 1, 2, 2, 3, 3]),
'A': [10.0, nan, nan, 22.0, nan, nan],
'B': [101.0, nan, nan, 205.0, nan, nan]},
columns=['cat', 'A', 'B'])
tm.assert_frame_equal(result, expected)
# another not in-axis grouper (conflicting names in index)
s = Series(['a', 'b', 'b'], name='cat')
result = df.groupby(['cat', s], as_index=False).sum()
expected = DataFrame({'cat': Categorical([1, 1, 2, 2, 3, 3]),
'A': [10.0, nan, nan, 22.0, nan, nan],
'B': [101.0, nan, nan, 205.0, nan, nan]},
columns=['cat', 'A', 'B'])
tm.assert_frame_equal(result, expected)
# is original index dropped?
expected = DataFrame({'cat': Categorical([1, 1, 2, 2, 3, 3]),
'A': [10, 11, 10, 11, 10, 11],
'B': [101.0, nan, nan, 205.0, nan, nan]},
columns=['cat', 'A', 'B'])
group_columns = ['cat', 'A']
for name in [None, 'X', 'B', 'cat']:
df.index = Index(list("abc"), name=name)
if name in group_columns and name in df.index.names:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
result = df.groupby(group_columns, as_index=False).sum()
else:
result = df.groupby(group_columns, as_index=False).sum()
tm.assert_frame_equal(result, expected, check_index_type=True)
def test_groupby_preserve_categories(self):
# GH-13179
categories = list('abc')
# ordered=True
df = DataFrame({'A': pd.Categorical(list('ba'),
categories=categories,
ordered=True)})
index = pd.CategoricalIndex(categories, categories, ordered=True)
tm.assert_index_equal(df.groupby('A', sort=True).first().index, index)
tm.assert_index_equal(df.groupby('A', sort=False).first().index, index)
# ordered=False
df = DataFrame({'A': pd.Categorical(list('ba'),
categories=categories,
ordered=False)})
sort_index = pd.CategoricalIndex(categories, categories, ordered=False)
nosort_index = pd.CategoricalIndex(list('bac'), list('bac'),
ordered=False)
tm.assert_index_equal(df.groupby('A', sort=True).first().index,
sort_index)
tm.assert_index_equal(df.groupby('A', sort=False).first().index,
nosort_index)
def test_groupby_preserve_categorical_dtype(self):
# GH13743, GH13854
df = DataFrame({'A': [1, 2, 1, 1, 2],
'B': [10, 16, 22, 28, 34],
'C1': Categorical(list("abaab"),
categories=list("bac"),
ordered=False),
'C2': Categorical(list("abaab"),
categories=list("bac"),
ordered=True)})
# single grouper
exp_full = DataFrame({'A': [2.0, 1.0, np.nan],
'B': [25.0, 20.0, np.nan],
'C1': Categorical(list("bac"),
categories=list("bac"),
ordered=False),
'C2': Categorical(list("bac"),
categories=list("bac"),
ordered=True)})
for col in ['C1', 'C2']:
result1 = df.groupby(by=col, as_index=False).mean()
result2 = df.groupby(by=col, as_index=True).mean().reset_index()
expected = exp_full.reindex(columns=result1.columns)
tm.assert_frame_equal(result1, expected)
tm.assert_frame_equal(result2, expected)
# multiple grouper
exp_full = DataFrame({'A': [1, 1, 1, 2, 2, 2],
'B': [np.nan, 20.0, np.nan, 25.0, np.nan,
np.nan],
'C1': Categorical(list("bacbac"),
categories=list("bac"),
ordered=False),
'C2': Categorical(list("bacbac"),
categories=list("bac"),
ordered=True)})
for cols in [['A', 'C1'], ['A', 'C2']]:
result1 = df.groupby(by=cols, as_index=False).mean()
result2 = df.groupby(by=cols, as_index=True).mean().reset_index()
expected = exp_full.reindex(columns=result1.columns)
tm.assert_frame_equal(result1, expected)
tm.assert_frame_equal(result2, expected)
def test_groupby_categorical_no_compress(self):
data = Series(np.random.randn(9))
codes = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])
cats = Categorical.from_codes(codes, [0, 1, 2], ordered=True)
result = data.groupby(cats).mean()
exp = data.groupby(codes).mean()
exp.index = CategoricalIndex(exp.index, categories=cats.categories,
ordered=cats.ordered)
assert_series_equal(result, exp)
codes = np.array([0, 0, 0, 1, 1, 1, 3, 3, 3])
cats = Categorical.from_codes(codes, [0, 1, 2, 3], ordered=True)
result = data.groupby(cats).mean()
exp = data.groupby(codes).mean().reindex(cats.categories)
exp.index = CategoricalIndex(exp.index, categories=cats.categories,
ordered=cats.ordered)
assert_series_equal(result, exp)
cats = Categorical(["a", "a", "a", "b", "b", "b", "c", "c", "c"],
categories=["a", "b", "c", "d"], ordered=True)
data = DataFrame({"a": [1, 1, 1, 2, 2, 2, 3, 4, 5], "b": cats})
result = data.groupby("b").mean()
result = result["a"].values
exp = np.array([1, 2, 4, np.nan])
tm.assert_numpy_array_equal(result, exp)
def test_groupby_sort_categorical(self):
# dataframe groupby sort was being ignored # GH 8868
df = DataFrame([['(7.5, 10]', 10, 10],
['(7.5, 10]', 8, 20],
['(2.5, 5]', 5, 30],
['(5, 7.5]', 6, 40],
['(2.5, 5]', 4, 50],
['(0, 2.5]', 1, 60],
['(5, 7.5]', 7, 70]], columns=['range', 'foo', 'bar'])
df['range'] = Categorical(df['range'], ordered=True)
index = CategoricalIndex(['(0, 2.5]', '(2.5, 5]', '(5, 7.5]',
'(7.5, 10]'], name='range', ordered=True)
result_sort = DataFrame([[1, 60], [5, 30], [6, 40], [10, 10]],
columns=['foo', 'bar'], index=index)
col = 'range'
assert_frame_equal(result_sort, df.groupby(col, sort=True).first())
# when categories is ordered, group is ordered by category's order
assert_frame_equal(result_sort, df.groupby(col, sort=False).first())
df['range'] = Categorical(df['range'], ordered=False)
index = CategoricalIndex(['(0, 2.5]', '(2.5, 5]', '(5, 7.5]',
'(7.5, 10]'], name='range')
result_sort = DataFrame([[1, 60], [5, 30], [6, 40], [10, 10]],
columns=['foo', 'bar'], index=index)
index = CategoricalIndex(['(7.5, 10]', '(2.5, 5]', '(5, 7.5]',
'(0, 2.5]'],
categories=['(7.5, 10]', '(2.5, 5]',
'(5, 7.5]', '(0, 2.5]'],
name='range')
result_nosort = DataFrame([[10, 10], [5, 30], [6, 40], [1, 60]],
index=index, columns=['foo', 'bar'])
col = 'range'
# this is an unordered categorical, but we allow this ####
assert_frame_equal(result_sort, df.groupby(col, sort=True).first())
assert_frame_equal(result_nosort, df.groupby(col, sort=False).first())
def test_groupby_sort_categorical_datetimelike(self):
# GH10505
        # use same data as test_groupby_sort_categorical, whose categories
        # correspond to datetime.month
df = DataFrame({'dt': [datetime(2011, 7, 1), datetime(2011, 7, 1),
datetime(2011, 2, 1), datetime(2011, 5, 1),
datetime(2011, 2, 1), datetime(2011, 1, 1),
datetime(2011, 5, 1)],
'foo': [10, 8, 5, 6, 4, 1, 7],
'bar': [10, 20, 30, 40, 50, 60, 70]},
columns=['dt', 'foo', 'bar'])
# ordered=True
df['dt'] = Categorical(df['dt'], ordered=True)
index = [datetime(2011, 1, 1), datetime(2011, 2, 1),
datetime(2011, 5, 1), datetime(2011, 7, 1)]
result_sort = DataFrame(
[[1, 60], [5, 30], [6, 40], [10, 10]], columns=['foo', 'bar'])
result_sort.index = CategoricalIndex(index, name='dt', ordered=True)
index = [datetime(2011, 7, 1), datetime(2011, 2, 1),
datetime(2011, 5, 1), datetime(2011, 1, 1)]
result_nosort = DataFrame([[10, 10], [5, 30], [6, 40], [1, 60]],
columns=['foo', 'bar'])
result_nosort.index = CategoricalIndex(index, categories=index,
name='dt', ordered=True)
col = 'dt'
assert_frame_equal(result_sort, df.groupby(col, sort=True).first())
# when categories is ordered, group is ordered by category's order
assert_frame_equal(result_sort, df.groupby(col, sort=False).first())
# ordered = False
df['dt'] = Categorical(df['dt'], ordered=False)
index = [datetime(2011, 1, 1), datetime(2011, 2, 1),
datetime(2011, 5, 1), datetime(2011, 7, 1)]
result_sort = DataFrame(
[[1, 60], [5, 30], [6, 40], [10, 10]], columns=['foo', 'bar'])
result_sort.index = CategoricalIndex(index, name='dt')
index = [datetime(2011, 7, 1), datetime(2011, 2, 1),
datetime(2011, 5, 1), datetime(2011, 1, 1)]
result_nosort = DataFrame([[10, 10], [5, 30], [6, 40], [1, 60]],
columns=['foo', 'bar'])
result_nosort.index = CategoricalIndex(index, categories=index,
name='dt')
col = 'dt'
assert_frame_equal(result_sort, df.groupby(col, sort=True).first())
assert_frame_equal(result_nosort, df.groupby(col, sort=False).first())
def test_groupby_categorical_two_columns(self):
# https://github.com/pandas-dev/pandas/issues/8138
d = {'cat':
pd.Categorical(["a", "b", "a", "b"], categories=["a", "b", "c"],
ordered=True),
'ints': [1, 1, 2, 2],
'val': [10, 20, 30, 40]}
test = pd.DataFrame(d)
# Grouping on a single column
groups_single_key = test.groupby("cat")
res = groups_single_key.agg('mean')
exp_index = pd.CategoricalIndex(["a", "b", "c"], name="cat",
ordered=True)
exp = DataFrame({"ints": [1.5, 1.5, np.nan], "val": [20, 30, np.nan]},
index=exp_index)
tm.assert_frame_equal(res, exp)
# Grouping on two columns
groups_double_key = test.groupby(["cat", "ints"])
res = groups_double_key.agg('mean')
exp = DataFrame({"val": [10, 30, 20, 40, np.nan, np.nan],
"cat": pd.Categorical(["a", "a", "b", "b", "c", "c"],
ordered=True),
"ints": [1, 2, 1, 2, 1, 2]}).set_index(["cat", "ints"
])
tm.assert_frame_equal(res, exp)
# GH 10132
for key in [('a', 1), ('b', 2), ('b', 1), ('a', 2)]:
c, i = key
result = groups_double_key.get_group(key)
expected = test[(test.cat == c) & (test.ints == i)]
assert_frame_equal(result, expected)
d = {'C1': [3, 3, 4, 5], 'C2': [1, 2, 3, 4], 'C3': [10, 100, 200, 34]}
test = pd.DataFrame(d)
values = pd.cut(test['C1'], [1, 2, 3, 6])
values.name = "cat"
groups_double_key = test.groupby([values, 'C2'])
res = groups_double_key.agg('mean')
nan = np.nan
idx = MultiIndex.from_product(
[Categorical([Interval(1, 2), Interval(2, 3),
Interval(3, 6)], ordered=True),
[1, 2, 3, 4]],
names=["cat", "C2"])
exp = DataFrame({"C1": [nan, nan, nan, nan, 3, 3,
nan, nan, nan, nan, 4, 5],
"C3": [nan, nan, nan, nan, 10, 100,
nan, nan, nan, nan, 200, 34]}, index=idx)
tm.assert_frame_equal(res, exp)
| bsd-3-clause |
riga/law | examples/parallel_optimization/tasks.py | 1 | 4708 | # coding: utf-8
import os
import luigi
import law
law.contrib.load("matplotlib")
class Task(law.Task):
"""
    Base task that provides some convenience methods to create local file and
directory targets at the default data path.
"""
def local_path(self, *path):
# ANALYSIS_DATA_PATH is defined in setup.sh
parts = (os.getenv("ANALYSIS_DATA_PATH"), self.__class__.__name__) + path
return os.path.join(*parts)
def local_target(self, *path, **kwargs):
return law.LocalFileTarget(self.local_path(*path), **kwargs)
class Optimizer(Task, law.LocalWorkflow):
"""
Workflow that runs optimization.
"""
iterations = luigi.IntParameter(default=10, description="Number of iterations")
n_parallel = luigi.IntParameter(default=4, description="Number of parallel evaluations")
    n_initial_points = luigi.IntParameter(default=10, description="Number of randomly sampled values \
        before starting optimization")
def create_branch_map(self):
return list(range(self.iterations))
def requires(self):
if self.branch == 0:
return None
return Optimizer.req(self, branch=self.branch - 1)
def output(self):
return self.local_target("optimizer_{}.pkl".format(self.branch))
def run(self):
import skopt
optimizer = self.input().load() if self.branch != 0 else skopt.Optimizer(
dimensions=[skopt.space.Real(-5.0, 10.0), skopt.space.Real(0.0, 15.0)],
random_state=1, n_initial_points=self.n_initial_points
)
x = optimizer.ask(n_points=self.n_parallel)
output = yield Objective.req(self, x=x, iteration=self.branch, branch=-1)
y = [f.load()["y"] for f in output["collection"].targets.values()]
optimizer.tell(x, y)
print("minimum after {} iterations: {}".format(self.branch + 1, min(optimizer.yi)))
with self.output().localize("w") as tmp:
tmp.dump(optimizer)
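    # Illustrative note (not part of the original example): the ``yield`` in
    # run() is luigi's dynamic-dependency mechanism -- the branch suspends until
    # the Objective workflow for the proposed points is complete, then resumes
    # with its outputs and feeds the evaluated losses back into the optimizer.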
@luigi.util.inherits(Optimizer)
class OptimizerPlot(Task, law.LocalWorkflow):
"""
Workflow that runs optimization and plots results.
"""
plot_objective = luigi.BoolParameter(default=True, description="Plot objective. \
        Can be expensive to evaluate for high-dimensional input")
def create_branch_map(self):
return list(range(self.iterations))
def requires(self):
return Optimizer.req(self)
def has_fitted_model(self):
return self.plot_objective and (self.branch + 1) * self.n_parallel >= self.n_initial_points
def output(self):
collection = {
"evaluations": self.local_target("evaluations_{}.pdf".format(self.branch)),
"convergence": self.local_target("convergence_{}.pdf".format(self.branch))
}
if self.has_fitted_model():
collection["objective"] = self.local_target("objective_{}.pdf".format(self.branch))
return law.SiblingFileCollection(collection)
def run(self):
from skopt.plots import plot_objective, plot_evaluations, plot_convergence
import matplotlib.pyplot as plt
result = self.input().load().run(None, 0)
output = self.output()
with output.targets["convergence"].localize("w") as tmp:
plot_convergence(result)
tmp.dump(plt.gcf(), bbox_inches="tight")
plt.close()
with output.targets["evaluations"].localize("w") as tmp:
plot_evaluations(result, bins=10)
tmp.dump(plt.gcf(), bbox_inches="tight")
plt.close()
if self.has_fitted_model():
plot_objective(result)
with output.targets["objective"].localize("w") as tmp:
tmp.dump(plt.gcf(), bbox_inches="tight")
plt.close()
class Objective(Task, law.LocalWorkflow):
"""
Objective to optimize.
This workflow will evaluate the branin function for given values `x`.
    In a real-world example this will likely be an expensive-to-compute function such as a
    neural network training or another computationally demanding task.
The workflow can be easily extended as a remote workflow to submit evaluation jobs
to a batch system in order to run calculations in parallel.
"""
x = luigi.ListParameter()
iteration = luigi.IntParameter()
def create_branch_map(self):
return {i: x for i, x in enumerate(self.x)}
def output(self):
return self.local_target("x_{}_{}.json".format(self.iteration, self.branch))
def run(self):
from skopt.benchmarks import branin
with self.output().localize("w") as tmp:
tmp.dump({"x": self.branch_data, "y": branin(self.branch_data)})
| bsd-3-clause |
nesterione/scikit-learn | sklearn/preprocessing/__init__.py | 268 | 1319 | """
The :mod:`sklearn.preprocessing` module includes scaling, centering,
normalization, binarization and imputation methods.
"""
from ._function_transformer import FunctionTransformer
from .data import Binarizer
from .data import KernelCenterer
from .data import MinMaxScaler
from .data import MaxAbsScaler
from .data import Normalizer
from .data import RobustScaler
from .data import StandardScaler
from .data import add_dummy_feature
from .data import binarize
from .data import normalize
from .data import scale
from .data import robust_scale
from .data import maxabs_scale
from .data import minmax_scale
from .data import OneHotEncoder
from .data import PolynomialFeatures
from .label import label_binarize
from .label import LabelBinarizer
from .label import LabelEncoder
from .label import MultiLabelBinarizer
from .imputation import Imputer
__all__ = [
'Binarizer',
'FunctionTransformer',
'Imputer',
'KernelCenterer',
'LabelBinarizer',
'LabelEncoder',
'MultiLabelBinarizer',
'MinMaxScaler',
'MaxAbsScaler',
'Normalizer',
'OneHotEncoder',
'RobustScaler',
'StandardScaler',
'add_dummy_feature',
'PolynomialFeatures',
'binarize',
'normalize',
'scale',
'robust_scale',
'maxabs_scale',
'minmax_scale',
'label_binarize',
]
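# --- Illustrative sketch (not part of the original module) ------------------
# Hedged example of the scaling and label utilities re-exported above.
# Hypothetical helper, illustration only.
def _demo_preprocessing_usage():
    import numpy as np
    X = np.array([[1.0, -1.0], [2.0, 0.0], [0.0, 1.0]])
    X_scaled = StandardScaler().fit_transform(X)  # columns: zero mean, unit variance
    y_bin = LabelBinarizer().fit_transform(["yes", "no", "yes"])  # [[1], [0], [1]]
    return X_scaled, y_bin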
| bsd-3-clause |
landlab/landlab | landlab/components/chi_index/channel_chi.py | 3 | 27201 | # -*- coding: utf-8 -*-
"""Created March 2016.
@author: dejh
"""
import numpy as np
from landlab import Component, RasterModelGrid
try:
from itertools import izip
except ImportError:
izip = zip
class ChiFinder(Component):
"""Calculate Chi Indices.
This component calculates chi indices, sensu Perron & Royden, 2013,
for a Landlab landscape.
Examples
--------
>>> import numpy as np
>>> from landlab import RasterModelGrid
>>> from landlab.components import FlowAccumulator, FastscapeEroder
>>> from landlab.components import ChiFinder
>>> mg = RasterModelGrid((3, 4))
>>> for nodes in (mg.nodes_at_right_edge, mg.nodes_at_bottom_edge,
... mg.nodes_at_top_edge):
... mg.status_at_node[nodes] = mg.BC_NODE_IS_CLOSED
>>> _ = mg.add_field("topographic__elevation", mg.node_x, at="node")
>>> fr = FlowAccumulator(mg, flow_director='D8')
>>> cf = ChiFinder(mg,
... min_drainage_area=1.,
... reference_concavity=1.)
>>> fr.run_one_step()
>>> cf.calculate_chi()
>>> mg.at_node['channel__chi_index'].reshape(mg.shape)[1, :]
array([ 0.5, 1. , 2. , 0. ])
>>> mg2 = RasterModelGrid((5, 5), xy_spacing=100.)
>>> for nodes in (mg2.nodes_at_right_edge, mg2.nodes_at_bottom_edge,
... mg2.nodes_at_top_edge):
... mg2.status_at_node[nodes] = mg2.BC_NODE_IS_CLOSED
>>> _ = mg2.add_zeros('node', 'topographic__elevation')
>>> mg2.at_node['topographic__elevation'][mg2.core_nodes] = mg2.node_x[
... mg2.core_nodes]/1000.
>>> np.random.seed(0)
>>> mg2.at_node['topographic__elevation'][
... mg2.core_nodes] += np.random.rand(mg2.number_of_core_nodes)
>>> fr2 = FlowAccumulator(mg2, flow_director='D8')
>>> sp2 = FastscapeEroder(mg2, K_sp=0.01)
>>> cf2 = ChiFinder(
... mg2,
... min_drainage_area=0.,
... reference_concavity=0.5)
>>> for i in range(10):
... mg2.at_node['topographic__elevation'][mg2.core_nodes] += 10.
... fr2.run_one_step()
... sp2.run_one_step(1000.)
>>> fr2.run_one_step()
>>> cf2.calculate_chi()
>>> mg2.at_node['channel__chi_index'].reshape(
... mg2.shape) # doctest: +NORMALIZE_WHITESPACE
array([[ 0. , 0. , 0. , 0. , 0. ],
[ 0.77219416, 1.54438833, 2.63643578, 2.61419437, 0. ],
[ 1.09204746, 2.18409492, 1.52214691, 2.61419437, 0. ],
[ 0.44582651, 0.89165302, 1.66384718, 2.75589464, 0. ],
[ 0. , 0. , 0. , 0. , 0. ]])
>>> cf3 = ChiFinder(
... mg2,
... min_drainage_area=20000.,
... use_true_dx=True,
... reference_concavity=0.5,
... reference_area=mg2.at_node['drainage_area'].max(),
... clobber=True)
>>> cf3.calculate_chi()
>>> cf3.chi_indices.reshape(mg2.shape) # doctest: +NORMALIZE_WHITESPACE
array([[ 0. , 0. , 0. , 0. , 0. ],
[ 0. , 173.20508076, 0. , 0. , 0. ],
[ 0. , 0. , 270.71067812, 0. , 0. ],
[ 0. , 100. , 236.60254038, 0. , 0. ],
[ 0. , 0. , 0. , 0. , 0. ]])
>>> cf3.hillslope_mask.reshape(mg2.shape)
array([[ True, True, True, True, True],
[False, False, True, True, True],
[ True, True, False, True, True],
[False, False, False, True, True],
[ True, True, True, True, True]], dtype=bool)
References
----------
**Required Software Citation(s) Specific to this Component**
None Listed
**Additional References**
Perron, J., Royden, L. (2012). An integral approach to bedrock river
profile analysis Earth Surface Processes and Landforms 38(6), 570-576.
https://dx.doi.org/10.1002/esp.3302
"""
_name = "ChiFinder"
_unit_agnostic = True
_info = {
"channel__chi_index": {
"dtype": float,
"intent": "out",
"optional": False,
"units": "variable",
"mapping": "node",
"doc": "the local steepness index",
},
"drainage_area": {
"dtype": float,
"intent": "in",
"optional": False,
"units": "m**2",
"mapping": "node",
"doc": "Upstream accumulated surface area contributing to the node's discharge",
},
"flow__link_to_receiver_node": {
"dtype": int,
"intent": "in",
"optional": False,
"units": "-",
"mapping": "node",
"doc": "ID of link downstream of each node, which carries the discharge",
},
"flow__receiver_node": {
"dtype": int,
"intent": "in",
"optional": False,
"units": "-",
"mapping": "node",
"doc": "Node array of receivers (node that receives flow from current node)",
},
"flow__upstream_node_order": {
"dtype": int,
"intent": "in",
"optional": False,
"units": "-",
"mapping": "node",
"doc": "Node array containing downstream-to-upstream ordered list of node IDs",
},
"topographic__elevation": {
"dtype": float,
"intent": "in",
"optional": False,
"units": "m",
"mapping": "node",
"doc": "Land surface topographic elevation",
},
"topographic__steepest_slope": {
"dtype": float,
"intent": "in",
"optional": False,
"units": "-",
"mapping": "node",
"doc": "The steepest *downhill* slope",
},
}
def __init__(
self,
grid,
reference_concavity=0.5,
min_drainage_area=1.0e6,
reference_area=1.0,
use_true_dx=False,
clobber=False,
):
"""
Parameters
----------
grid : RasterModelGrid
A landlab RasterModelGrid.
reference_concavity : float
The reference concavity to use in the calculation.
min_drainage_area : float (m**2)
The drainage area down to which to calculate chi.
reference_area : float or None (m**2)
If None, will default to the mean core cell area on the grid.
Else, provide a value to use. Essentially becomes a prefactor on the
value of chi.
use_true_dx : bool (default False)
If True, integration to give chi is performed using each value of node
spacing along the channel (which can lead to a quantization effect,
and is not preferred by Taylor & Royden). If False, the mean value of
            node spacing along all channels is assumed everywhere.
clobber : bool (default False)
Raise an exception if adding an already existing field.
"""
super().__init__(grid)
if grid.at_node["flow__receiver_node"].size != grid.size("node"):
msg = (
"A route-to-multiple flow director has been "
"run on this grid. The landlab development team has not "
"verified that ChiFinder is compatible with "
"route-to-multiple methods. Please open a GitHub Issue "
"to start this process."
)
raise NotImplementedError(msg)
if isinstance(self._grid, RasterModelGrid):
self._link_lengths = self._grid.length_of_d8
else:
self._link_lengths = self._grid.length_of_link # not tested
self._reftheta = reference_concavity
self._min_drainage = min_drainage_area
self._set_up_reference_area(reference_area)
self._use_true_dx = use_true_dx
self._chi = self._grid.add_zeros("node", "channel__chi_index", clobber=clobber)
self._mask = self._grid.ones("node", dtype=bool)
self._elev = self._grid.at_node["topographic__elevation"]
def _set_up_reference_area(self, reference_area):
"""Set up and validate reference_area."""
if reference_area <= 0.0:
raise ValueError(
"ChiFinder: reference_area must be positive."
) # not tested
self._A0 = reference_area
def calculate_chi(self):
"""Calculate local chi indices.
This is the main method. Call it to calculate local chi indices
at all points with drainage areas greater than `min_drainage_area`.
Chi of any node without a defined value is reported as 0. These nodes
are also identified in the mask retrieved with :func:`hillslope_mask`.
"""
self._mask.fill(True)
self._chi.fill(0.0)
reftheta = self._reftheta
min_drainage = self._min_drainage
reference_area = self._A0
self._set_up_reference_area(reference_area)
use_true_dx = self._use_true_dx
upstr_order = self._grid.at_node["flow__upstream_node_order"]
# get an array of only nodes with A above threshold:
valid_upstr_order = upstr_order[
self._grid.at_node["drainage_area"][upstr_order] >= min_drainage
]
valid_upstr_areas = self._grid.at_node["drainage_area"][valid_upstr_order]
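        # Two integration strategies follow: with use_true_dx False, the
        # integrand (A0/A)**theta is accumulated assuming a single mean node
        # spacing; with use_true_dx True, each link's true length is used
        # (trapezium rule in integrate_chi_each_dx).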
if not use_true_dx:
chi_integrand = (self._A0 / valid_upstr_areas) ** reftheta
mean_dx = self.mean_channel_node_spacing(valid_upstr_order)
self.integrate_chi_avg_dx(
valid_upstr_order, chi_integrand, self._chi, mean_dx
)
else:
chi_integrand = self._grid.zeros("node")
chi_integrand[valid_upstr_order] = (
self._A0 / valid_upstr_areas
) ** reftheta
self.integrate_chi_each_dx(valid_upstr_order, chi_integrand, self._chi)
# stamp over the closed nodes, as it's possible they can receive infs
# if min_drainage_area < grid.cell_area_at_node
self._chi[self._grid.status_at_node == self._grid.BC_NODE_IS_CLOSED] = 0.0
self._mask[valid_upstr_order] = False
def integrate_chi_avg_dx(
self, valid_upstr_order, chi_integrand, chi_array, mean_dx
):
"""Calculates chi at each channel node by summing chi_integrand.
This method assumes a uniform, mean spacing between nodes. Method is
deliberately split out for potential cythonization at a later stage.
Parameters
----------
valid_upstr_order : array of ints
nodes in the channel network in upstream order.
chi_integrand : array of floats
The value (A0/A)**concavity, in upstream order.
chi_array : array of floats
Array in which to store chi.
mean_dx : float
The mean node spacing in the network.
Examples
--------
>>> import numpy as np
>>> from landlab import RasterModelGrid
>>> from landlab.components import FlowAccumulator
>>> from landlab.components import ChiFinder
>>> mg = RasterModelGrid((5, 4))
>>> for nodes in (mg.nodes_at_right_edge, mg.nodes_at_bottom_edge,
... mg.nodes_at_top_edge):
... mg.status_at_node[nodes] = mg.BC_NODE_IS_CLOSED
>>> z = mg.node_x.copy()
>>> z[[5, 13]] = z[6] # guard nodes
>>> _ = mg.add_field("topographic__elevation", z, at="node")
>>> fr = FlowAccumulator(mg, flow_director='D8')
>>> cf = ChiFinder(mg)
>>> fr.run_one_step()
>>> ch_nodes = np.array([4, 8, 12, 5, 9, 13, 6, 10, 14])
>>> ch_integrand = 3.*np.ones(9, dtype=float) # to make calc clearer
>>> chi_array = np.zeros(mg.number_of_nodes, dtype=float)
>>> cf.integrate_chi_avg_dx(ch_nodes, ch_integrand, chi_array, 0.5)
>>> chi_array.reshape(mg.shape)
array([[ 0. , 0. , 0. , 0. ],
[ 1.5, 3. , 4.5, 0. ],
[ 1.5, 3. , 4.5, 0. ],
[ 1.5, 3. , 4.5, 0. ],
[ 0. , 0. , 0. , 0. ]])
"""
receivers = self._grid.at_node["flow__receiver_node"]
# because chi_array is all zeros, BC cases where node is receiver
# resolve themselves
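        # Accumulate the integrand from each node's receiver up to the node,
        # then scale by the (uniform) mean spacing once at the end; this is
        # equivalent to adding integrand * mean_dx at every step.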
        for (node, integrand) in zip(valid_upstr_order, chi_integrand):
dstr_node = receivers[node]
chi_array[node] = chi_array[dstr_node] + integrand
chi_array *= mean_dx
def integrate_chi_each_dx(
self, valid_upstr_order, chi_integrand_at_nodes, chi_array
):
"""Calculates chi at each channel node by summing chi_integrand*dx.
This method accounts explicitly for spacing between each node. Method
is deliberately split out for potential cythonization at a later
stage. Uses a trapezium integration method.
Parameters
----------
valid_upstr_order : array of ints
nodes in the channel network in upstream order.
chi_integrand_at_nodes : array of floats
The value (A0/A)**concavity, in *node* order.
chi_array : array of floats
Array in which to store chi.
Examples
--------
>>> import numpy as np
>>> from landlab import RasterModelGrid
>>> from landlab.components import FlowAccumulator
>>> from landlab.components import ChiFinder
>>> mg = RasterModelGrid((5, 4), xy_spacing=3.)
>>> for nodes in (mg.nodes_at_right_edge, mg.nodes_at_bottom_edge,
... mg.nodes_at_top_edge):
... mg.status_at_node[nodes] = mg.BC_NODE_IS_CLOSED
>>> z = mg.node_x.copy()
>>> z[[5, 13]] = z[6] # guard nodes
>>> _ = mg.add_field("topographic__elevation", z, at="node")
>>> fr = FlowAccumulator(mg, flow_director='D8')
>>> cf = ChiFinder(mg)
>>> fr.run_one_step()
>>> ch_nodes = np.array([4, 8, 12, 5, 9, 13, 6, 10, 14])
>>> ch_integrand = 2.*np.ones(mg.number_of_nodes,
... dtype=float) # to make calc clearer
>>> chi_array = np.zeros(mg.number_of_nodes, dtype=float)
>>> cf.integrate_chi_each_dx(ch_nodes, ch_integrand, chi_array)
>>> chi_array.reshape(mg.shape)
array([[ 0. , 0. , 0. , 0. ],
[ 0. , 6. , 14.48528137, 0. ],
[ 0. , 6. , 12. , 0. ],
[ 0. , 6. , 14.48528137, 0. ],
[ 0. , 0. , 0. , 0. ]])
>>> from landlab.components import FastscapeEroder
>>> mg2 = RasterModelGrid((5, 5), xy_spacing=100.)
>>> for nodes in (mg2.nodes_at_right_edge, mg2.nodes_at_bottom_edge,
... mg2.nodes_at_top_edge):
... mg2.status_at_node[nodes] = mg2.BC_NODE_IS_CLOSED
>>> _ = mg2.add_zeros('node', 'topographic__elevation')
>>> mg2.at_node['topographic__elevation'][mg2.core_nodes] = mg2.node_x[
... mg2.core_nodes]/1000.
>>> np.random.seed(0)
>>> mg2.at_node['topographic__elevation'][
... mg2.core_nodes] += np.random.rand(mg2.number_of_core_nodes)
>>> fr2 = FlowAccumulator(mg2, flow_director='D8')
>>> sp2 = FastscapeEroder(mg2, K_sp=0.01)
>>> cf2 = ChiFinder(mg2, min_drainage_area=1., reference_concavity=0.5,
... use_true_dx=True)
>>> for i in range(10):
... mg2.at_node['topographic__elevation'][mg2.core_nodes] += 10.
... fr2.run_one_step()
... sp2.run_one_step(1000.)
>>> fr2.run_one_step()
>>> output_array = np.zeros(25, dtype=float)
>>> cf2.integrate_chi_each_dx(mg2.at_node['flow__upstream_node_order'],
... np.ones(25, dtype=float),
... output_array)
>>> output_array.reshape(mg2.shape)
array([[ 0. , 0. , 0. , 0. , 0. ],
[ 0. , 100. , 200. , 382.84271247, 0. ],
[ 0. , 100. , 241.42135624, 341.42135624, 0. ],
[ 0. , 100. , 200. , 300. , 0. ],
[ 0. , 0. , 0. , 0. , 0. ]])
"""
receivers = self._grid.at_node["flow__receiver_node"]
links = self._grid.at_node["flow__link_to_receiver_node"]
# because chi_array is all zeros, BC cases where node is receiver
# resolve themselves
half_integrand = 0.5 * chi_integrand_at_nodes
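        # Trapezium rule: chi at a node equals chi at its receiver plus the
        # mean of the integrand at the two ends of the connecting link,
        # multiplied by the link length.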
for node in valid_upstr_order:
dstr_node = receivers[node]
dstr_link = links[node]
if dstr_link != self._grid.BAD_INDEX:
dstr_length = self._link_lengths[dstr_link]
half_head_val = half_integrand[node]
half_tail_val = half_integrand[dstr_node]
mean_val = half_head_val + half_tail_val
chi_to_add = mean_val * dstr_length
chi_array[node] = chi_array[dstr_node] + chi_to_add
def mean_channel_node_spacing(self, ch_nodes):
"""Calculates the mean spacing between all adjacent channel nodes.
Parameters
----------
ch_nodes : array of ints
The nodes within the defined channel network.
Returns
-------
mean_spacing : float (m)
The mean spacing between all nodes in the network.
Examples
--------
>>> import numpy as np
>>> from landlab import RasterModelGrid
>>> from landlab.components import FlowAccumulator
>>> from landlab.components import ChiFinder
>>> mg = RasterModelGrid((5, 4), xy_spacing=2.)
>>> for nodes in (mg.nodes_at_right_edge, mg.nodes_at_bottom_edge,
... mg.nodes_at_top_edge):
... mg.status_at_node[nodes] = mg.BC_NODE_IS_CLOSED
>>> z = mg.node_x.copy()
>>> z[[5, 13]] = z[6] # guard nodes
>>> _ = mg.add_field("topographic__elevation", z, at="node")
>>> fr = FlowAccumulator(mg, flow_director='D8')
>>> cf = ChiFinder(mg)
>>> fr.run_one_step()
>>> ch_nodes = np.array([4, 8, 12, 5, 9, 13, 6, 10, 14])
>>> cf.mean_channel_node_spacing(ch_nodes)
2.2761423749153966
"""
ch_links = self._grid.at_node["flow__link_to_receiver_node"][ch_nodes]
ch_links_valid = ch_links[ch_links != self._grid.BAD_INDEX]
valid_link_lengths = self._link_lengths[ch_links_valid]
return valid_link_lengths.mean()
@property
def chi_indices(self):
"""Return the array of channel steepness indices.
Nodes not in the channel receive zeros.
"""
return self._chi
@property
def hillslope_mask(self):
"""Return a boolean array, False where steepness indices exist."""
return self._mask
def best_fit_chi_elevation_gradient_and_intercept(self, ch_nodes=None):
"""Returns least squares best fit for a straight line through a chi
plot.
Parameters
----------
ch_nodes : array of ints or None
Nodes at which to consider chi and elevation values. If None,
will use all nodes in grid with area greater than the component
min_drainage_area.
Returns
-------
coeffs : array(gradient, intercept)
A len-2 array containing the m then z0, where z = z0 + m * chi.
Examples
--------
>>> import numpy as np
>>> from landlab import RasterModelGrid
>>> from landlab.components import FlowAccumulator
>>> from landlab.components import ChiFinder
>>> mg = RasterModelGrid((3, 4))
>>> for nodes in (mg.nodes_at_right_edge, mg.nodes_at_bottom_edge,
... mg.nodes_at_top_edge):
... mg.status_at_node[nodes] = mg.BC_NODE_IS_CLOSED
>>> z = mg.add_field("topographic__elevation", mg.node_x.copy(), at="node")
>>> z[4:8] = np.array([0.5, 1., 2., 0.])
>>> fr = FlowAccumulator(mg, flow_director='D8')
>>> cf = ChiFinder(
... mg,
... min_drainage_area=1.,
... reference_concavity=1.)
>>> fr.run_one_step()
>>> cf.calculate_chi()
>>> mg.at_node['channel__chi_index'].reshape(mg.shape)[1, :]
array([ 0.5, 1. , 2. , 0. ])
>>> coeffs = cf.best_fit_chi_elevation_gradient_and_intercept()
>>> np.allclose(np.array([1., 0.]), coeffs)
True
"""
if ch_nodes is None:
good_vals = np.logical_not(self._mask)
else:
good_vals = np.array(ch_nodes) # not tested
chi_vals = self._chi[good_vals]
elev_vals = self._grid.at_node["topographic__elevation"][good_vals]
coeffs = np.polyfit(chi_vals, elev_vals, 1)
return coeffs
def nodes_downstream_of_channel_head(self, channel_head):
"""Find and return an array with nodes downstream of channel_head.
Parameters
----------
channel_head : int
Node ID of channel head from which to get downstream nodes.
Examples
--------
>>> import numpy as np
>>> from landlab import RasterModelGrid
>>> from landlab.components import FlowAccumulator, ChiFinder
>>> mg = RasterModelGrid((3, 4))
>>> for nodes in (mg.nodes_at_right_edge, mg.nodes_at_bottom_edge,
... mg.nodes_at_top_edge):
... mg.status_at_node[nodes] = mg.BC_NODE_IS_CLOSED
>>> z = mg.add_field("topographic__elevation", mg.node_x.copy(), at="node")
>>> z[4:8] = np.array([0.5, 1., 2., 0.])
>>> fr = FlowAccumulator(mg, flow_director='D8')
>>> fr.run_one_step()
>>> mg.at_node['flow__receiver_node']
array([ 0, 1, 2, 3, 4, 4, 5, 7, 8, 9, 10, 11])
>>> cf = ChiFinder(mg, min_drainage_area=0., reference_concavity=1.)
>>> cf.calculate_chi()
>>> cf.nodes_downstream_of_channel_head(6)
[6, 5, 4]
"""
ch_nodes = []
current_node = channel_head
while True:
ch_A = self._grid.at_node["drainage_area"][current_node]
if ch_A > self._min_drainage:
ch_nodes.append(current_node)
next_node = self._grid.at_node["flow__receiver_node"][current_node]
if next_node == current_node:
break
else:
current_node = next_node
return ch_nodes
def create_chi_plot(
self,
channel_heads=None,
label_axes=True,
symbol="kx",
plot_line=False,
line_symbol="r-",
):
"""Plots a "chi plot" (chi vs elevation for points in channel network).
If channel_heads is provided, only the channel nodes downstream of
the provided points (and with area > min_drainage_area) will be
plotted.
Parameters
----------
channel_heads : int, list or array of ints, or None
            Node IDs of channel heads from which to plot downstream.
label_axes : bool
If True, labels the axes as "Chi" and "Elevation (m)".
symbol : str
A matplotlib-style string for the style to use for the points.
plot_line : bool
If True, will plot a linear best fit line through the data cloud.
line_symbol : str
A matplotlib-style string for the style to use for the line, if
plot_line.
"""
from matplotlib.pyplot import clf, figure, plot, xlabel, ylabel
figure("Chi plot")
clf()
if channel_heads is not None:
if plot_line:
good_nodes = set()
if isinstance(channel_heads, int):
channel_heads = [channel_heads]
for head in channel_heads:
ch_nodes = self.nodes_downstream_of_channel_head(head)
plot(
self._chi[ch_nodes],
self._grid.at_node["topographic__elevation"][ch_nodes],
symbol,
)
if plot_line:
good_nodes.update(ch_nodes)
else:
ch_nodes = np.logical_not(self._mask)
plot(
self._chi[ch_nodes],
self._grid.at_node["topographic__elevation"][ch_nodes],
symbol,
)
good_nodes = ch_nodes
if plot_line:
coeffs = self.best_fit_chi_elevation_gradient_and_intercept(good_nodes)
p = np.poly1d(coeffs)
chirange = np.linspace(
self._chi[good_nodes].min(), self._chi[good_nodes].max(), 100
)
plot(chirange, p(chirange), line_symbol)
if label_axes:
ylabel("Elevation (m)")
xlabel("Chi")
@property
def masked_chi_indices(self):
"""Returns a masked array version of the 'channel__chi_index' field.
        This enables easier plotting of the values with
        :func:`landlab.imshow_grid_at_node` or similar.
Examples
--------
Make a topographic map with an overlay of chi values:
>>> from landlab import imshow_grid_at_node
>>> from landlab import RasterModelGrid
>>> from landlab.components import FlowAccumulator, FastscapeEroder
>>> from landlab.components import ChiFinder
>>> mg = RasterModelGrid((5, 5), xy_spacing=100.)
>>> for nodes in (mg.nodes_at_right_edge, mg.nodes_at_bottom_edge,
... mg.nodes_at_top_edge):
... mg.status_at_node[nodes] = mg.BC_NODE_IS_CLOSED
>>> _ = mg.add_zeros('node', 'topographic__elevation')
>>> mg.at_node['topographic__elevation'][mg.core_nodes] = mg.node_x[
... mg.core_nodes]/1000.
>>> np.random.seed(0)
>>> mg.at_node['topographic__elevation'][
... mg.core_nodes] += np.random.rand(mg.number_of_core_nodes)
>>> fr = FlowAccumulator(mg, flow_director='D8')
>>> sp = FastscapeEroder(mg, K_sp=0.01)
>>> cf = ChiFinder(mg, min_drainage_area=20000.)
>>> for i in range(10):
... mg.at_node['topographic__elevation'][mg.core_nodes] += 10.
... fr.run_one_step()
... sp.run_one_step(1000.)
>>> fr.run_one_step()
>>> cf.calculate_chi()
>>> imshow_grid_at_node(mg, 'topographic__elevation',
... allow_colorbar=False)
>>> imshow_grid_at_node(mg, cf.masked_chi_indices,
... color_for_closed=None, cmap='winter')
"""
return np.ma.array(self._chi, mask=self._mask)
| mit |
zasdfgbnm/tensorflow | tensorflow/contrib/learn/python/learn/estimators/estimators_test.py | 46 | 6682 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Custom optimizer tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import numpy as np
from tensorflow.python.training import training_util
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn import datasets
from tensorflow.contrib.learn.python.learn import metric_spec
from tensorflow.contrib.learn.python.learn.estimators import estimator as estimator_lib
from tensorflow.contrib.learn.python.learn.estimators._sklearn import accuracy_score
from tensorflow.contrib.learn.python.learn.estimators._sklearn import train_test_split
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import string_ops
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import test
from tensorflow.python.training import momentum as momentum_lib
class FeatureEngineeringFunctionTest(test.TestCase):
"""Tests feature_engineering_fn."""
def testFeatureEngineeringFn(self):
def input_fn():
return {
"x": constant_op.constant([1.])
}, {
"y": constant_op.constant([11.])
}
def feature_engineering_fn(features, labels):
_, _ = features, labels
return {
"transformed_x": constant_op.constant([9.])
}, {
"transformed_y": constant_op.constant([99.])
}
def model_fn(features, labels):
# dummy variable:
_ = variables_lib.Variable([0.])
_ = labels
predictions = features["transformed_x"]
loss = constant_op.constant([2.])
update_global_step = training_util.get_global_step().assign_add(1)
return predictions, loss, update_global_step
estimator = estimator_lib.Estimator(
model_fn=model_fn, feature_engineering_fn=feature_engineering_fn)
estimator.fit(input_fn=input_fn, steps=1)
prediction = next(estimator.predict(input_fn=input_fn, as_iterable=True))
# predictions = transformed_x (9)
self.assertEqual(9., prediction)
metrics = estimator.evaluate(
input_fn=input_fn,
steps=1,
metrics={
"label": metric_spec.MetricSpec(lambda predictions, labels: labels)
})
# labels = transformed_y (99)
self.assertEqual(99., metrics["label"])
def testFeatureEngineeringFnWithSameName(self):
def input_fn():
return {
"x": constant_op.constant(["9."])
}, {
"y": constant_op.constant(["99."])
}
def feature_engineering_fn(features, labels):
# Github #12205: raise a TypeError if called twice.
_ = string_ops.string_split(features["x"])
features["x"] = constant_op.constant([9.])
labels["y"] = constant_op.constant([99.])
return features, labels
def model_fn(features, labels):
# dummy variable:
_ = variables_lib.Variable([0.])
_ = labels
predictions = features["x"]
loss = constant_op.constant([2.])
update_global_step = training_util.get_global_step().assign_add(1)
return predictions, loss, update_global_step
estimator = estimator_lib.Estimator(
model_fn=model_fn, feature_engineering_fn=feature_engineering_fn)
estimator.fit(input_fn=input_fn, steps=1)
prediction = next(estimator.predict(input_fn=input_fn, as_iterable=True))
# predictions = transformed_x (9)
self.assertEqual(9., prediction)
metrics = estimator.evaluate(
input_fn=input_fn,
steps=1,
metrics={
"label": metric_spec.MetricSpec(lambda predictions, labels: labels)
})
# labels = transformed_y (99)
self.assertEqual(99., metrics["label"])
def testNoneFeatureEngineeringFn(self):
def input_fn():
return {
"x": constant_op.constant([1.])
}, {
"y": constant_op.constant([11.])
}
def feature_engineering_fn(features, labels):
_, _ = features, labels
return {
"x": constant_op.constant([9.])
}, {
"y": constant_op.constant([99.])
}
def model_fn(features, labels):
# dummy variable:
_ = variables_lib.Variable([0.])
_ = labels
predictions = features["x"]
loss = constant_op.constant([2.])
update_global_step = training_util.get_global_step().assign_add(1)
return predictions, loss, update_global_step
estimator_with_fe_fn = estimator_lib.Estimator(
model_fn=model_fn, feature_engineering_fn=feature_engineering_fn)
estimator_with_fe_fn.fit(input_fn=input_fn, steps=1)
estimator_without_fe_fn = estimator_lib.Estimator(model_fn=model_fn)
estimator_without_fe_fn.fit(input_fn=input_fn, steps=1)
# predictions = x
prediction_with_fe_fn = next(
estimator_with_fe_fn.predict(input_fn=input_fn, as_iterable=True))
self.assertEqual(9., prediction_with_fe_fn)
prediction_without_fe_fn = next(
estimator_without_fe_fn.predict(input_fn=input_fn, as_iterable=True))
self.assertEqual(1., prediction_without_fe_fn)
class CustomOptimizer(test.TestCase):
"""Custom optimizer tests."""
def testIrisMomentum(self):
random.seed(42)
iris = datasets.load_iris()
x_train, x_test, y_train, y_test = train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
def custom_optimizer():
return momentum_lib.MomentumOptimizer(learning_rate=0.01, momentum=0.9)
classifier = learn.DNNClassifier(
hidden_units=[10, 20, 10],
feature_columns=learn.infer_real_valued_columns_from_input(x_train),
n_classes=3,
optimizer=custom_optimizer,
config=learn.RunConfig(tf_random_seed=1))
classifier.fit(x_train, y_train, steps=400)
predictions = np.array(list(classifier.predict_classes(x_test)))
score = accuracy_score(y_test, predictions)
self.assertGreater(score, 0.65, "Failed with score = {0}".format(score))
if __name__ == "__main__":
test.main()
| apache-2.0 |
wheldom01/privacyidea | privacyidea/lib/stats.py | 3 | 5545 | # -*- coding: utf-8 -*-
#
# 2015-07-16 Initial writeup
# (c) Cornelius Kölbel
# License: AGPLv3
# contact: http://www.privacyidea.org
#
# This code is free software; you can redistribute it and/or
# modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE
# License as published by the Free Software Foundation; either
# version 3 of the License, or any later version.
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU AFFERO GENERAL PUBLIC LICENSE for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with this program. If not, see <http://www.gnu.org/licenses/>.
#
__doc__ = """This module reads audit data and can create statistics from
audit data using pandas.
This module is tested in tests/test_lib_stats.py
"""
import logging
from privacyidea.lib.log import log_with
import datetime
import StringIO
log = logging.getLogger(__name__)
try:
import matplotlib
MATPLOT_READY = True
matplotlib.style.use('ggplot')
matplotlib.use('Agg')
except Exception as exx:
MATPLOT_READY = False
log.warning("If you want to see statistics you need to install python "
"matplotlib.")
customcmap = [(1, 0, 0), (0, 1, 0), (0, 0, 1)]
@log_with(log)
def get_statistics(auditobject, start_time=datetime.datetime.now()
-datetime.timedelta(days=7),
end_time=datetime.datetime.now()):
"""
Create audit statistics and return a JSON object
The auditobject is passed from the upper level, usually from the REST API
as g.auditobject.
:param auditobject: The audit object
:type auditobject: Audit Object as defined in auditmodules.base.Audit
    :return: dict of plot names to image data URIs (JSON-serializable)
"""
result = {}
df = auditobject.get_dataframe(start_time=start_time, end_time=end_time)
# authentication successful/fail per user or serial
for key in ["user", "serial"]:
result["validate_{0!s}_plot".format(key)] = _get_success_fail(df, key)
# get simple usage
for key in ["serial", "action"]:
result["{0!s}_plot".format(key)] = _get_number_of(df, key)
# failed authentication requests
for key in ["user", "serial"]:
result["validate_failed_{0!s}_plot".format(key)] = _get_fail(df, key)
result["admin_plot"] = _get_number_of(df, "action", nums=20)
return result
def _get_success_fail(df, key):
try:
output = StringIO.StringIO()
series = df[df.action.isin(["POST /validate/check",
"GET /validate/check"])].groupby([key,
'success']).size().unstack()
fig = series.plot(kind="bar", stacked=True,
legend=True,
title="Authentications",
grid=True,
color=customcmap).get_figure()
fig.savefig(output, format="png")
o_data = output.getvalue()
output.close()
image_data = o_data.encode("base64")
image_uri = 'data:image/png;base64,{0!s}'.format(image_data)
except Exception as exx:
log.info(exx)
image_uri = "{0!s}".format(exx)
return image_uri
def _get_fail(df, key):
try:
output = StringIO.StringIO()
series = df[(df.success==0)
& (df.action.isin(["POST /validate/check",
"GET /validate/check"]))][
key].value_counts()[:5]
plot_canvas = matplotlib.pyplot.figure()
ax = plot_canvas.add_subplot(1,1,1)
fig = series.plot(ax=ax, kind="bar",
colormap="Reds",
stacked=False,
legend=False,
grid=True,
title="Failed Authentications").get_figure()
fig.savefig(output, format="png")
o_data = output.getvalue()
output.close()
image_data = o_data.encode("base64")
image_uri = 'data:image/png;base64,{0!s}'.format(image_data)
except Exception as exx:
log.info(exx)
image_uri = "{0!s}".format(exx)
return image_uri
def _get_number_of(df, key, nums=5):
"""
    Return a data URL image for a single keyed value.
    It plots the "nums" most frequent values of the "key" column in the dataframe.
    :param df: The DataFrame
    :type df: Pandas DataFrame
    :param key: The column whose values should be plotted.
    :param nums: how many of the most frequent values should be plotted
:return: A data url
"""
output = StringIO.StringIO()
output.truncate(0)
try:
plot_canvas = matplotlib.pyplot.figure()
ax = plot_canvas.add_subplot(1, 1, 1)
series = df[key].value_counts()[:nums]
fig = series.plot(ax=ax, kind="bar", colormap="Blues",
legend=False,
stacked=False,
title="Numbers of {0!s}".format(key),
grid=True).get_figure()
fig.savefig(output, format="png")
o_data = output.getvalue()
output.close()
image_data = o_data.encode("base64")
image_uri = 'data:image/png;base64,{0!s}'.format(image_data)
except Exception as exx:
log.info(exx)
image_uri = "No data"
return image_uri
| agpl-3.0 |
vollib/vollib | vollib/tests/test_utils.py | 1 | 1267 | from collections import OrderedDict
import simplejson as json
import pandas
def almost_equal(a,b,epsilon = 1.0e-7):
return abs(a-b)< epsilon
class TestDataIterator(object):
"""
>>> data_iterator = TestDataIterator()
>>> print data_iterator.has_next()
True
>>> r = data_iterator.next_row()
>>> print r['S']
100.0
"""
def __init__(self):
self.data = json.load(open('test_data.json','rb'))
columns = self.data['columns']
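        # test_data.json stores a list of column names plus row-major rows;
        # rebuild it as an ordered column -> values mapping for pandas.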
grid_data = OrderedDict()
for header in columns:
grid_data[header]=[]
for col in self.data['data']:
for i in range(len(columns)):
grid_data[columns[i]].append(col[i])
self.df = pandas.DataFrame(grid_data)
self.row_id = 0
self.row_count = self.df.S.count()
def next_row(self):
if self.has_next():
row = self.df.ix[self.row_id].to_dict()
self.row_id +=1
return row
def has_next(self):
return self.row_id < self.row_count
# -----------------------------------------------------------------------------
# MAIN
if __name__=='__main__':
import doctest
if not doctest.testmod().failed:
print "Doctest passed" | mit |
kabrau/PyImageRoi | source/CityscapeMask2Pascal.py | 1 | 4747 | import argparse
import json
import matplotlib.pyplot as plt
import skimage.io as io
import cv2
import numpy as np
import glob
import PIL.Image
import os,sys
from lxml import etree, objectify
def cityscapeMask2Pascal(databaseName, extractedFolderAnn, outputFolderAnn, imagesFolder):
print("extractedFolderAnn", extractedFolderAnn)
print("outputFolderAnn", outputFolderAnn)
print("imagesFolder", imagesFolder)
imageSufix = ""
if os.path.isfile(os.path.join(imagesFolder,"aachen_000000_000019_leftImg8bit.png")) or os.path.isfile(os.path.join(imagesFolder,"frankfurt_000000_000294_leftImg8bit.png")):
imageSufix = '_leftImg8bit.png'
elif os.path.isfile( os.path.join(imagesFolder,"aachen_000000_000019_leftImg8bit_foggy_beta_0.02.png")) or os.path.isfile( os.path.join(imagesFolder,"frankfurt_000000_000294_leftImg8bit_foggy_beta_0.02.png")):
imageSufix = '_leftImg8bit_foggy_beta_0.02.png'
elif os.path.isfile( os.path.join(imagesFolder,"aachen_000000_000019_leftImg8bit_foggy_beta_0.01.png")) or os.path.isfile( os.path.join(imagesFolder,"frankfurt_000000_000294_leftImg8bit_foggy_beta_0.01.png")):
imageSufix = '_leftImg8bit_foggy_beta_0.01.png'
elif os.path.isfile( os.path.join(imagesFolder,"aachen_000000_000019_leftImg8bit_foggy_beta_0.005.png")) or os.path.isfile( os.path.join(imagesFolder,"frankfurt_000000_000294_leftImg8bit_foggy_beta_0.005.png")):
imageSufix = '_leftImg8bit_foggy_beta_0.005.png'
if imageSufix=="":
print()
print("=== Atention ===")
print("Do not exist files in {}, or add new sufixe".format(imagesFolder) )
sys.exit()
#--------------------------------------------------------------------------
# download files from https://www.cityscapes-dataset.com/downloads/
# - gtFine_trainvaltest.zip (241MB) [md5]
# - leftImg8bit_trainvaltest.zip (11GB) [md5]
#--------------------------------------------------------------------------
# Set Labels to Include, if empty then all
include_labels = ["car", "bicycle", "person", "rider", "motorcycle", "bus", "truck", "train"]
#------------------------------------------------------------------------------------------------------------
categories = []
jsonFiles = glob.glob('{}**/**/*.json'.format(extractedFolderAnn))
#------------------------------------------------------------------------------------------------------------
for fileName in jsonFiles:
fileName = fileName.replace('\\','/')
jsonFileName = fileName.split('/')[-1:][0]
imageFileName = jsonFileName.replace('_gtFine_polygons.json',imageSufix)
xmlFileName = os.path.join(outputFolderAnn, imageFileName.replace('.png','.xml'))
with open(fileName,'r') as fp:
data = json.load(fp)
E = objectify.ElementMaker(annotate=False)
annotation = E.annotation(
E.folder(imagesFolder),
E.filename(imageFileName),
E.source(
E.database(databaseName),
E.annotation(jsonFileName),
E.image(imageFileName),
),
E.size(
E.width(data['imgWidth']),
E.height(data['imgHeight']),
E.depth('3'),
),
)
for obj_json in data['objects']:
label = obj_json['label'].replace(' ','_')
if len(include_labels)>0 and label not in include_labels:
continue
if label not in categories:
categories.append(label)
points=obj_json['polygon']
            Xs = list(np.asarray( obj_json['polygon']).flatten())[::2]  # take all the X coordinates
            Ys = list(np.asarray( obj_json['polygon']).flatten())[1::2]  # take all the Y coordinates
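            # Pascal VOC stores axis-aligned boxes, so the Cityscapes polygon
            # is collapsed to its min/max x and y extents below.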
E = objectify.ElementMaker(annotate=False)
annotation.append(E.object(
E.name(label),
E.bndbox(
E.xmin(np.min(Xs)),
E.ymin(np.min(Ys)),
E.xmax(np.max(Xs)),
E.ymax(np.max(Ys)),
),
))
if not os.path.exists(os.path.dirname(xmlFileName)):
os.makedirs(os.path.dirname(xmlFileName))
etree.ElementTree(annotation).write(xmlFileName)
    print('=========== Categories ===============')
print(set(categories))
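# Illustrative invocation (paths are placeholders; no command-line wiring is
# shown in this module):
#   cityscapeMask2Pascal("cityscapes", "./gtFine/train/",
#                        "./annotations/", "./leftImg8bit/train/")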
| mit |
TwistedHardware/roshanRush | data_set/models.py | 1 | 14792 | from datetime import datetime
import pandas as pd
#
from django.db import models
class DataGroup(models.Model):
"""
Represents a group an Data Sets
"""
"""
Fields
"""
name = models.CharField(max_length=200)
"""
Methods
"""
def __unicode__(self):
return self.name
"""
Classes
"""
class Meta:
verbose_name = "Data Group"
verbose_name_plural = "Data Groups"
class DataSetType(models.Model):
"""
Represents a schema for a Data Set
"""
"""
Fields
"""
name = models.CharField(max_length=200)
columns = models.ManyToManyField("Feature", limit_choices_to={"parent": None})
"""
Methods
"""
def __unicode__(self):
return self.name
"""
Classes
"""
class Meta:
verbose_name = "Data Set Type"
verbose_name_plural = "Data Set Types"
class DataSet(models.Model):
"""
Represents a Data Set which is basically a table of data that has the same schema
"""
"""
Fields
"""
name = models.CharField(max_length=200)
data_group = models.ForeignKey(DataGroup)
type = models.ForeignKey(DataSetType)
"""
Methods
"""
def __unicode__(self):
return self.name
def to_DataFrame(self, truncate=False, filter_features=None):
"""
Returns a dataframe representing the dataset
"""
# Load all features
if filter_features:
records = NumberFeature.objects.all().filter(feature__name=filter_features[0], value=filter_features[1]).values_list("record__id")
records = [item[0] for item in records]
else:
records = [item[0] for item in self.record_set.all().values_list("id")]
date_features = list(DateFeature.objects.all().filter(record__id__in=records).values("record__id", "feature__name", "value"))
numerical_features = list(NumberFeature.objects.all().filter(record__id__in=records).values("record__id", "feature__name", "value"))
boolean_features = list(BooleanFeature.objects.all().filter(record__id__in=records).values("record__id", "feature__name", "value"))
text_features = list(TextFeature.objects.all().filter(record__id__in=records).values("record__id", "feature__name", "value"))
file_features = list(FileFeature.objects.all().filter(record__id__in=records).values("record__id", "feature__name", "value"))
record_link_features = list(RecordLinkFeature.objects.all().filter(record__id__in=records).values("record__id", "feature__name", "value", "data_set__id"))
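        # Each feature type lives in its own table (an EAV-style layout); each
        # list of {record__id, feature__name, value} dicts is pivoted into a
        # wide DataFrame and joined on record__id below.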
# Create dataset with record__id as index
dataset = pd.DataFrame({"record__id": records})
dataset.set_index("record__id", inplace=True)
# Process date features
if len(date_features) <> 0:
date_df = pd.DataFrame(date_features).pivot(index="record__id", columns="feature__name", values="value")
else:
date_df = pd.DataFrame()
# Process number features
if len(numerical_features) <> 0:
numerical_df = pd.DataFrame(numerical_features).pivot(index="record__id", columns="feature__name", values="value")
else:
numerical_df = pd.DataFrame()
# Process boolean features
if len(boolean_features) <> 0:
boolean_df = pd.DataFrame(boolean_features).pivot(index="record__id", columns="feature__name", values="value")
else:
boolean_df = pd.DataFrame()
# Process text features
if len(text_features) <> 0:
text_df = pd.DataFrame(text_features).pivot(index="record__id", columns="feature__name", values="value")
else:
text_df = pd.DataFrame()
# Process file features
if len(file_features) <> 0:
file_df = pd.DataFrame(file_features).pivot(index="record__id", columns="feature__name", values="value")
else:
file_df = pd.DataFrame()
# Process record link features
if len(record_link_features) <> 0:
record_link_df = pd.DataFrame(record_link_features).pivot(index="record__id", columns="feature__name", values="value")
else:
record_link_df = pd.DataFrame()
if truncate:
tr = lambda x: [str(item)[:100] for item in x]
return pd.concat([dataset, date_df, numerical_df, boolean_df, text_df, file_df, record_link_df], axis=1).apply(tr)
# Concatenate all features DataFrames to the Main IDs DataFrame and return it
return pd.concat([dataset, date_df, numerical_df, boolean_df, text_df, file_df, record_link_df], axis=1)
"""
Classes
"""
class Meta:
verbose_name = "Data Set"
verbose_name_plural = "Data Sets"
class FeatureType(models.Model):
"""
Represents a type of features
"""
"""
Options
"""
feature_db_type_options = (
("date", "Date Time"),
("number", "Number"),
("boolean", "Boolean"),
("text", "Text"),
("file", "File"),
("relation", "Relation"),
)
"""
Fields
"""
name = models.CharField(max_length=200)
feature_type = models.CharField(max_length=20, choices=feature_db_type_options)
"""
Methods
"""
def __unicode__(self):
return self.name
"""
Classes
"""
class Meta:
verbose_name = "Feature Type"
verbose_name_plural = "Feature Types"
class Feature(models.Model):
"""
Represents a feature which is basically a column in a Data Set
"""
"""
Fields
"""
name = models.CharField(max_length=200)
description = models.TextField(null=True, blank=True)
formula = models.TextField(null=True, blank=True)
type = models.ForeignKey(FeatureType)
parent = models.ForeignKey("self", null=True, blank=True)
related_to = models.ForeignKey(DataSet, null=True, blank=True, help_text="Use only for Relation Features")
"""
Methods
"""
def __unicode__(self):
return self.name
"""
Classes
"""
class Meta:
verbose_name = "Feature"
verbose_name_plural = "Features"
class Record(models.Model):
"""
Represents a record in a Data Set
"""
"""
Fields
"""
data_set = models.ForeignKey(DataSet)
create_date = models.DateTimeField(auto_now_add=True)
name = models.CharField(max_length=200, null=True, blank=True)
original_id = models.BigIntegerField(null=True, blank=True)
"""
Methods
"""
def __unicode__(self):
return "%s" % self.create_date
def feature(self, feature, value=None, date_format="%Y%m%d"):
"""
        Set or get the value of a feature
"""
        # Check the feature_type
if feature.type.feature_type == "date":
value = datetime.strptime(str(value), date_format)
record_feature = self.datefeature_set.all().get(
record=self,
feature=feature,
)
elif feature.type.feature_type == "number":
record_feature = self.numberfeature_set.all().get(
record=self,
feature=feature,
)
elif feature.type.feature_type == "boolean":
record_feature = self.booleanfeature_set.all().get(
record=self,
feature=feature,
)
elif feature.type.feature_type == "text":
record_feature = self.textfeature_set.all().get(
record=self,
feature=feature,
)
elif feature.type.feature_type == "file":
record_feature = self.textfeature_set.all().get(
record=self,
feature=feature,
)
elif feature.type.feature_type == "relation":
record_feature = self.recordlinkfeature_set.all().get(
record=self,
feature=feature,
)
# Check if a value is set
if not value is None:
record_feature.value = value
record_feature.save()
return record_feature
def save(self, *args, **kwargs):
"""
        Overrides the default save to insert features for new Records
        """
        # Check if it is a new Record
if self.pk is None:
# Call Parent Save
super(Record, self).save(*args, **kwargs)
for feature in self.data_set.type.columns.all():
if feature.type.feature_type == "date":
self.datefeature_set.all().create(
record=self,
feature=feature,
)
elif feature.type.feature_type == "number":
self.numberfeature_set.all().create(
record=self,
feature=feature,
)
elif feature.type.feature_type == "boolean":
self.booleanfeature_set.all().create(
record=self,
feature=feature,
)
elif feature.type.feature_type == "text":
self.textfeature_set.all().create(
record=self,
feature=feature,
)
elif feature.type.feature_type == "file":
self.textfeature_set.all().create(
record=self,
feature=feature,
)
elif feature.type.feature_type == "relation":
self.recordlinkfeature_set.all().create(
record=self,
feature=feature,
data_set=feature.related_to,
)
else:
super(Record, self).save(*args, **kwargs)
"""
Classes
"""
class Meta:
verbose_name = "Record"
verbose_name_plural = "Records"
class DateFeature(models.Model):
"""
Represents a Feature of type Date
"""
"""
Fields
"""
record = models.ForeignKey(Record)
feature = models.ForeignKey(Feature)
value = models.DateTimeField(null=True, blank=True)
"""
Methods
"""
def __unicode__(self):
return "%s: %s" % (self.feature.name, self.value)
"""
Classes
"""
class Meta:
verbose_name = "Date Feature"
verbose_name_plural = "Date Features"
class NumberFeature(models.Model):
"""
Represents a Feature of type Number
"""
"""
Fields
"""
record = models.ForeignKey(Record)
feature = models.ForeignKey(Feature)
value = models.FloatField(null=True, blank=True)
"""
Methods
"""
def __unicode__(self):
return "%s: %s" % (self.feature.name, self.value)
"""
Classes
"""
class Meta:
verbose_name = "Number Feature"
verbose_name_plural = "Number Features"
class BooleanFeature(models.Model):
"""
Represents a Feature of type Boolean
"""
"""
Fields
"""
record = models.ForeignKey(Record)
feature = models.ForeignKey(Feature)
value = models.NullBooleanField(null=True, blank=True)
"""
Methods
"""
def __unicode__(self):
return "%s: %s" % (self.feature.name, self.value)
"""
Classes
"""
class Meta:
verbose_name = "Boolean Feature"
verbose_name_plural = "Boolean Features"
class TextFeature(models.Model):
"""
Represents a Feature of type Text
"""
"""
Fields
"""
record = models.ForeignKey(Record)
feature = models.ForeignKey(Feature)
value = models.TextField(null=True, blank=True)
"""
Methods
"""
def __unicode__(self):
return "%s: %s" % (self.feature.name, self.value)
"""
Classes
"""
class Meta:
verbose_name = "Text Feature"
verbose_name_plural = "Text Features"
class FileFeature(models.Model):
"""
Represents a Feature of type File
"""
"""
Fields
"""
record = models.ForeignKey(Record)
feature = models.ForeignKey(Feature)
value = models.FileField(upload_to="files", null=True, blank=True)
"""
Methods
"""
def __unicode__(self):
return "%s: %s" % (self.feature.name, self.value)
"""
Classes
"""
class Meta:
verbose_name = "File Feature"
verbose_name_plural = "File Features"
class RecordLinkFeature(models.Model):
"""
Represents a Feature of type Record Link which links one Data Set
"""
"""
Fields
"""
record = models.ForeignKey(Record)
feature = models.ForeignKey(Feature)
data_set = models.ForeignKey(DataSet)
value = models.BigIntegerField(null=True, blank=True)
"""
Methods
"""
def __unicode__(self):
return "%s: %s" % (self.feature.name, self.value)
"""
Classes
"""
class Meta:
verbose_name = "Record Link Feature"
verbose_name_plural = "Record Link Features" | gpl-2.0 |
ebilionis/py-orthpol | demos/demo7.py | 2 | 2204 | """
Generate the Legendre polynomials using a scipy.stats random variable.
This particular demo generates the Legendre polynomials.
This demo demonstrates how to:
+ Construct a set of orthogonal univariate polynomials given a scipy.stats
random variable.
+ Examine certain properties of a univariate polynomial.
+ Evaluate the polynomials at one or more points.
+ Evaluate the derivatives of the polynomials at one or more points.
Author:
Ilias Bilionis
Date:
3/18/2014
"""
import orthpol
import math
import numpy as np
import matplotlib.pyplot as plt
import scipy.stats
# The desired degree
degree = 4
# The first way of doing it is to write down the random variable:
rv = scipy.stats.uniform()
# Construct it:
p = orthpol.OrthogonalPolynomial(degree, rv=rv)
# An orthogonal polynomial is thought of as a function.
# Here is how to get the number of inputs and outputs of that function
print 'Number of inputs:', p.num_input
print 'Number of outputs:', p.num_output
# Test if the polynomials are normalized (i.e., their norm is 1.):
print 'Is normalized:', p.is_normalized
# Get the degree of the polynomial:
print 'Polynomial degree:', p.degree
# Get the alpha-beta recursion coefficients:
print 'Alpha:', p.alpha
print 'Beta:', p.beta
# The following should print a description of the polynomial
print str(p)
# Now you can evaluate the polynomial at any points you want:
X = np.linspace(0., 1., 100)
# Here is the actual evaluation
phi = p(X)
# Phi should be a 100x5 matrix: phi[i, j] = p_j(X[i])
# Let's plot them
plt.plot(X, phi)
plt.title('Legendre Polynomials', fontsize=16)
plt.xlabel('$x$', fontsize=16)
plt.ylabel('$p_i(x)$', fontsize=16)
plt.legend(['$p_{%d}(x)$' % i for i in range(p.num_output)], loc='best')
print 'Close the window to continue...'
plt.show()
# You may also compute the derivatives of the polynomials:
dphi = p.d(X)
# Let's plot them also
plt.plot(X, dphi)
plt.title('Derivatives of Legendre Polynomials', fontsize=16)
plt.xlabel('$x$', fontsize=16)
plt.ylabel(r'$\frac{dp_i(x)}{dx}$', fontsize=16)
plt.legend([r'$\frac{p_{%d}(x)}{dx}$' % i for i in range(p.num_output)], loc='best')
print 'Close the window to end demo...'
plt.show()
| lgpl-2.1 |
Takasudo/studyPython | deep/ch04/gradient_2d.py | 2 | 1600 | # coding: utf-8
# cf.http://d.hatena.ne.jp/white_wheels/20100327/p3
import numpy as np
import matplotlib.pylab as plt
from mpl_toolkits.mplot3d import Axes3D
def _numerical_gradient_no_batch(f, x):
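    # Central-difference approximation of the gradient:
    # df/dx_i ~ (f(x + h*e_i) - f(x - h*e_i)) / (2*h), one component at a time.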
h = 1e-4 # 0.0001
grad = np.zeros_like(x)
for idx in range(x.size):
tmp_val = x[idx]
x[idx] = float(tmp_val) + h
fxh1 = f(x) # f(x+h)
x[idx] = tmp_val - h
fxh2 = f(x) # f(x-h)
grad[idx] = (fxh1 - fxh2) / (2*h)
        x[idx] = tmp_val  # restore the original value
return grad
def numerical_gradient(f, X):
if X.ndim == 1:
return _numerical_gradient_no_batch(f, X)
else:
grad = np.zeros_like(X)
for idx, x in enumerate(X):
grad[idx] = _numerical_gradient_no_batch(f, x)
return grad
def function_2(x):
if x.ndim == 1:
return np.sum(x**2)
else:
return np.sum(x**2, axis=1)
def tangent_line(f, x):
d = numerical_gradient(f, x)
print(d)
y = f(x) - d*x
return lambda t: d*t + y
if __name__ == '__main__':
x0 = np.arange(-2, 2.5, 0.25)
x1 = np.arange(-2, 2.5, 0.25)
X, Y = np.meshgrid(x0, x1)
X = X.flatten()
Y = Y.flatten()
grad = numerical_gradient(function_2, np.array([X, Y]) )
plt.figure()
plt.quiver(X, Y, -grad[0], -grad[1], angles="xy",color="#666666")#,headwidth=10,scale=40,color="#444444")
plt.xlim([-2, 2])
plt.ylim([-2, 2])
plt.xlabel('x0')
plt.ylabel('x1')
plt.grid()
plt.legend()
plt.draw()
plt.show() | gpl-3.0 |
flaviovdf/pyksc | src/pyksc/regression.py | 1 | 6549 | #-*- coding: utf8
'''
Implementation of some Machine Learning regression models. Basically, we
implement simple wrappers around the scikit-learn library which performs
the transformations and specific training models we need.
'''
from __future__ import division, print_function
from sklearn.base import clone
from sklearn.base import BaseEstimator
from sklearn.base import RegressorMixin
from sklearn.externals.joblib.parallel import Parallel, delayed
from sklearn.linear_model.base import LinearRegression
from sklearn.utils.validation import safe_asarray
import numpy as np
def mean_absolute_error(y_true, y_pred):
"""
Mean absolute error regression loss
Positive floating point value: the best value is 0.0.
Parameters
----------
y_true : array-like
y_pred : array-like
Returns
-------
mrae : float
"""
y_true = np.asarray(y_true)
y_pred = np.asarray(y_pred)
return np.mean(np.abs(y_true - y_pred))
def mean_relative_square_error(y_true, y_pred):
"""
Mean relative square error regression loss
Positive floating point value: the best value is 0.0.
Parameters
----------
y_true : array-like
y_pred : array-like
Returns
-------
mrse : float
"""
y_true = np.asarray(y_true)
y_pred = np.asarray(y_pred)
return np.mean(((y_pred / y_true) - 1) ** 2)
class RSELinearRegression(LinearRegression):
'''
Implements an ordinary least squares (OLS) linear regression in which
the objective function is the relative squared error (RSE) and not the
    absolute squared error.
This class will use the same parameters and arguments as:
sklearn.linear_model.LinearRegression. Different from the linear
    regression, we set `fit_intercept` to False by default.
Parameters
----------
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional
If True, the regressors X are normalized
See
---
sklearn.linear_model.LinearRegression
'''
def __init__(self, fit_intercept=False, normalize=False, copy_X=True):
super(RSELinearRegression, self).__init__(fit_intercept, normalize,
copy_X)
def fit(self, X, y):
X = safe_asarray(X)
y = np.asarray(y)
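        # Dividing each row of X by its target and regressing against a vector
        # of ones turns the ordinary least-squares objective into the relative
        # squared error ((y_pred / y_true) - 1) ** 2.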
X = (X.T / y).T
return super(RSELinearRegression, self).fit(X, y / y)
def _fit_helper(class_, X, y, learner):
return class_, clone(learner).fit(X, y)
def _predict_helper(examples, X, learner):
return examples, learner.predict(X)
class MultiClassRegression(BaseEstimator, RegressorMixin):
'''
This class implements what we call a multi-class regression. In simple
terms, for a dataset with class labels one specialized regression model
is learned for each label. Also, a classification model is learned for the
whole dataset. Thus, when predicting first the classification model is used
to infer classes and secondly the specialized regression model for each
class is used.
Parameters
----------
clf : an instance of `sklearn.base.ClassifierMixin`
this is the classifier to be used. Pass a grid search object when
searching for best parameters is needed
regr : a subclass of `sklearn.base.RegressorMixin`
        this is a class object and not an instance of the class. Pass a grid
search object when searching for best parameters is needed
'''
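    # Illustrative usage (the estimator choices are placeholders, not part of
    # this module):
    #   model = MultiClassRegression(clf=some_classifier, regr=some_regressor)
    #   model.fit(X, y_class_labels, y_regression_targets)
    #   y_hat = model.predict(X_new)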
def __init__(self, clf, regr, n_jobs=1, verbose=0, pre_dispatch='2*n_jobs'):
super(MultiClassRegression, self).__init__()
self.clf = clf
self.regr = regr
self.n_jobs = n_jobs
self.verbose = verbose
self.pre_dispatch = pre_dispatch
self.clf_model = None
self.regression_models = None
def fit(self, X, y_clf, y_regression):
"""
Fit the multiclass model.
Parameters
----------
X : numpy array of shape [n_samples,n_features]
Training data
y_clf : numpy array of shape [n_samples]
Target classes for classification model
y_regression: numpy array of shape [n_samples]
Target values for regression model
Returns
-------
self : returns an instance of self.
"""
X = safe_asarray(X)
y_clf = np.asarray(y_clf)
y_regression = np.asarray(y_regression)
self.clf_model = self.clf.fit(X, y_clf)
classes = set(y_clf)
regr = self.regr
def _generator():
for class_ in classes:
examples = y_clf == class_
yield class_, X[examples], y_regression[examples], regr
out = Parallel(self.n_jobs, self.verbose, self.pre_dispatch)(\
delayed(_fit_helper)(*params) for params in _generator())
self.regression_models = {}
for class_, regr_model in out:
self.regression_models[class_] = regr_model
return self
def predict(self, X, return_class_prediction=False):
"""
        Predict using the multiclass regression model
Parameters
----------
X : numpy array of shape [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples]
Returns predicted values.
"""
X = safe_asarray(X)
y_clf_predicted = np.asarray(self.clf_model.predict(X))
classes = set(y_clf_predicted)
def _generator():
for class_ in classes:
examples = y_clf_predicted == class_
yield examples, X[examples], self.regression_models[class_]
out = Parallel(self.n_jobs, self.verbose, self.pre_dispatch)(\
delayed(_predict_helper)(*params) for params in _generator())
y_regr_predicted = None
for examples, predicted in out:
if y_regr_predicted is None:
y_regr_predicted = np.zeros(X.shape[0], predicted.dtype)
y_regr_predicted[examples] = predicted
if return_class_prediction:
return y_clf_predicted, y_regr_predicted
else:
return y_regr_predicted
| bsd-3-clause |
dwhswenson/openpathsampling | openpathsampling/experimental/storage/mdtraj_json.py | 2 | 1220 | from ..simstore.custom_json import JSONCodec
try:
import mdtraj as md
except ImportError:
md = None
HAS_MDTRAJ = False
else:
HAS_MDTRAJ = True
import pandas as pd
def _check_mdtraj():
if not HAS_MDTRAJ:
raise RuntimeError("Unable to import MDTraj.")
def traj_to_dict(obj):
return {'xyz': obj.xyz,
'topology': obj.topology,
'time': obj.time,
'unitcell_lengths': obj.unitcell_lengths,
'unitcell_angles': obj.unitcell_angles}
def traj_from_dict(dct):
_check_mdtraj()
dct = {k: v for k, v in dct.items()
if k not in ['__class__', '__module__']}
return md.Trajectory(**dct)
def topology_to_dict(obj):
dataframe, bonds = obj.to_dataframe()
return {'atoms': dataframe.to_json(),
'bonds': bonds}
def topology_from_dict(dct):
_check_mdtraj()
return md.Topology.from_dataframe(
atoms=pd.read_json(dct['atoms']),
bonds=dct['bonds']
)
if HAS_MDTRAJ:
traj_codec = JSONCodec(md.Trajectory, traj_to_dict, traj_from_dict)
top_codec = JSONCodec(md.Topology, topology_to_dict, topology_from_dict)
mdtraj_codecs = [traj_codec, top_codec]
else:
mdtraj_codecs = []
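# mdtraj_codecs collects the JSONCodec instances (Trajectory and Topology),
# intended for registration with the simstore custom-JSON machinery imported
# above (an assumption based on that import); the list is empty when MDTraj
# is unavailable.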
| mit |
ngoix/OCRF | examples/linear_model/plot_bayesian_ridge.py | 50 | 2733 | """
=========================
Bayesian Ridge Regression
=========================
Computes a Bayesian Ridge Regression on a synthetic dataset.
See :ref:`bayesian_ridge_regression` for more information on the regressor.
Compared to the OLS (ordinary least squares) estimator, the coefficient
weights are slightly shifted toward zeros, which stabilises them.
As the prior on the weights is a Gaussian prior, the histogram of the
estimated weights is Gaussian.
The estimation of the model is done by iteratively maximizing the
marginal log-likelihood of the observations.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn.linear_model import BayesianRidge, LinearRegression
###############################################################################
# Generating simulated data with Gaussian weights
np.random.seed(0)
n_samples, n_features = 100, 100
X = np.random.randn(n_samples, n_features) # Create Gaussian data
# Create weights with a precision lambda_ of 4.
lambda_ = 4.
w = np.zeros(n_features)
# Only keep 10 weights of interest
relevant_features = np.random.randint(0, n_features, 10)
for i in relevant_features:
w[i] = stats.norm.rvs(loc=0, scale=1. / np.sqrt(lambda_))
# Create noise with a precision alpha of 50.
alpha_ = 50.
noise = stats.norm.rvs(loc=0, scale=1. / np.sqrt(alpha_), size=n_samples)
# Create the target
y = np.dot(X, w) + noise
###############################################################################
# Fit the Bayesian Ridge Regression and an OLS for comparison
clf = BayesianRidge(compute_score=True)
clf.fit(X, y)
ols = LinearRegression()
ols.fit(X, y)
###############################################################################
# Plot true weights, estimated weights and histogram of the weights
lw = 2
plt.figure(figsize=(6, 5))
plt.title("Weights of the model")
plt.plot(clf.coef_, color='lightgreen', linewidth=lw,
label="Bayesian Ridge estimate")
plt.plot(w, color='gold', linewidth=lw, label="Ground truth")
plt.plot(ols.coef_, color='navy', linestyle='--', label="OLS estimate")
plt.xlabel("Features")
plt.ylabel("Values of the weights")
plt.legend(loc="best", prop=dict(size=12))
plt.figure(figsize=(6, 5))
plt.title("Histogram of the weights")
plt.hist(clf.coef_, bins=n_features, color='gold', log=True)
plt.scatter(clf.coef_[relevant_features], 5 * np.ones(len(relevant_features)),
color='navy', label="Relevant features")
plt.ylabel("Features")
plt.xlabel("Values of the weights")
plt.legend(loc="upper left")
plt.figure(figsize=(6, 5))
plt.title("Marginal log-likelihood")
plt.plot(clf.scores_, color='navy', linewidth=lw)
plt.ylabel("Score")
plt.xlabel("Iterations")
plt.show()
| bsd-3-clause |
quheng/scikit-learn | benchmarks/bench_glm.py | 297 | 1493 | """
A comparison of different methods in GLM
Data comes from a random square matrix.
"""
from datetime import datetime
import numpy as np
from sklearn import linear_model
from sklearn.utils.bench import total_seconds
if __name__ == '__main__':
import pylab as pl
n_iter = 40
time_ridge = np.empty(n_iter)
time_ols = np.empty(n_iter)
time_lasso = np.empty(n_iter)
dimensions = 500 * np.arange(1, n_iter + 1)
for i in range(n_iter):
print('Iteration %s of %s' % (i, n_iter))
n_samples, n_features = 10 * i + 3, 10 * i + 3
X = np.random.randn(n_samples, n_features)
Y = np.random.randn(n_samples)
start = datetime.now()
ridge = linear_model.Ridge(alpha=1.)
ridge.fit(X, Y)
time_ridge[i] = total_seconds(datetime.now() - start)
start = datetime.now()
ols = linear_model.LinearRegression()
ols.fit(X, Y)
time_ols[i] = total_seconds(datetime.now() - start)
start = datetime.now()
lasso = linear_model.LassoLars()
lasso.fit(X, Y)
time_lasso[i] = total_seconds(datetime.now() - start)
pl.figure('scikit-learn GLM benchmark results')
pl.xlabel('Dimensions')
pl.ylabel('Time (s)')
pl.plot(dimensions, time_ridge, color='r')
pl.plot(dimensions, time_ols, color='g')
pl.plot(dimensions, time_lasso, color='b')
pl.legend(['Ridge', 'OLS', 'LassoLars'], loc='upper left')
pl.axis('tight')
pl.show()
| bsd-3-clause |
pythonvietnam/scikit-learn | examples/model_selection/plot_confusion_matrix.py | 244 | 2496 | """
================
Confusion matrix
================
Example of confusion matrix usage to evaluate the quality
of the output of a classifier on the iris data set. The
diagonal elements represent the number of points for which
the predicted label is equal to the true label, while
off-diagonal elements are those that are mislabeled by the
classifier. The higher the diagonal values of the confusion
matrix the better, indicating many correct predictions.
The figures show the confusion matrix with and without
normalization by class support size (number of elements
in each class). This kind of normalization can be
interesting in case of class imbalance to have a more
visual interpretation of which class is being misclassified.
Here the results are not as good as they could be as our
choice for the regularization parameter C was not the best.
In real life applications this parameter is usually chosen
using :ref:`grid_search`.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
from sklearn.cross_validation import train_test_split
from sklearn.metrics import confusion_matrix
# import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
# Split the data into a training set and a test set
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
# Run classifier, using a model that is too regularized (C too low) to see
# the impact on the results
classifier = svm.SVC(kernel='linear', C=0.01)
y_pred = classifier.fit(X_train, y_train).predict(X_test)
def plot_confusion_matrix(cm, title='Confusion matrix', cmap=plt.cm.Blues):
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(iris.target_names))
plt.xticks(tick_marks, iris.target_names, rotation=45)
plt.yticks(tick_marks, iris.target_names)
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
# Compute confusion matrix
cm = confusion_matrix(y_test, y_pred)
np.set_printoptions(precision=2)
print('Confusion matrix, without normalization')
print(cm)
plt.figure()
plot_confusion_matrix(cm)
# Normalize the confusion matrix by row (i.e by the number of samples
# in each class)
cm_normalized = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print('Normalized confusion matrix')
print(cm_normalized)
plt.figure()
plot_confusion_matrix(cm_normalized, title='Normalized confusion matrix')
plt.show()
| bsd-3-clause |