repo_name | path | copies | size | content | license
---|---|---|---|---|---|
urielz/ArgoView | plot_trajectory.py | 1 | 1805 | def plot_trajectory(tlat,tlon,tkm,profname):
import config
import netCDF4 as nc
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
dlon = 0.4
dlat = 0.4
m = Basemap(projection='merc',llcrnrlat=min(tlat)-dlat,urcrnrlat=max(tlat)+dlat,
llcrnrlon=min(tlon)-dlon,urcrnrlon=max(tlon)+dlon)
geb = nc.Dataset(config.gebf)
topo = geb.variables['elevation'][:]
glon = geb.variables['lon'][:]
glat = geb.variables['lat'][:]
# plot float trajectory
m.bluemarble()
parallels = np.arange(0.,-90,-0.2)
meridians = np.arange(180.,360.,0.2)
m.drawparallels(parallels,labels=[1,0,0,0],fontsize=4)
m.drawmeridians(meridians,labels=[0,0,0,1],fontsize=4)
lon0 = np.argmin(np.abs(np.min(tlon)-dlon-glon))
lon1 = np.argmin(np.abs(np.max(tlon)+dlon-glon))
lat0 = np.argmin(np.abs(np.min(tlat)-dlat-glat))
lat1 = np.argmin(np.abs(np.max(tlat)+dlat-glat))
LON, LAT = np.meshgrid(glon[lon0:lon1], glat[lat0:lat1])
X, Y = m(LON, LAT)
cmax = np.max(topo[lat0:lat1,lon0:lon1])
cmin = np.min(topo[lat0:lat1,lon0:lon1])
v = np.linspace(cmin,cmax,5)
co = m.contour(X,Y,topo[lat0:lat1,lon0:lon1],v,colors='w',linestyles='solid')
plt.clabel(co,inline=True,fmt='%1.0f',fontsize=4,colors='w')
del X, Y
x, y = m(tlon,tlat)
m.scatter(x,y,10,marker='o',color='r')
m.plot(x,y,'r-.')
lab = []
for j in range(1,np.size(tkm)+1):
lab = lab + [str(j)+' - '+str(round(tkm[j-1]))+' km']
for label, xpt, ypt in zip(lab, x, y):
plt.text(xpt+1000, ypt+500, label,color='w',size='4')
del x, y
plt.title('Argo profile: '+str(profname),fontsize=5)
#plt.savefig(config.tdir+profname+'.png',dpi=300)
#plt.close()
return
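# Hedged usage sketch, not part of the original repository: it assumes that
# config.gebf points at a GEBCO netCDF bathymetry file and that the basemap
# toolkit is installed; trajectory values and profile name are hypothetical.
if __name__ == '__main__':
    import matplotlib.pyplot as plt
    tlat = [-34.1, -34.3, -34.6]   # hypothetical float latitudes
    tlon = [151.2, 151.4, 151.7]   # hypothetical float longitudes
    tkm = [0.0, 25.4, 64.3]        # cumulative along-track distance in km
    plot_trajectory(tlat, tlon, tkm, 'example_profile')
    plt.show()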
| gpl-2.0 |
GlobalFishingWatch/vessel-scoring | vessel_scoring/legacy_heuristic_model.py | 1 | 1430 | import numpy as np
from vessel_scoring.utils import get_cols_by_name
import vessel_scoring.base_model
COURSE_STD = 0
SPEED_STD = 1
SPEED_AVG = 2
SHORE_DIST = 3
# TODO: make this inherit from appropriate sklearn classes
class LegacyHeuristicModel(vessel_scoring.base_model.BaseModel):
def __init__(self, window='3600'):
"""
window - window size to use in features
"""
self.window = window
def fit(self, X, y):
return self
def predict_proba(self, X):
X = self._make_features(X)
score = (X[:,COURSE_STD] + X[:,SPEED_STD] + X[:,SPEED_AVG]) * 2.0 / 3.0
score = np.clip(score, 0, 1)
# Port behavior is hard to distinguish from fishing,
# so supress score close to shore...
# not_close_to_shore = X[:,SHORE_DIST] >= 3
# score *= not_close_to_shore
proba = np.zeros([len(X), 2])
proba[:, 0] = 1 - score # Probability not fishing
proba[:, 1] = score
return proba
def _make_features(self, data):
"""Convert dataset into feature matrix suitable for model"""
return get_cols_by_name(data,
['measure_coursestddev_{window}' ,
'measure_speedstddev_{window}',
'measure_speedavg_{window}',
# 'distance_to_shore' # XXX waiting for this feature to be re-added
], window=self.window)
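# Hedged usage sketch, not part of the original module. It assumes that
# vessel_scoring.utils.get_cols_by_name stacks the listed (window-formatted)
# columns of a numpy structured array into a plain 2-D float array; the field
# names and values below are hypothetical test data.
if __name__ == '__main__':
    demo = np.zeros(4, dtype=[('measure_coursestddev_3600', float),
                              ('measure_speedstddev_3600', float),
                              ('measure_speedavg_3600', float)])
    demo['measure_speedavg_3600'] = [0.1, 0.6, 0.9, 0.2]
    model = LegacyHeuristicModel(window='3600')
    print(model.predict_proba(demo)[:, 1])  # column 1 is the fishing score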
| apache-2.0 |
davidzchen/tensorflow | tensorflow/python/estimator/inputs/pandas_io.py | 41 | 1293 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""pandas_io python module.
Importing from tensorflow.python.estimator is unsupported
and will soon break!
"""
# pylint: disable=unused-import,g-bad-import-order,g-import-not-at-top,wildcard-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow_estimator.python.estimator.inputs import pandas_io
# Include attrs that start with single underscore.
_HAS_DYNAMIC_ATTRIBUTES = True
pandas_io.__all__ = [s for s in dir(pandas_io) if not s.startswith('__')]
from tensorflow_estimator.python.estimator.inputs.pandas_io import *
| apache-2.0 |
saketkc/statsmodels | statsmodels/datasets/template_data.py | 31 | 1680 | #! /usr/bin/env python
"""Name of dataset."""
__docformat__ = 'restructuredtext'
COPYRIGHT = """E.g., This is public domain."""
TITLE = """Title of the dataset"""
SOURCE = """
This section should provide a link to the original dataset if possible and
attribution and correspondance information for the dataset's original author
if so desired.
"""
DESCRSHORT = """A short description."""
DESCRLONG = """A longer description of the dataset."""
#suggested notes
NOTE = """
::
Number of observations:
Number of variables:
Variable name definitions:
Any other useful information that does not fit into the above categories.
"""
import numpy as np
from statsmodels.datasets import utils as du
from os.path import dirname, abspath
def load():
"""
Load the data and return a Dataset class instance.
Returns
-------
Dataset instance:
See DATASET_PROPOSAL.txt for more information.
"""
data = _get_data()
##### SET THE INDICES #####
#NOTE: None for exog_idx is the complement of endog_idx
return du.process_recarray(data, endog_idx=0, exog_idx=None, dtype=float)
def load_pandas():
data = _get_data()
##### SET THE INDICES #####
#NOTE: None for exog_idx is the complement of endog_idx
return du.process_recarray_pandas(data, endog_idx=0, exog_idx=None,
dtype=float)
def _get_data():
filepath = dirname(abspath(__file__))
##### EDIT THE FOLLOWING TO POINT TO DatasetName.csv #####
data = np.recfromtxt(open(filepath + '/DatasetName.csv', 'rb'),
delimiter=",", names=True, dtype=float)
return data
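# Hedged usage sketch, not part of the template: it only works once the
# placeholder DatasetName.csv exists next to this file; the attribute names
# follow the usual statsmodels Dataset conventions.
if __name__ == '__main__':
    dataset = load()
    print(dataset.endog[:5])
    df = load_pandas().data
    print(df.head())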
| bsd-3-clause |
OpenPIV/openpiv-python | openpiv/validation.py | 2 | 10863 | """A module for spurious vector detection."""
__licence__ = """
Copyright (C) 2011 www.openpiv.net
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import warnings
import numpy as np
from scipy.ndimage import generic_filter
import matplotlib.pyplot as plt
def global_val(u, v, u_thresholds, v_thresholds):
"""Eliminate spurious vectors with a global threshold.
This validation method tests for the spatial consistency of the data
and outlier vectors are replaced with NaN (Not a Number) if at
least one of the two velocity components is out of a specified global
range.
Parameters
----------
u : 2d np.ndarray
a two dimensional array containing the u velocity component.
v : 2d np.ndarray
a two dimensional array containing the v velocity component.
u_thresholds: two elements tuple
u_thresholds = (u_min, u_max). If ``u<u_min`` or ``u>u_max``
the vector is treated as an outlier.
v_thresholds: two elements tuple
``v_thresholds = (v_min, v_max)``. If ``v<v_min`` or ``v>v_max``
the vector is treated as an outlier.
Returns
-------
u : 2d np.ndarray
a two dimensional array containing the u velocity component,
where spurious vectors have been replaced by NaN.
v : 2d np.ndarray
a two dimensional array containing the v velocity component,
where spurious vectors have been replaced by NaN.
mask : boolean 2d np.ndarray
a boolean array. True elements correspond to outliers.
"""
warnings.filterwarnings("ignore")
ind = np.logical_or(
np.logical_or(u < u_thresholds[0], u > u_thresholds[1]),
np.logical_or(v < v_thresholds[0], v > v_thresholds[1]),
)
u[ind] = np.nan
v[ind] = np.nan
mask = np.zeros_like(u, dtype=bool)
mask[ind] = True
return u, v, mask
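# Hedged example, not part of OpenPIV: a tiny synthetic field in which one u
# and one v component fall outside hypothetical global limits. Wrapped in a
# function so importing the module never executes it.
def _demo_global_val():
    u_demo = np.array([[0.1, 0.2], [9.0, 0.15]])
    v_demo = np.array([[0.0, 0.1], [0.05, -8.0]])
    u_f, v_f, bad = global_val(u_demo, v_demo, (-1.0, 1.0), (-1.0, 1.0))
    return u_f, v_f, bad  # bad is True where a component exceeded the limits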
def global_std(u, v, std_threshold=5):
"""Eliminate spurious vectors with a global threshold defined by the
standard deviation
This validation method tests for the spatial consistency of the data
and outlier vectors are replaced with NaN (Not a Number) if at least
one of the two velocity components is out of a specified global range.
Parameters
----------
u : 2d masked np.ndarray
a two dimensional array containing the u velocity component.
v : 2d masked np.ndarray
a two dimensional array containing the v velocity component.
std_threshold: float
If the deviation of a velocity component from its mean over the flow
field is larger than std_threshold times the standard deviation of that
component, the vector is treated as an outlier. [default = 5]
Returns
-------
u : 2d np.ndarray
a two dimensional array containing the u velocity component,
where spurious vectors have been replaced by NaN.
v : 2d np.ndarray
a two dimensional array containing the v velocity component,
where spurious vectors have been replaced by NaN.
mask : boolean 2d np.ndarray
a boolean array. True elements correspond to outliers.
"""
# both previous nans and masked regions are not
# participating in the magnitude comparison
# def reject_outliers(data, m=2):
# return data[abs(data - np.mean(data)) < m * np.std(data)]
# create nan filled arrays where masks
# if u,v, are non-masked, ma.copy() adds false masks
tmpu = np.ma.copy(u).filled(np.nan)
tmpv = np.ma.copy(v).filled(np.nan)
ind = np.logical_or(np.abs(tmpu - np.nanmean(tmpu)) > std_threshold * np.nanstd(tmpu),
np.abs(tmpv - np.nanmean(tmpv)) > std_threshold * np.nanstd(tmpv))
if np.all(ind): # if all is True, something is really wrong
print('Warning! probably a uniform shift data, do not use this filter')
ind = ~ind
u[ind] = np.nan
v[ind] = np.nan
mask = np.zeros_like(u, dtype=bool)
mask[ind] = True
return u, v, mask
def sig2noise_val(u, v, s2n, w=None, threshold=1.05):
"""Eliminate spurious vectors from cross-correlation signal to noise ratio.
Replace spurious vectors with NaN if the signal-to-noise ratio
is below a specified threshold.
Parameters
----------
u : 2d or 3d np.ndarray
a two or three dimensional array containing the u velocity component.
v : 2d or 3d np.ndarray
a two or three dimensional array containing the v velocity component.
s2n : 2d np.ndarray
a two or three dimensional array containing the value of the signal to
noise ratio from cross-correlation function.
w : 2d or 3d np.ndarray
a two or three dimensional array containing the w (in z-direction)
velocity component.
threshold: float
the signal to noise ratio threshold value.
Returns
-------
u : 2d or 3d np.ndarray
a two or three dimensional array containing the u velocity component,
where spurious vectors have been replaced by NaN.
v : 2d or 3d np.ndarray
a two or three dimensional array containing the v velocity component,
where spurious vectors have been replaced by NaN.
w : 2d or 3d np.ndarray
optional, a two or three dimensional array containing the w
(in z-direction) velocity component, where spurious vectors
have been replaced by NaN.
mask : boolean 2d np.ndarray
a boolean array. True elements correspond to outliers.
References
----------
R. D. Keane and R. J. Adrian, Measurement Science & Technology, 1990,
1, 1202-1215.
"""
ind = s2n < threshold
u[ind] = np.nan
v[ind] = np.nan
mask = np.zeros_like(u, dtype=bool)
mask[ind] = True
if isinstance(w, np.ndarray):
w[ind] = np.nan
return u, v, w, mask
return u, v, mask
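# Hedged example, not part of OpenPIV: invalidate vectors whose correlation
# signal-to-noise ratio falls below a hypothetical threshold of 1.3.
def _demo_sig2noise_val():
    u_demo = np.array([[1.0, 1.1], [0.9, 1.2]])
    v_demo = np.array([[0.2, 0.1], [0.3, 0.2]])
    s2n_demo = np.array([[2.5, 1.1], [1.8, 2.2]])
    return sig2noise_val(u_demo, v_demo, s2n_demo, threshold=1.3)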
def local_median_val(u, v, u_threshold, v_threshold, size=1):
"""Eliminate spurious vectors with a local median threshold.
This validation method tests for the spatial consistency of the data.
Vectors are classified as outliers and replaced with NaN (Not a Number) if
the absolute difference with the local median is greater than a user
specified threshold. The median is computed for both velocity components.
The image masked areas (obstacles, reflections) are marked as a masked array:
u = np.ma.masked_array(u, mask=image_mask)
and should not be replaced by the local median, but should remain masked.
Parameters
----------
u : 2d np.ndarray
a two dimensional array containing the u velocity component.
v : 2d np.ndarray
a two dimensional array containing the v velocity component.
u_threshold : float
the threshold value for component u
v_threshold : float
the threshold value for component v
Returns
-------
u : 2d np.ndarray
a two dimensional array containing the u velocity component,
where spurious vectors have been replaced by NaN.
v : 2d np.ndarray
a two dimensional array containing the v velocity component,
where spurious vectors have been replaced by NaN.
mask : boolean 2d np.ndarray
a boolean array. True elements correspond to outliers.
"""
# make a copy of the data without the masked region, fill it also with
# NaN and then use generic filter with nanmean
#
u = np.ma.copy(u)
v = np.ma.copy(v)
# kernel footprint
f = np.ones((2*size+1, 2*size+1))
f[size,size] = 0
masked_u = np.where(~u.mask, u.data, np.nan)
masked_v = np.where(~v.mask, v.data, np.nan)
um = generic_filter(masked_u, np.nanmedian, mode='constant',
cval=np.nan, footprint=f)
vm = generic_filter(masked_v, np.nanmedian, mode='constant',
cval=np.nan, footprint=f)
ind = (np.abs((u - um)) > u_threshold) | (np.abs((v - vm)) > v_threshold)
u[ind] = np.nan
v[ind] = np.nan
mask = np.zeros(u.shape, dtype=bool)
mask[ind] = True
return u, v, mask
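# Hedged example, not part of OpenPIV: a uniform masked-array field with a
# single spurious centre vector; thresholds and kernel size are hypothetical.
def _demo_local_median_val():
    u_demo = np.ma.masked_array(np.ones((5, 5)), mask=np.zeros((5, 5), bool))
    v_demo = np.ma.masked_array(np.ones((5, 5)), mask=np.zeros((5, 5), bool))
    u_demo[2, 2] = 15.0  # outlier relative to its 3x3 neighbourhood median
    return local_median_val(u_demo, v_demo, u_threshold=2.0, v_threshold=2.0,
                            size=1)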
def typical_validation(u, v, s2n, settings):
"""
Validation using global limits, standard deviation and local median,
with a special option of 'no_std' for the case of completely
uniform shift, e.g. in tests.
see Settings() for the parameters:
MinMaxU : two elements tuple
sets the limits of the u displacement component
Used for validation.
MinMaxV : two elements tuple
sets the limits of the v displacement component
Used for validation.
std_threshold : float
sets the threshold for the std validation
median_threshold : float
sets the threshold for the median validation
"""
if settings.show_all_plots:
plt.figure()
plt.quiver(u,v,color='b')
mask = np.zeros(u.shape, dtype=bool)
u, v, mask_g = global_val(
u, v, settings.MinMax_U_disp, settings.MinMax_V_disp
)
# print(f"global filter invalidated {sum(mask_g.flatten())} vectors")
if settings.show_all_plots:
plt.quiver(u,v,color='m')
u, v, mask_s = global_std(
u, v, std_threshold=settings.std_threshold
)
# print(f"std filter invalidated {sum(mask_s.flatten())} vectors")
if settings.show_all_plots:
plt.quiver(u,v,color='k')
u, v, mask_m = local_median_val(
u,
v,
u_threshold=settings.median_threshold,
v_threshold=settings.median_threshold,
size=settings.median_size,
)
if settings.show_all_plots:
plt.quiver(u,v,color='r')
# print(f"median filter invalidated {sum(mask_m.flatten())} vectors")
mask = mask + mask_g + mask_m + mask_s
if settings.sig2noise_validate:
u, v, mask_s2n = sig2noise_val(
u, v, s2n,
threshold=settings.sig2noise_threshold
)
# print(f"s2n filter invalidated {sum(mask_s2n.flatten())} vectors")
if settings.show_all_plots:
plt.quiver(u,v,color='g')
plt.show()
if settings.show_all_plots and sum(mask_s2n.flatten()): # if not all NaN
plt.figure()
plt.hist(s2n[s2n>0].flatten(),31)
plt.show()
mask += mask_s2n
return u, v, mask
| gpl-3.0 |
mdhaber/scipy | scipy/stats/_multivariate.py | 5 | 153946 | #
# Author: Joris Vankerschaver 2013
#
import math
import numpy as np
from numpy import asarray_chkfinite, asarray
import scipy.linalg
from scipy._lib import doccer
from scipy.special import gammaln, psi, multigammaln, xlogy, entr, betaln
from scipy._lib._util import check_random_state
from scipy.linalg.blas import drot
from scipy.linalg.misc import LinAlgError
from scipy.linalg.lapack import get_lapack_funcs
from ._discrete_distns import binom
from . import mvn
__all__ = ['multivariate_normal',
'matrix_normal',
'dirichlet',
'wishart',
'invwishart',
'multinomial',
'special_ortho_group',
'ortho_group',
'random_correlation',
'unitary_group',
'multivariate_t',
'multivariate_hypergeom']
_LOG_2PI = np.log(2 * np.pi)
_LOG_2 = np.log(2)
_LOG_PI = np.log(np.pi)
_doc_random_state = """\
random_state : {None, int, `numpy.random.Generator`,
`numpy.random.RandomState`}, optional
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance then
that instance is used.
"""
def _squeeze_output(out):
"""
Remove single-dimensional entries from array and convert to scalar,
if necessary.
"""
out = out.squeeze()
if out.ndim == 0:
out = out[()]
return out
def _eigvalsh_to_eps(spectrum, cond=None, rcond=None):
"""Determine which eigenvalues are "small" given the spectrum.
This is for compatibility across various linear algebra functions
that should agree about whether or not a Hermitian matrix is numerically
singular and what is its numerical matrix rank.
This is designed to be compatible with scipy.linalg.pinvh.
Parameters
----------
spectrum : 1d ndarray
Array of eigenvalues of a Hermitian matrix.
cond, rcond : float, optional
Cutoff for small eigenvalues.
Singular values smaller than rcond * largest_eigenvalue are
considered zero.
If None or -1, suitable machine precision is used.
Returns
-------
eps : float
Magnitude cutoff for numerical negligibility.
"""
if rcond is not None:
cond = rcond
if cond in [None, -1]:
t = spectrum.dtype.char.lower()
factor = {'f': 1E3, 'd': 1E6}
cond = factor[t] * np.finfo(t).eps
eps = cond * np.max(abs(spectrum))
return eps
def _pinv_1d(v, eps=1e-5):
"""A helper function for computing the pseudoinverse.
Parameters
----------
v : iterable of numbers
This may be thought of as a vector of eigenvalues or singular values.
eps : float
Values with magnitude no greater than eps are considered negligible.
Returns
-------
v_pinv : 1d float ndarray
A vector of pseudo-inverted numbers.
"""
return np.array([0 if abs(x) <= eps else 1/x for x in v], dtype=float)
class _PSD:
"""
Compute coordinated functions of a symmetric positive semidefinite matrix.
This class addresses two issues. Firstly it allows the pseudoinverse,
the logarithm of the pseudo-determinant, and the rank of the matrix
to be computed using one call to eigh instead of three.
Secondly it allows these functions to be computed in a way
that gives mutually compatible results.
All of the functions are computed with a common understanding as to
which of the eigenvalues are to be considered negligibly small.
The functions are designed to coordinate with scipy.linalg.pinvh()
but not necessarily with np.linalg.det() or with np.linalg.matrix_rank().
Parameters
----------
M : array_like
Symmetric positive semidefinite matrix (2-D).
cond, rcond : float, optional
Cutoff for small eigenvalues.
Singular values smaller than rcond * largest_eigenvalue are
considered zero.
If None or -1, suitable machine precision is used.
lower : bool, optional
Whether the pertinent array data is taken from the lower
or upper triangle of M. (Default: lower)
check_finite : bool, optional
Whether to check that the input matrices contain only finite
numbers. Disabling may give a performance gain, but may result
in problems (crashes, non-termination) if the inputs do contain
infinities or NaNs.
allow_singular : bool, optional
Whether to allow a singular matrix. (Default: True)
Notes
-----
The arguments are similar to those of scipy.linalg.pinvh().
"""
def __init__(self, M, cond=None, rcond=None, lower=True,
check_finite=True, allow_singular=True):
# Compute the symmetric eigendecomposition.
# Note that eigh takes care of array conversion, chkfinite,
# and assertion that the matrix is square.
s, u = scipy.linalg.eigh(M, lower=lower, check_finite=check_finite)
eps = _eigvalsh_to_eps(s, cond, rcond)
if np.min(s) < -eps:
raise ValueError('the input matrix must be positive semidefinite')
d = s[s > eps]
if len(d) < len(s) and not allow_singular:
raise np.linalg.LinAlgError('singular matrix')
s_pinv = _pinv_1d(s, eps)
U = np.multiply(u, np.sqrt(s_pinv))
# Initialize the eagerly precomputed attributes.
self.rank = len(d)
self.U = U
self.log_pdet = np.sum(np.log(d))
# Initialize an attribute to be lazily computed.
self._pinv = None
@property
def pinv(self):
if self._pinv is None:
self._pinv = np.dot(self.U, self.U.T)
return self._pinv
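# Hedged illustration, not part of SciPy: for a rank-deficient covariance,
# _PSD exposes the rank, the log pseudo-determinant and a factor U whose
# product U @ U.T equals the pseudo-inverse; the matrix below is hypothetical.
def _demo_psd_factorization():
    cov = np.array([[2.0, 0.0, 0.0],
                    [0.0, 1.0, 1.0],
                    [0.0, 1.0, 1.0]])  # symmetric PSD, rank 2
    psd = _PSD(cov, allow_singular=True)
    same = np.allclose(np.dot(psd.U, psd.U.T), np.linalg.pinv(cov))
    return psd.rank, psd.log_pdet, same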
class multi_rv_generic:
"""
Class which encapsulates common functionality between all multivariate
distributions.
"""
def __init__(self, seed=None):
super().__init__()
self._random_state = check_random_state(seed)
@property
def random_state(self):
""" Get or set the Generator object for generating random variates.
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance then
that instance is used.
"""
return self._random_state
@random_state.setter
def random_state(self, seed):
self._random_state = check_random_state(seed)
def _get_random_state(self, random_state):
if random_state is not None:
return check_random_state(random_state)
else:
return self._random_state
class multi_rv_frozen:
"""
Class which encapsulates common functionality between all frozen
multivariate distributions.
"""
@property
def random_state(self):
return self._dist._random_state
@random_state.setter
def random_state(self, seed):
self._dist._random_state = check_random_state(seed)
_mvn_doc_default_callparams = """\
mean : array_like, optional
Mean of the distribution (default zero)
cov : array_like, optional
Covariance matrix of the distribution (default one)
allow_singular : bool, optional
Whether to allow a singular covariance matrix. (Default: False)
"""
_mvn_doc_callparams_note = \
"""Setting the parameter `mean` to `None` is equivalent to having `mean`
be the zero-vector. The parameter `cov` can be a scalar, in which case
the covariance matrix is the identity times that value, a vector of
diagonal entries for the covariance matrix, or a two-dimensional
array_like.
"""
_mvn_doc_frozen_callparams = ""
_mvn_doc_frozen_callparams_note = \
"""See class definition for a detailed description of parameters."""
mvn_docdict_params = {
'_mvn_doc_default_callparams': _mvn_doc_default_callparams,
'_mvn_doc_callparams_note': _mvn_doc_callparams_note,
'_doc_random_state': _doc_random_state
}
mvn_docdict_noparams = {
'_mvn_doc_default_callparams': _mvn_doc_frozen_callparams,
'_mvn_doc_callparams_note': _mvn_doc_frozen_callparams_note,
'_doc_random_state': _doc_random_state
}
class multivariate_normal_gen(multi_rv_generic):
r"""A multivariate normal random variable.
The `mean` keyword specifies the mean. The `cov` keyword specifies the
covariance matrix.
Methods
-------
``pdf(x, mean=None, cov=1, allow_singular=False)``
Probability density function.
``logpdf(x, mean=None, cov=1, allow_singular=False)``
Log of the probability density function.
``cdf(x, mean=None, cov=1, allow_singular=False, maxpts=1000000*dim, abseps=1e-5, releps=1e-5)``
Cumulative distribution function.
``logcdf(x, mean=None, cov=1, allow_singular=False, maxpts=1000000*dim, abseps=1e-5, releps=1e-5)``
Log of the cumulative distribution function.
``rvs(mean=None, cov=1, size=1, random_state=None)``
Draw random samples from a multivariate normal distribution.
``entropy()``
Compute the differential entropy of the multivariate normal.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_mvn_doc_default_callparams)s
%(_doc_random_state)s
Alternatively, the object may be called (as a function) to fix the mean
and covariance parameters, returning a "frozen" multivariate normal
random variable:
rv = multivariate_normal(mean=None, cov=1, allow_singular=False)
- Frozen object with the same methods but holding the given
mean and covariance fixed.
Notes
-----
%(_mvn_doc_callparams_note)s
The covariance matrix `cov` must be a (symmetric) positive
semi-definite matrix. The determinant and inverse of `cov` are computed
as the pseudo-determinant and pseudo-inverse, respectively, so
that `cov` does not need to have full rank.
The probability density function for `multivariate_normal` is
.. math::
f(x) = \frac{1}{\sqrt{(2 \pi)^k \det \Sigma}}
\exp\left( -\frac{1}{2} (x - \mu)^T \Sigma^{-1} (x - \mu) \right),
where :math:`\mu` is the mean, :math:`\Sigma` the covariance matrix,
and :math:`k` is the dimension of the space where :math:`x` takes values.
.. versionadded:: 0.14.0
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy.stats import multivariate_normal
>>> x = np.linspace(0, 5, 10, endpoint=False)
>>> y = multivariate_normal.pdf(x, mean=2.5, cov=0.5); y
array([ 0.00108914, 0.01033349, 0.05946514, 0.20755375, 0.43939129,
0.56418958, 0.43939129, 0.20755375, 0.05946514, 0.01033349])
>>> fig1 = plt.figure()
>>> ax = fig1.add_subplot(111)
>>> ax.plot(x, y)
The input quantiles can be any shape of array, as long as the last
axis labels the components. This allows us for instance to
display the frozen pdf for a non-isotropic random variable in 2D as
follows:
>>> x, y = np.mgrid[-1:1:.01, -1:1:.01]
>>> pos = np.dstack((x, y))
>>> rv = multivariate_normal([0.5, -0.2], [[2.0, 0.3], [0.3, 0.5]])
>>> fig2 = plt.figure()
>>> ax2 = fig2.add_subplot(111)
>>> ax2.contourf(x, y, rv.pdf(pos))
"""
def __init__(self, seed=None):
super().__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__, mvn_docdict_params)
def __call__(self, mean=None, cov=1, allow_singular=False, seed=None):
"""Create a frozen multivariate normal distribution.
See `multivariate_normal_frozen` for more information.
"""
return multivariate_normal_frozen(mean, cov,
allow_singular=allow_singular,
seed=seed)
def _process_parameters(self, dim, mean, cov):
"""
Infer dimensionality from mean or covariance matrix, ensure that
mean and covariance are full vector resp. matrix.
"""
# Try to infer dimensionality
if dim is None:
if mean is None:
if cov is None:
dim = 1
else:
cov = np.asarray(cov, dtype=float)
if cov.ndim < 2:
dim = 1
else:
dim = cov.shape[0]
else:
mean = np.asarray(mean, dtype=float)
dim = mean.size
else:
if not np.isscalar(dim):
raise ValueError("Dimension of random variable must be "
"a scalar.")
# Check input sizes and return full arrays for mean and cov if
# necessary
if mean is None:
mean = np.zeros(dim)
mean = np.asarray(mean, dtype=float)
if cov is None:
cov = 1.0
cov = np.asarray(cov, dtype=float)
if dim == 1:
mean.shape = (1,)
cov.shape = (1, 1)
if mean.ndim != 1 or mean.shape[0] != dim:
raise ValueError("Array 'mean' must be a vector of length %d." %
dim)
if cov.ndim == 0:
cov = cov * np.eye(dim)
elif cov.ndim == 1:
cov = np.diag(cov)
elif cov.ndim == 2 and cov.shape != (dim, dim):
rows, cols = cov.shape
if rows != cols:
msg = ("Array 'cov' must be square if it is two dimensional,"
" but cov.shape = %s." % str(cov.shape))
else:
msg = ("Dimension mismatch: array 'cov' is of shape %s,"
" but 'mean' is a vector of length %d.")
msg = msg % (str(cov.shape), len(mean))
raise ValueError(msg)
elif cov.ndim > 2:
raise ValueError("Array 'cov' must be at most two-dimensional,"
" but cov.ndim = %d" % cov.ndim)
return dim, mean, cov
def _process_quantiles(self, x, dim):
"""
Adjust quantiles array so that last axis labels the components of
each data point.
"""
x = np.asarray(x, dtype=float)
if x.ndim == 0:
x = x[np.newaxis]
elif x.ndim == 1:
if dim == 1:
x = x[:, np.newaxis]
else:
x = x[np.newaxis, :]
return x
def _logpdf(self, x, mean, prec_U, log_det_cov, rank):
"""Log of the multivariate normal probability density function.
Parameters
----------
x : ndarray
Points at which to evaluate the log of the probability
density function
mean : ndarray
Mean of the distribution
prec_U : ndarray
A decomposition such that np.dot(prec_U, prec_U.T)
is the precision matrix, i.e. inverse of the covariance matrix.
log_det_cov : float
Logarithm of the determinant of the covariance matrix
rank : int
Rank of the covariance matrix.
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'logpdf' instead.
"""
dev = x - mean
maha = np.sum(np.square(np.dot(dev, prec_U)), axis=-1)
return -0.5 * (rank * _LOG_2PI + log_det_cov + maha)
def logpdf(self, x, mean=None, cov=1, allow_singular=False):
"""Log of the multivariate normal probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_mvn_doc_default_callparams)s
Returns
-------
pdf : ndarray or scalar
Log of the probability density function evaluated at `x`
Notes
-----
%(_mvn_doc_callparams_note)s
"""
dim, mean, cov = self._process_parameters(None, mean, cov)
x = self._process_quantiles(x, dim)
psd = _PSD(cov, allow_singular=allow_singular)
out = self._logpdf(x, mean, psd.U, psd.log_pdet, psd.rank)
return _squeeze_output(out)
def pdf(self, x, mean=None, cov=1, allow_singular=False):
"""Multivariate normal probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_mvn_doc_default_callparams)s
Returns
-------
pdf : ndarray or scalar
Probability density function evaluated at `x`
Notes
-----
%(_mvn_doc_callparams_note)s
"""
dim, mean, cov = self._process_parameters(None, mean, cov)
x = self._process_quantiles(x, dim)
psd = _PSD(cov, allow_singular=allow_singular)
out = np.exp(self._logpdf(x, mean, psd.U, psd.log_pdet, psd.rank))
return _squeeze_output(out)
def _cdf(self, x, mean, cov, maxpts, abseps, releps):
"""Log of the multivariate normal cumulative distribution function.
Parameters
----------
x : ndarray
Points at which to evaluate the cumulative distribution function.
mean : ndarray
Mean of the distribution
cov : array_like
Covariance matrix of the distribution
maxpts : integer
The maximum number of points to use for integration
abseps : float
Absolute error tolerance
releps : float
Relative error tolerance
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'cdf' instead.
.. versionadded:: 1.0.0
"""
lower = np.full(mean.shape, -np.inf)
# mvnun expects 1-d arguments, so process points sequentially
func1d = lambda x_slice: mvn.mvnun(lower, x_slice, mean, cov,
maxpts, abseps, releps)[0]
out = np.apply_along_axis(func1d, -1, x)
return _squeeze_output(out)
def logcdf(self, x, mean=None, cov=1, allow_singular=False, maxpts=None,
abseps=1e-5, releps=1e-5):
"""Log of the multivariate normal cumulative distribution function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_mvn_doc_default_callparams)s
maxpts : integer, optional
The maximum number of points to use for integration
(default `1000000*dim`)
abseps : float, optional
Absolute error tolerance (default 1e-5)
releps : float, optional
Relative error tolerance (default 1e-5)
Returns
-------
cdf : ndarray or scalar
Log of the cumulative distribution function evaluated at `x`
Notes
-----
%(_mvn_doc_callparams_note)s
.. versionadded:: 1.0.0
"""
dim, mean, cov = self._process_parameters(None, mean, cov)
x = self._process_quantiles(x, dim)
# Use _PSD to check covariance matrix
_PSD(cov, allow_singular=allow_singular)
if not maxpts:
maxpts = 1000000 * dim
out = np.log(self._cdf(x, mean, cov, maxpts, abseps, releps))
return out
def cdf(self, x, mean=None, cov=1, allow_singular=False, maxpts=None,
abseps=1e-5, releps=1e-5):
"""Multivariate normal cumulative distribution function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_mvn_doc_default_callparams)s
maxpts : integer, optional
The maximum number of points to use for integration
(default `1000000*dim`)
abseps : float, optional
Absolute error tolerance (default 1e-5)
releps : float, optional
Relative error tolerance (default 1e-5)
Returns
-------
cdf : ndarray or scalar
Cumulative distribution function evaluated at `x`
Notes
-----
%(_mvn_doc_callparams_note)s
.. versionadded:: 1.0.0
"""
dim, mean, cov = self._process_parameters(None, mean, cov)
x = self._process_quantiles(x, dim)
# Use _PSD to check covariance matrix
_PSD(cov, allow_singular=allow_singular)
if not maxpts:
maxpts = 1000000 * dim
out = self._cdf(x, mean, cov, maxpts, abseps, releps)
return out
def rvs(self, mean=None, cov=1, size=1, random_state=None):
"""Draw random samples from a multivariate normal distribution.
Parameters
----------
%(_mvn_doc_default_callparams)s
size : integer, optional
Number of samples to draw (default 1).
%(_doc_random_state)s
Returns
-------
rvs : ndarray or scalar
Random variates of size (`size`, `N`), where `N` is the
dimension of the random variable.
Notes
-----
%(_mvn_doc_callparams_note)s
"""
dim, mean, cov = self._process_parameters(None, mean, cov)
random_state = self._get_random_state(random_state)
out = random_state.multivariate_normal(mean, cov, size)
return _squeeze_output(out)
def entropy(self, mean=None, cov=1):
"""Compute the differential entropy of the multivariate normal.
Parameters
----------
%(_mvn_doc_default_callparams)s
Returns
-------
h : scalar
Entropy of the multivariate normal distribution
Notes
-----
%(_mvn_doc_callparams_note)s
"""
dim, mean, cov = self._process_parameters(None, mean, cov)
_, logdet = np.linalg.slogdet(2 * np.pi * np.e * cov)
return 0.5 * logdet
multivariate_normal = multivariate_normal_gen()
class multivariate_normal_frozen(multi_rv_frozen):
def __init__(self, mean=None, cov=1, allow_singular=False, seed=None,
maxpts=None, abseps=1e-5, releps=1e-5):
"""Create a frozen multivariate normal distribution.
Parameters
----------
mean : array_like, optional
Mean of the distribution (default zero)
cov : array_like, optional
Covariance matrix of the distribution (default one)
allow_singular : bool, optional
If this flag is True then tolerate a singular
covariance matrix (default False).
seed : {None, int, `numpy.random.Generator`,
`numpy.random.RandomState`}, optional
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance
then that instance is used.
maxpts : integer, optional
The maximum number of points to use for integration of the
cumulative distribution function (default `1000000*dim`)
abseps : float, optional
Absolute error tolerance for the cumulative distribution function
(default 1e-5)
releps : float, optional
Relative error tolerance for the cumulative distribution function
(default 1e-5)
Examples
--------
When called with the default parameters, this will create a 1D random
variable with mean 0 and covariance 1:
>>> from scipy.stats import multivariate_normal
>>> r = multivariate_normal()
>>> r.mean
array([ 0.])
>>> r.cov
array([[1.]])
"""
self._dist = multivariate_normal_gen(seed)
self.dim, self.mean, self.cov = self._dist._process_parameters(
None, mean, cov)
self.cov_info = _PSD(self.cov, allow_singular=allow_singular)
if not maxpts:
maxpts = 1000000 * self.dim
self.maxpts = maxpts
self.abseps = abseps
self.releps = releps
def logpdf(self, x):
x = self._dist._process_quantiles(x, self.dim)
out = self._dist._logpdf(x, self.mean, self.cov_info.U,
self.cov_info.log_pdet, self.cov_info.rank)
return _squeeze_output(out)
def pdf(self, x):
return np.exp(self.logpdf(x))
def logcdf(self, x):
return np.log(self.cdf(x))
def cdf(self, x):
x = self._dist._process_quantiles(x, self.dim)
out = self._dist._cdf(x, self.mean, self.cov, self.maxpts, self.abseps,
self.releps)
return _squeeze_output(out)
def rvs(self, size=1, random_state=None):
return self._dist.rvs(self.mean, self.cov, size, random_state)
def entropy(self):
"""Computes the differential entropy of the multivariate normal.
Returns
-------
h : scalar
Entropy of the multivariate normal distribution
"""
log_pdet = self.cov_info.log_pdet
rank = self.cov_info.rank
return 0.5 * (rank * (_LOG_2PI + 1) + log_pdet)
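# Hedged consistency check, not part of SciPy: for a full-rank covariance the
# frozen entropy 0.5*(rank*(_LOG_2PI + 1) + log_pdet) agrees with the
# 0.5*logdet(2*pi*e*cov) form used by multivariate_normal_gen.entropy; the
# covariance below is hypothetical.
def _demo_entropy_consistency():
    cov = np.array([[2.0, 0.3], [0.3, 0.5]])
    frozen = multivariate_normal(mean=[0.0, 0.0], cov=cov)
    return np.isclose(frozen.entropy(),
                      multivariate_normal.entropy([0.0, 0.0], cov))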
# Set frozen generator docstrings from corresponding docstrings in
# multivariate_normal_gen and fill in default strings in class docstrings
for name in ['logpdf', 'pdf', 'logcdf', 'cdf', 'rvs']:
method = multivariate_normal_gen.__dict__[name]
method_frozen = multivariate_normal_frozen.__dict__[name]
method_frozen.__doc__ = doccer.docformat(method.__doc__,
mvn_docdict_noparams)
method.__doc__ = doccer.docformat(method.__doc__, mvn_docdict_params)
_matnorm_doc_default_callparams = """\
mean : array_like, optional
Mean of the distribution (default: `None`)
rowcov : array_like, optional
Among-row covariance matrix of the distribution (default: `1`)
colcov : array_like, optional
Among-column covariance matrix of the distribution (default: `1`)
"""
_matnorm_doc_callparams_note = \
"""If `mean` is set to `None` then a matrix of zeros is used for the mean.
The dimensions of this matrix are inferred from the shape of `rowcov` and
`colcov`, if these are provided, or set to `1` if ambiguous.
`rowcov` and `colcov` can be two-dimensional array_likes specifying the
covariance matrices directly. Alternatively, a one-dimensional array will
be interpreted as the entries of a diagonal matrix, and a scalar or
zero-dimensional array will be interpreted as this value times the
identity matrix.
"""
_matnorm_doc_frozen_callparams = ""
_matnorm_doc_frozen_callparams_note = \
"""See class definition for a detailed description of parameters."""
matnorm_docdict_params = {
'_matnorm_doc_default_callparams': _matnorm_doc_default_callparams,
'_matnorm_doc_callparams_note': _matnorm_doc_callparams_note,
'_doc_random_state': _doc_random_state
}
matnorm_docdict_noparams = {
'_matnorm_doc_default_callparams': _matnorm_doc_frozen_callparams,
'_matnorm_doc_callparams_note': _matnorm_doc_frozen_callparams_note,
'_doc_random_state': _doc_random_state
}
class matrix_normal_gen(multi_rv_generic):
r"""A matrix normal random variable.
The `mean` keyword specifies the mean. The `rowcov` keyword specifies the
among-row covariance matrix. The 'colcov' keyword specifies the
among-column covariance matrix.
Methods
-------
``pdf(X, mean=None, rowcov=1, colcov=1)``
Probability density function.
``logpdf(X, mean=None, rowcov=1, colcov=1)``
Log of the probability density function.
``rvs(mean=None, rowcov=1, colcov=1, size=1, random_state=None)``
Draw random samples.
Parameters
----------
X : array_like
Quantiles, with the last two axes of `X` denoting the components.
%(_matnorm_doc_default_callparams)s
%(_doc_random_state)s
Alternatively, the object may be called (as a function) to fix the mean
and covariance parameters, returning a "frozen" matrix normal
random variable:
rv = matrix_normal(mean=None, rowcov=1, colcov=1)
- Frozen object with the same methods but holding the given
mean and covariance fixed.
Notes
-----
%(_matnorm_doc_callparams_note)s
The covariance matrices specified by `rowcov` and `colcov` must be
(symmetric) positive definite. If the samples in `X` are
:math:`m \times n`, then `rowcov` must be :math:`m \times m` and
`colcov` must be :math:`n \times n`. `mean` must be the same shape as `X`.
The probability density function for `matrix_normal` is
.. math::
f(X) = (2 \pi)^{-\frac{mn}{2}}|U|^{-\frac{n}{2}} |V|^{-\frac{m}{2}}
\exp\left( -\frac{1}{2} \mathrm{Tr}\left[ U^{-1} (X-M) V^{-1}
(X-M)^T \right] \right),
where :math:`M` is the mean, :math:`U` the among-row covariance matrix,
:math:`V` the among-column covariance matrix.
The `allow_singular` behaviour of the `multivariate_normal`
distribution is not currently supported. Covariance matrices must be
full rank.
The `matrix_normal` distribution is closely related to the
`multivariate_normal` distribution. Specifically, :math:`\mathrm{Vec}(X)`
(the vector formed by concatenating the columns of :math:`X`) has a
multivariate normal distribution with mean :math:`\mathrm{Vec}(M)`
and covariance :math:`V \otimes U` (where :math:`\otimes` is the Kronecker
product). Sampling and pdf evaluation are
:math:`\mathcal{O}(m^3 + n^3 + m^2 n + m n^2)` for the matrix normal, but
:math:`\mathcal{O}(m^3 n^3)` for the equivalent multivariate normal,
making this equivalent form algorithmically inefficient.
.. versionadded:: 0.17.0
Examples
--------
>>> from scipy.stats import matrix_normal
>>> M = np.arange(6).reshape(3,2); M
array([[0, 1],
[2, 3],
[4, 5]])
>>> U = np.diag([1,2,3]); U
array([[1, 0, 0],
[0, 2, 0],
[0, 0, 3]])
>>> V = 0.3*np.identity(2); V
array([[ 0.3, 0. ],
[ 0. , 0.3]])
>>> X = M + 0.1; X
array([[ 0.1, 1.1],
[ 2.1, 3.1],
[ 4.1, 5.1]])
>>> matrix_normal.pdf(X, mean=M, rowcov=U, colcov=V)
0.023410202050005054
>>> # Equivalent multivariate normal
>>> from scipy.stats import multivariate_normal
>>> vectorised_X = X.T.flatten()
>>> equiv_mean = M.T.flatten()
>>> equiv_cov = np.kron(V,U)
>>> multivariate_normal.pdf(vectorised_X, mean=equiv_mean, cov=equiv_cov)
0.023410202050005054
"""
def __init__(self, seed=None):
super().__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__, matnorm_docdict_params)
def __call__(self, mean=None, rowcov=1, colcov=1, seed=None):
"""Create a frozen matrix normal distribution.
See `matrix_normal_frozen` for more information.
"""
return matrix_normal_frozen(mean, rowcov, colcov, seed=seed)
def _process_parameters(self, mean, rowcov, colcov):
"""
Infer dimensionality from mean or covariance matrices. Handle
defaults. Ensure compatible dimensions.
"""
# Process mean
if mean is not None:
mean = np.asarray(mean, dtype=float)
meanshape = mean.shape
if len(meanshape) != 2:
raise ValueError("Array `mean` must be two dimensional.")
if np.any(meanshape == 0):
raise ValueError("Array `mean` has invalid shape.")
# Process among-row covariance
rowcov = np.asarray(rowcov, dtype=float)
if rowcov.ndim == 0:
if mean is not None:
rowcov = rowcov * np.identity(meanshape[0])
else:
rowcov = rowcov * np.identity(1)
elif rowcov.ndim == 1:
rowcov = np.diag(rowcov)
rowshape = rowcov.shape
if len(rowshape) != 2:
raise ValueError("`rowcov` must be a scalar or a 2D array.")
if rowshape[0] != rowshape[1]:
raise ValueError("Array `rowcov` must be square.")
if rowshape[0] == 0:
raise ValueError("Array `rowcov` has invalid shape.")
numrows = rowshape[0]
# Process among-column covariance
colcov = np.asarray(colcov, dtype=float)
if colcov.ndim == 0:
if mean is not None:
colcov = colcov * np.identity(meanshape[1])
else:
colcov = colcov * np.identity(1)
elif colcov.ndim == 1:
colcov = np.diag(colcov)
colshape = colcov.shape
if len(colshape) != 2:
raise ValueError("`colcov` must be a scalar or a 2D array.")
if colshape[0] != colshape[1]:
raise ValueError("Array `colcov` must be square.")
if colshape[0] == 0:
raise ValueError("Array `colcov` has invalid shape.")
numcols = colshape[0]
# Ensure mean and covariances compatible
if mean is not None:
if meanshape[0] != numrows:
raise ValueError("Arrays `mean` and `rowcov` must have the "
"same number of rows.")
if meanshape[1] != numcols:
raise ValueError("Arrays `mean` and `colcov` must have the "
"same number of columns.")
else:
mean = np.zeros((numrows, numcols))
dims = (numrows, numcols)
return dims, mean, rowcov, colcov
def _process_quantiles(self, X, dims):
"""
Adjust quantiles array so that last two axes labels the components of
each data point.
"""
X = np.asarray(X, dtype=float)
if X.ndim == 2:
X = X[np.newaxis, :]
if X.shape[-2:] != dims:
raise ValueError("The shape of array `X` is not compatible "
"with the distribution parameters.")
return X
def _logpdf(self, dims, X, mean, row_prec_rt, log_det_rowcov,
col_prec_rt, log_det_colcov):
"""Log of the matrix normal probability density function.
Parameters
----------
dims : tuple
Dimensions of the matrix variates
X : ndarray
Points at which to evaluate the log of the probability
density function
mean : ndarray
Mean of the distribution
row_prec_rt : ndarray
A decomposition such that np.dot(row_prec_rt, row_prec_rt.T)
is the inverse of the among-row covariance matrix
log_det_rowcov : float
Logarithm of the determinant of the among-row covariance matrix
col_prec_rt : ndarray
A decomposition such that np.dot(col_prec_rt, col_prec_rt.T)
is the inverse of the among-column covariance matrix
log_det_colcov : float
Logarithm of the determinant of the among-column covariance matrix
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'logpdf' instead.
"""
numrows, numcols = dims
roll_dev = np.rollaxis(X-mean, axis=-1, start=0)
scale_dev = np.tensordot(col_prec_rt.T,
np.dot(roll_dev, row_prec_rt), 1)
maha = np.sum(np.sum(np.square(scale_dev), axis=-1), axis=0)
return -0.5 * (numrows*numcols*_LOG_2PI + numcols*log_det_rowcov
+ numrows*log_det_colcov + maha)
def logpdf(self, X, mean=None, rowcov=1, colcov=1):
"""Log of the matrix normal probability density function.
Parameters
----------
X : array_like
Quantiles, with the last two axes of `X` denoting the components.
%(_matnorm_doc_default_callparams)s
Returns
-------
logpdf : ndarray
Log of the probability density function evaluated at `X`
Notes
-----
%(_matnorm_doc_callparams_note)s
"""
dims, mean, rowcov, colcov = self._process_parameters(mean, rowcov,
colcov)
X = self._process_quantiles(X, dims)
rowpsd = _PSD(rowcov, allow_singular=False)
colpsd = _PSD(colcov, allow_singular=False)
out = self._logpdf(dims, X, mean, rowpsd.U, rowpsd.log_pdet, colpsd.U,
colpsd.log_pdet)
return _squeeze_output(out)
def pdf(self, X, mean=None, rowcov=1, colcov=1):
"""Matrix normal probability density function.
Parameters
----------
X : array_like
Quantiles, with the last two axes of `X` denoting the components.
%(_matnorm_doc_default_callparams)s
Returns
-------
pdf : ndarray
Probability density function evaluated at `X`
Notes
-----
%(_matnorm_doc_callparams_note)s
"""
return np.exp(self.logpdf(X, mean, rowcov, colcov))
def rvs(self, mean=None, rowcov=1, colcov=1, size=1, random_state=None):
"""Draw random samples from a matrix normal distribution.
Parameters
----------
%(_matnorm_doc_default_callparams)s
size : integer, optional
Number of samples to draw (default 1).
%(_doc_random_state)s
Returns
-------
rvs : ndarray or scalar
Random variates of size (`size`, `dims`), where `dims` is the
dimension of the random matrices.
Notes
-----
%(_matnorm_doc_callparams_note)s
"""
size = int(size)
dims, mean, rowcov, colcov = self._process_parameters(mean, rowcov,
colcov)
rowchol = scipy.linalg.cholesky(rowcov, lower=True)
colchol = scipy.linalg.cholesky(colcov, lower=True)
random_state = self._get_random_state(random_state)
std_norm = random_state.standard_normal(size=(dims[1], size, dims[0]))
roll_rvs = np.tensordot(colchol, np.dot(std_norm, rowchol.T), 1)
out = np.rollaxis(roll_rvs.T, axis=1, start=0) + mean[np.newaxis, :, :]
if size == 1:
out = out.reshape(mean.shape)
return out
matrix_normal = matrix_normal_gen()
class matrix_normal_frozen(multi_rv_frozen):
"""Create a frozen matrix normal distribution.
Parameters
----------
%(_matnorm_doc_default_callparams)s
seed : {None, int, `numpy.random.Generator`,
`numpy.random.RandomState`}, optional
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance
then that instance is used.
Examples
--------
>>> from scipy.stats import matrix_normal
>>> distn = matrix_normal(mean=np.zeros((3,3)))
>>> X = distn.rvs(); X
array([[-0.02976962, 0.93339138, -0.09663178],
[ 0.67405524, 0.28250467, -0.93308929],
[-0.31144782, 0.74535536, 1.30412916]])
>>> distn.pdf(X)
2.5160642368346784e-05
>>> distn.logpdf(X)
-10.590229595124615
"""
def __init__(self, mean=None, rowcov=1, colcov=1, seed=None):
self._dist = matrix_normal_gen(seed)
self.dims, self.mean, self.rowcov, self.colcov = \
self._dist._process_parameters(mean, rowcov, colcov)
self.rowpsd = _PSD(self.rowcov, allow_singular=False)
self.colpsd = _PSD(self.colcov, allow_singular=False)
def logpdf(self, X):
X = self._dist._process_quantiles(X, self.dims)
out = self._dist._logpdf(self.dims, X, self.mean, self.rowpsd.U,
self.rowpsd.log_pdet, self.colpsd.U,
self.colpsd.log_pdet)
return _squeeze_output(out)
def pdf(self, X):
return np.exp(self.logpdf(X))
def rvs(self, size=1, random_state=None):
return self._dist.rvs(self.mean, self.rowcov, self.colcov, size,
random_state)
# Set frozen generator docstrings from corresponding docstrings in
# matrix_normal_gen and fill in default strings in class docstrings
for name in ['logpdf', 'pdf', 'rvs']:
method = matrix_normal_gen.__dict__[name]
method_frozen = matrix_normal_frozen.__dict__[name]
method_frozen.__doc__ = doccer.docformat(method.__doc__,
matnorm_docdict_noparams)
method.__doc__ = doccer.docformat(method.__doc__, matnorm_docdict_params)
_dirichlet_doc_default_callparams = """\
alpha : array_like
The concentration parameters. The number of entries determines the
dimensionality of the distribution.
"""
_dirichlet_doc_frozen_callparams = ""
_dirichlet_doc_frozen_callparams_note = \
"""See class definition for a detailed description of parameters."""
dirichlet_docdict_params = {
'_dirichlet_doc_default_callparams': _dirichlet_doc_default_callparams,
'_doc_random_state': _doc_random_state
}
dirichlet_docdict_noparams = {
'_dirichlet_doc_default_callparams': _dirichlet_doc_frozen_callparams,
'_doc_random_state': _doc_random_state
}
def _dirichlet_check_parameters(alpha):
alpha = np.asarray(alpha)
if np.min(alpha) <= 0:
raise ValueError("All parameters must be greater than 0")
elif alpha.ndim != 1:
raise ValueError("Parameter vector 'a' must be one dimensional, "
"but a.shape = %s." % (alpha.shape, ))
return alpha
def _dirichlet_check_input(alpha, x):
x = np.asarray(x)
if x.shape[0] + 1 != alpha.shape[0] and x.shape[0] != alpha.shape[0]:
raise ValueError("Vector 'x' must have either the same number "
"of entries as, or one entry fewer than, "
"parameter vector 'a', but alpha.shape = %s "
"and x.shape = %s." % (alpha.shape, x.shape))
if x.shape[0] != alpha.shape[0]:
xk = np.array([1 - np.sum(x, 0)])
if xk.ndim == 1:
x = np.append(x, xk)
elif xk.ndim == 2:
x = np.vstack((x, xk))
else:
raise ValueError("The input must be one dimensional or a two "
"dimensional matrix containing the entries.")
if np.min(x) < 0:
raise ValueError("Each entry in 'x' must be greater than or equal "
"to zero.")
if np.max(x) > 1:
raise ValueError("Each entry in 'x' must be smaller or equal one.")
# Check x_i > 0 or alpha_i > 1
xeq0 = (x == 0)
alphalt1 = (alpha < 1)
if x.shape != alpha.shape:
alphalt1 = np.repeat(alphalt1, x.shape[-1], axis=-1).reshape(x.shape)
chk = np.logical_and(xeq0, alphalt1)
if np.sum(chk):
raise ValueError("Each entry in 'x' must be greater than zero if its "
"alpha is less than one.")
if (np.abs(np.sum(x, 0) - 1.0) > 10e-10).any():
raise ValueError("The input vector 'x' must lie within the normal "
"simplex. but np.sum(x, 0) = %s." % np.sum(x, 0))
return x
def _lnB(alpha):
r"""Internal helper function to compute the log of the useful quotient.
.. math::
B(\alpha) = \frac{\prod_{i=1}^{K}\Gamma(\alpha_i)}
{\Gamma\left(\sum_{i=1}^{K} \alpha_i \right)}
Parameters
----------
%(_dirichlet_doc_default_callparams)s
Returns
-------
B : scalar
Helper quotient, internal use only
"""
return np.sum(gammaln(alpha)) - gammaln(np.sum(alpha))
class dirichlet_gen(multi_rv_generic):
r"""A Dirichlet random variable.
The ``alpha`` keyword specifies the concentration parameters of the
distribution.
.. versionadded:: 0.15.0
Methods
-------
``pdf(x, alpha)``
Probability density function.
``logpdf(x, alpha)``
Log of the probability density function.
``rvs(alpha, size=1, random_state=None)``
Draw random samples from a Dirichlet distribution.
``mean(alpha)``
The mean of the Dirichlet distribution
``var(alpha)``
The variance of the Dirichlet distribution
``entropy(alpha)``
Compute the differential entropy of the Dirichlet distribution.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_dirichlet_doc_default_callparams)s
%(_doc_random_state)s
Alternatively, the object may be called (as a function) to fix
concentration parameters, returning a "frozen" Dirichlet
random variable:
rv = dirichlet(alpha)
- Frozen object with the same methods but holding the given
concentration parameters fixed.
Notes
-----
Each :math:`\alpha` entry must be positive. The distribution has support
only on the simplex defined by
.. math::
\sum_{i=1}^{K} x_i = 1
where :math:`0 < x_i < 1`.
If the quantiles don't lie within the simplex, a ValueError is raised.
The probability density function for `dirichlet` is
.. math::
f(x) = \frac{1}{\mathrm{B}(\boldsymbol\alpha)} \prod_{i=1}^K x_i^{\alpha_i - 1}
where
.. math::
\mathrm{B}(\boldsymbol\alpha) = \frac{\prod_{i=1}^K \Gamma(\alpha_i)}
{\Gamma\bigl(\sum_{i=1}^K \alpha_i\bigr)}
and :math:`\boldsymbol\alpha=(\alpha_1,\ldots,\alpha_K)`, the
concentration parameters and :math:`K` is the dimension of the space
where :math:`x` takes values.
Note that the dirichlet interface is somewhat inconsistent.
The array returned by the rvs function is transposed
with respect to the format expected by the pdf and logpdf.
Examples
--------
>>> from scipy.stats import dirichlet
Generate a dirichlet random variable
>>> quantiles = np.array([0.2, 0.2, 0.6]) # specify quantiles
>>> alpha = np.array([0.4, 5, 15]) # specify concentration parameters
>>> dirichlet.pdf(quantiles, alpha)
0.2843831684937255
The same PDF but following a log scale
>>> dirichlet.logpdf(quantiles, alpha)
-1.2574327653159187
Once we specify the dirichlet distribution
we can then calculate quantities of interest
>>> dirichlet.mean(alpha) # get the mean of the distribution
array([0.01960784, 0.24509804, 0.73529412])
>>> dirichlet.var(alpha) # get variance
array([0.00089829, 0.00864603, 0.00909517])
>>> dirichlet.entropy(alpha) # calculate the differential entropy
-4.3280162474082715
We can also return random samples from the distribution
>>> dirichlet.rvs(alpha, size=1, random_state=1)
array([[0.00766178, 0.24670518, 0.74563305]])
>>> dirichlet.rvs(alpha, size=2, random_state=2)
array([[0.01639427, 0.1292273 , 0.85437844],
[0.00156917, 0.19033695, 0.80809388]])
"""
def __init__(self, seed=None):
super().__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__, dirichlet_docdict_params)
def __call__(self, alpha, seed=None):
return dirichlet_frozen(alpha, seed=seed)
def _logpdf(self, x, alpha):
"""Log of the Dirichlet probability density function.
Parameters
----------
x : ndarray
Points at which to evaluate the log of the probability
density function
%(_dirichlet_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'logpdf' instead.
"""
lnB = _lnB(alpha)
return - lnB + np.sum((xlogy(alpha - 1, x.T)).T, 0)
def logpdf(self, x, alpha):
"""Log of the Dirichlet probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_dirichlet_doc_default_callparams)s
Returns
-------
pdf : ndarray or scalar
Log of the probability density function evaluated at `x`.
"""
alpha = _dirichlet_check_parameters(alpha)
x = _dirichlet_check_input(alpha, x)
out = self._logpdf(x, alpha)
return _squeeze_output(out)
def pdf(self, x, alpha):
"""The Dirichlet probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_dirichlet_doc_default_callparams)s
Returns
-------
pdf : ndarray or scalar
The probability density function evaluated at `x`.
"""
alpha = _dirichlet_check_parameters(alpha)
x = _dirichlet_check_input(alpha, x)
out = np.exp(self._logpdf(x, alpha))
return _squeeze_output(out)
def mean(self, alpha):
"""Compute the mean of the dirichlet distribution.
Parameters
----------
%(_dirichlet_doc_default_callparams)s
Returns
-------
mu : ndarray or scalar
Mean of the Dirichlet distribution.
"""
alpha = _dirichlet_check_parameters(alpha)
out = alpha / (np.sum(alpha))
return _squeeze_output(out)
def var(self, alpha):
"""Compute the variance of the dirichlet distribution.
Parameters
----------
%(_dirichlet_doc_default_callparams)s
Returns
-------
v : ndarray or scalar
Variance of the Dirichlet distribution.
"""
alpha = _dirichlet_check_parameters(alpha)
alpha0 = np.sum(alpha)
out = (alpha * (alpha0 - alpha)) / ((alpha0 * alpha0) * (alpha0 + 1))
return _squeeze_output(out)
def entropy(self, alpha):
"""Compute the differential entropy of the dirichlet distribution.
Parameters
----------
%(_dirichlet_doc_default_callparams)s
Returns
-------
h : scalar
Entropy of the Dirichlet distribution
"""
alpha = _dirichlet_check_parameters(alpha)
alpha0 = np.sum(alpha)
lnB = _lnB(alpha)
K = alpha.shape[0]
out = lnB + (alpha0 - K) * scipy.special.psi(alpha0) - np.sum(
(alpha - 1) * scipy.special.psi(alpha))
return _squeeze_output(out)
def rvs(self, alpha, size=1, random_state=None):
"""Draw random samples from a Dirichlet distribution.
Parameters
----------
%(_dirichlet_doc_default_callparams)s
size : int, optional
Number of samples to draw (default 1).
%(_doc_random_state)s
Returns
-------
rvs : ndarray or scalar
Random variates of size (`size`, `N`), where `N` is the
dimension of the random variable.
"""
alpha = _dirichlet_check_parameters(alpha)
random_state = self._get_random_state(random_state)
return random_state.dirichlet(alpha, size=size)
dirichlet = dirichlet_gen()
class dirichlet_frozen(multi_rv_frozen):
def __init__(self, alpha, seed=None):
self.alpha = _dirichlet_check_parameters(alpha)
self._dist = dirichlet_gen(seed)
def logpdf(self, x):
return self._dist.logpdf(x, self.alpha)
def pdf(self, x):
return self._dist.pdf(x, self.alpha)
def mean(self):
return self._dist.mean(self.alpha)
def var(self):
return self._dist.var(self.alpha)
def entropy(self):
return self._dist.entropy(self.alpha)
def rvs(self, size=1, random_state=None):
return self._dist.rvs(self.alpha, size, random_state)
# Set frozen generator docstrings from corresponding docstrings in
# multivariate_normal_gen and fill in default strings in class docstrings
for name in ['logpdf', 'pdf', 'rvs', 'mean', 'var', 'entropy']:
method = dirichlet_gen.__dict__[name]
method_frozen = dirichlet_frozen.__dict__[name]
method_frozen.__doc__ = doccer.docformat(
method.__doc__, dirichlet_docdict_noparams)
method.__doc__ = doccer.docformat(method.__doc__, dirichlet_docdict_params)
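# Illustrative sketch (not part of the library API): a quick numerical check of
# the Dirichlet machinery defined above. It draws samples with the module-level
# `dirichlet` instance and compares the empirical mean against the analytic mean
# alpha / alpha.sum(). The helper name, default `alpha`, and sample size are
# arbitrary choices for this demonstration; it relies on the module's `np` import.
def _dirichlet_moment_check_example(alpha=(0.4, 5.0, 15.0), n_samples=2000,
                                    seed=123):
    alpha = np.asarray(alpha, dtype=float)
    samples = dirichlet.rvs(alpha, size=n_samples, random_state=seed)
    # Every draw lies on the simplex: non-negative components summing to 1.
    assert np.allclose(samples.sum(axis=-1), 1.0)
    empirical_mean = samples.mean(axis=0)
    analytic_mean = dirichlet.mean(alpha)
    return empirical_mean, analytic_mean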
_wishart_doc_default_callparams = """\
df : int
Degrees of freedom, must be greater than or equal to dimension of the
scale matrix
scale : array_like
Symmetric positive definite scale matrix of the distribution
"""
_wishart_doc_callparams_note = ""
_wishart_doc_frozen_callparams = ""
_wishart_doc_frozen_callparams_note = \
"""See class definition for a detailed description of parameters."""
wishart_docdict_params = {
'_doc_default_callparams': _wishart_doc_default_callparams,
'_doc_callparams_note': _wishart_doc_callparams_note,
'_doc_random_state': _doc_random_state
}
wishart_docdict_noparams = {
'_doc_default_callparams': _wishart_doc_frozen_callparams,
'_doc_callparams_note': _wishart_doc_frozen_callparams_note,
'_doc_random_state': _doc_random_state
}
class wishart_gen(multi_rv_generic):
r"""A Wishart random variable.
The `df` keyword specifies the degrees of freedom. The `scale` keyword
specifies the scale matrix, which must be symmetric and positive definite.
In this context, the scale matrix is often interpreted in terms of a
multivariate normal precision matrix (the inverse of the covariance
matrix). These arguments must satisfy the relationship
``df > p - 1``, where ``p`` is the dimension of the scale matrix, but see the
notes on using the `rvs` method with ``p - 1 < df < p``.
Methods
-------
``pdf(x, df, scale)``
Probability density function.
``logpdf(x, df, scale)``
Log of the probability density function.
``rvs(df, scale, size=1, random_state=None)``
Draw random samples from a Wishart distribution.
``entropy()``
Compute the differential entropy of the Wishart distribution.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_doc_default_callparams)s
%(_doc_random_state)s
Alternatively, the object may be called (as a function) to fix the degrees
of freedom and scale parameters, returning a "frozen" Wishart random
variable:
rv = wishart(df=1, scale=1)
- Frozen object with the same methods but holding the given
degrees of freedom and scale fixed.
See Also
--------
invwishart, chi2
Notes
-----
%(_doc_callparams_note)s
The scale matrix `scale` must be a symmetric positive definite
matrix. Singular matrices, including the symmetric positive semi-definite
case, are not supported.
The Wishart distribution is often denoted
.. math::
W_p(\nu, \Sigma)
where :math:`\nu` is the degrees of freedom and :math:`\Sigma` is the
:math:`p \times p` scale matrix.
The probability density function for `wishart` has support over positive
definite matrices :math:`S`; if :math:`S \sim W_p(\nu, \Sigma)`, then
its PDF is given by:
.. math::
f(S) = \frac{|S|^{\frac{\nu - p - 1}{2}}}{2^{ \frac{\nu p}{2} }
|\Sigma|^\frac{\nu}{2} \Gamma_p \left ( \frac{\nu}{2} \right )}
\exp\left( -tr(\Sigma^{-1} S) / 2 \right)
If :math:`S \sim W_p(\nu, \Sigma)` (Wishart) then
:math:`S^{-1} \sim W_p^{-1}(\nu, \Sigma^{-1})` (inverse Wishart).
If the scale matrix is 1-dimensional and equal to one, then the Wishart
distribution :math:`W_1(\nu, 1)` collapses to the :math:`\chi^2(\nu)`
distribution.
The algorithm [2]_ implemented by the `rvs` method may
produce numerically singular matrices with :math:`p - 1 < \nu < p`; the
user may wish to check for this condition and generate replacement samples
as necessary.
.. versionadded:: 0.16.0
References
----------
.. [1] M.L. Eaton, "Multivariate Statistics: A Vector Space Approach",
Wiley, 1983.
.. [2] W.B. Smith and R.R. Hocking, "Algorithm AS 53: Wishart Variate
Generator", Applied Statistics, vol. 21, pp. 341-345, 1972.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy.stats import wishart, chi2
>>> x = np.linspace(1e-5, 8, 100)
>>> w = wishart.pdf(x, df=3, scale=1); w[:5]
array([ 0.00126156, 0.10892176, 0.14793434, 0.17400548, 0.1929669 ])
>>> c = chi2.pdf(x, 3); c[:5]
array([ 0.00126156, 0.10892176, 0.14793434, 0.17400548, 0.1929669 ])
>>> plt.plot(x, w)
The input quantiles can be any shape of array, as long as the last
axis labels the components.
"""
def __init__(self, seed=None):
super().__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__, wishart_docdict_params)
def __call__(self, df=None, scale=None, seed=None):
"""Create a frozen Wishart distribution.
See `wishart_frozen` for more information.
"""
return wishart_frozen(df, scale, seed)
def _process_parameters(self, df, scale):
if scale is None:
scale = 1.0
scale = np.asarray(scale, dtype=float)
if scale.ndim == 0:
scale = scale[np.newaxis, np.newaxis]
elif scale.ndim == 1:
scale = np.diag(scale)
elif scale.ndim == 2 and not scale.shape[0] == scale.shape[1]:
raise ValueError("Array 'scale' must be square if it is two"
" dimensional, but scale.scale = %s."
% str(scale.shape))
elif scale.ndim > 2:
raise ValueError("Array 'scale' must be at most two-dimensional,"
" but scale.ndim = %d" % scale.ndim)
dim = scale.shape[0]
if df is None:
df = dim
elif not np.isscalar(df):
raise ValueError("Degrees of freedom must be a scalar.")
elif df <= dim - 1:
raise ValueError("Degrees of freedom must be greater than the "
"dimension of scale matrix minus 1.")
return dim, df, scale
def _process_quantiles(self, x, dim):
"""
Adjust quantiles array so that last axis labels the components of
each data point.
"""
x = np.asarray(x, dtype=float)
if x.ndim == 0:
x = x * np.eye(dim)[:, :, np.newaxis]
if x.ndim == 1:
if dim == 1:
x = x[np.newaxis, np.newaxis, :]
else:
x = np.diag(x)[:, :, np.newaxis]
elif x.ndim == 2:
if not x.shape[0] == x.shape[1]:
raise ValueError("Quantiles must be square if they are two"
" dimensional, but x.shape = %s."
% str(x.shape))
x = x[:, :, np.newaxis]
elif x.ndim == 3:
if not x.shape[0] == x.shape[1]:
raise ValueError("Quantiles must be square in the first two"
" dimensions if they are three dimensional"
", but x.shape = %s." % str(x.shape))
elif x.ndim > 3:
raise ValueError("Quantiles must be at most two-dimensional with"
" an additional dimension for multiple"
"components, but x.ndim = %d" % x.ndim)
# Now we have 3-dim array; should have shape [dim, dim, *]
if not x.shape[0:2] == (dim, dim):
raise ValueError('Quantiles have incompatible dimensions: should'
' be %s, got %s.' % ((dim, dim), x.shape[0:2]))
return x
def _process_size(self, size):
size = np.asarray(size)
if size.ndim == 0:
size = size[np.newaxis]
elif size.ndim > 1:
raise ValueError('Size must be an integer or tuple of integers;'
' thus must have dimension <= 1.'
' Got size = %s' % str(tuple(size)))
n = size.prod()
shape = tuple(size)
return n, shape
def _logpdf(self, x, dim, df, scale, log_det_scale, C):
"""Log of the Wishart probability density function.
Parameters
----------
x : ndarray
Points at which to evaluate the log of the probability
density function
dim : int
Dimension of the scale matrix
df : int
Degrees of freedom
scale : ndarray
Scale matrix
log_det_scale : float
Logarithm of the determinant of the scale matrix
C : ndarray
Cholesky factorization of the scale matrix, lower triangular.
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'logpdf' instead.
"""
# log determinant of x
# Note: x has components along the last axis, so that x.T has
# components along the 0-th axis. Then since det(A) = det(A'), this
# gives us a 1-dim vector of determinants
# Retrieve tr(scale^{-1} x)
log_det_x = np.empty(x.shape[-1])
scale_inv_x = np.empty(x.shape)
tr_scale_inv_x = np.empty(x.shape[-1])
for i in range(x.shape[-1]):
_, log_det_x[i] = self._cholesky_logdet(x[:, :, i])
scale_inv_x[:, :, i] = scipy.linalg.cho_solve((C, True), x[:, :, i])
tr_scale_inv_x[i] = scale_inv_x[:, :, i].trace()
# Log PDF
out = ((0.5 * (df - dim - 1) * log_det_x - 0.5 * tr_scale_inv_x) -
(0.5 * df * dim * _LOG_2 + 0.5 * df * log_det_scale +
multigammaln(0.5*df, dim)))
return out
def logpdf(self, x, df, scale):
"""Log of the Wishart probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
Each quantile must be a symmetric positive definite matrix.
%(_doc_default_callparams)s
Returns
-------
pdf : ndarray
Log of the probability density function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
"""
dim, df, scale = self._process_parameters(df, scale)
x = self._process_quantiles(x, dim)
# Cholesky decomposition of scale, get log(det(scale))
C, log_det_scale = self._cholesky_logdet(scale)
out = self._logpdf(x, dim, df, scale, log_det_scale, C)
return _squeeze_output(out)
def pdf(self, x, df, scale):
"""Wishart probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
Each quantile must be a symmetric positive definite matrix.
%(_doc_default_callparams)s
Returns
-------
pdf : ndarray
Probability density function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
"""
return np.exp(self.logpdf(x, df, scale))
def _mean(self, dim, df, scale):
"""Mean of the Wishart distribution.
Parameters
----------
dim : int
Dimension of the scale matrix
%(_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'mean' instead.
"""
return df * scale
def mean(self, df, scale):
"""Mean of the Wishart distribution.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
mean : float
The mean of the distribution
"""
dim, df, scale = self._process_parameters(df, scale)
out = self._mean(dim, df, scale)
return _squeeze_output(out)
def _mode(self, dim, df, scale):
"""Mode of the Wishart distribution.
Parameters
----------
dim : int
Dimension of the scale matrix
%(_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'mode' instead.
"""
if df >= dim + 1:
out = (df-dim-1) * scale
else:
out = None
return out
def mode(self, df, scale):
"""Mode of the Wishart distribution
Only valid if the degrees of freedom are greater than the dimension of
the scale matrix.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
mode : float or None
The Mode of the distribution
"""
dim, df, scale = self._process_parameters(df, scale)
out = self._mode(dim, df, scale)
return _squeeze_output(out) if out is not None else out
def _var(self, dim, df, scale):
"""Variance of the Wishart distribution.
Parameters
----------
dim : int
Dimension of the scale matrix
%(_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'var' instead.
"""
var = scale**2
diag = scale.diagonal() # 1 x dim array
var += np.outer(diag, diag)
var *= df
return var
def var(self, df, scale):
"""Variance of the Wishart distribution.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
var : float
The variance of the distribution
"""
dim, df, scale = self._process_parameters(df, scale)
out = self._var(dim, df, scale)
return _squeeze_output(out)
def _standard_rvs(self, n, shape, dim, df, random_state):
"""
Parameters
----------
n : integer
Number of variates to generate
shape : iterable
Shape of the variates to generate
dim : int
Dimension of the scale matrix
df : int
Degrees of freedom
random_state : {None, int, `numpy.random.Generator`,
`numpy.random.RandomState`}, optional
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance
then that instance is used.
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'rvs' instead.
"""
# Random normal variates for off-diagonal elements
n_tril = dim * (dim-1) // 2
covariances = random_state.normal(
size=n*n_tril).reshape(shape+(n_tril,))
# Random chi-square variates for diagonal elements
variances = (np.r_[[random_state.chisquare(df-(i+1)+1, size=n)**0.5
for i in range(dim)]].reshape((dim,) +
shape[::-1]).T)
# Create the A matri(ces) - lower triangular
A = np.zeros(shape + (dim, dim))
# Input the covariances
size_idx = tuple([slice(None, None, None)]*len(shape))
tril_idx = np.tril_indices(dim, k=-1)
A[size_idx + tril_idx] = covariances
# Input the variances
diag_idx = np.diag_indices(dim)
A[size_idx + diag_idx] = variances
return A
def _rvs(self, n, shape, dim, df, C, random_state):
"""Draw random samples from a Wishart distribution.
Parameters
----------
n : integer
Number of variates to generate
shape : iterable
Shape of the variates to generate
dim : int
Dimension of the scale matrix
df : int
Degrees of freedom
C : ndarray
Cholesky factorization of the scale matrix, lower triangular.
%(_doc_random_state)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'rvs' instead.
"""
random_state = self._get_random_state(random_state)
# Calculate the matrices A, which are actually lower triangular
# Cholesky factorizations of a matrix B such that B ~ W(df, I)
A = self._standard_rvs(n, shape, dim, df, random_state)
# Calculate SA = C A A' C', where SA ~ W(df, scale)
# Note: this is the product of a (lower) (lower) (lower)' (lower)'
# or, denoting B = AA', it is C B C' where C is the lower
# triangular Cholesky factorization of the scale matrix.
# this appears to conflict with the instructions in [1]_, which
# suggest that it should be D' B D where D is the lower
# triangular factorization of the scale matrix. However, it is
# meant to refer to the Bartlett (1933) representation of a
# Wishart random variate as L A A' L' where L is lower triangular
# so it appears that understanding D' to be upper triangular
# is either a typo in or misreading of [1]_.
for index in np.ndindex(shape):
CA = np.dot(C, A[index])
A[index] = np.dot(CA, CA.T)
return A
def rvs(self, df, scale, size=1, random_state=None):
"""Draw random samples from a Wishart distribution.
Parameters
----------
%(_doc_default_callparams)s
size : integer or iterable of integers, optional
Number of samples to draw (default 1).
%(_doc_random_state)s
Returns
-------
rvs : ndarray
Random variates of shape (`size`) + (`dim`, `dim`), where `dim` is
the dimension of the scale matrix.
Notes
-----
%(_doc_callparams_note)s
"""
n, shape = self._process_size(size)
dim, df, scale = self._process_parameters(df, scale)
# Cholesky decomposition of scale
C = scipy.linalg.cholesky(scale, lower=True)
out = self._rvs(n, shape, dim, df, C, random_state)
return _squeeze_output(out)
def _entropy(self, dim, df, log_det_scale):
"""Compute the differential entropy of the Wishart.
Parameters
----------
dim : int
Dimension of the scale matrix
df : int
Degrees of freedom
log_det_scale : float
Logarithm of the determinant of the scale matrix
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'entropy' instead.
"""
return (
0.5 * (dim+1) * log_det_scale +
0.5 * dim * (dim+1) * _LOG_2 +
multigammaln(0.5*df, dim) -
0.5 * (df - dim - 1) * np.sum(
[psi(0.5*(df + 1 - (i+1))) for i in range(dim)]
) +
0.5 * df * dim
)
def entropy(self, df, scale):
"""Compute the differential entropy of the Wishart.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
h : scalar
Entropy of the Wishart distribution
Notes
-----
%(_doc_callparams_note)s
"""
dim, df, scale = self._process_parameters(df, scale)
_, log_det_scale = self._cholesky_logdet(scale)
return self._entropy(dim, df, log_det_scale)
def _cholesky_logdet(self, scale):
"""Compute Cholesky decomposition and determine (log(det(scale)).
Parameters
----------
scale : ndarray
Scale matrix.
Returns
-------
c_decomp : ndarray
The Cholesky decomposition of `scale`.
logdet : scalar
The log of the determinant of `scale`.
Notes
-----
This computation of ``logdet`` is equivalent to
``np.linalg.slogdet(scale)``. It is ~2x faster though.
"""
c_decomp = scipy.linalg.cholesky(scale, lower=True)
logdet = 2 * np.sum(np.log(c_decomp.diagonal()))
return c_decomp, logdet
wishart = wishart_gen()
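# Illustrative sketch (not part of the library API): the docstring above notes
# that the one-dimensional Wishart W_1(df, 1) collapses to the chi-square
# distribution with df degrees of freedom. This hypothetical helper checks that
# claim pointwise; the grid and default df are arbitrary, and the local import
# of `chi2` is for the demonstration only.
def _wishart_chi2_equivalence_example(df=3):
    from scipy.stats import chi2
    x = np.linspace(1e-5, 8.0, 50)
    # Maximum pointwise difference between the two densities (should be ~0).
    return np.max(np.abs(wishart.pdf(x, df=df, scale=1) - chi2.pdf(x, df)))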
class wishart_frozen(multi_rv_frozen):
"""Create a frozen Wishart distribution.
Parameters
----------
df : array_like
Degrees of freedom of the distribution
scale : array_like
Scale matrix of the distribution
seed : {None, int, `numpy.random.Generator`,
`numpy.random.RandomState`}, optional
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance then
that instance is used.
"""
def __init__(self, df, scale, seed=None):
self._dist = wishart_gen(seed)
self.dim, self.df, self.scale = self._dist._process_parameters(
df, scale)
self.C, self.log_det_scale = self._dist._cholesky_logdet(self.scale)
def logpdf(self, x):
x = self._dist._process_quantiles(x, self.dim)
out = self._dist._logpdf(x, self.dim, self.df, self.scale,
self.log_det_scale, self.C)
return _squeeze_output(out)
def pdf(self, x):
return np.exp(self.logpdf(x))
def mean(self):
out = self._dist._mean(self.dim, self.df, self.scale)
return _squeeze_output(out)
def mode(self):
out = self._dist._mode(self.dim, self.df, self.scale)
return _squeeze_output(out) if out is not None else out
def var(self):
out = self._dist._var(self.dim, self.df, self.scale)
return _squeeze_output(out)
def rvs(self, size=1, random_state=None):
n, shape = self._dist._process_size(size)
out = self._dist._rvs(n, shape, self.dim, self.df,
self.C, random_state)
return _squeeze_output(out)
def entropy(self):
return self._dist._entropy(self.dim, self.df, self.log_det_scale)
# Set frozen generator docstrings from corresponding docstrings in
# Wishart and fill in default strings in class docstrings
for name in ['logpdf', 'pdf', 'mean', 'mode', 'var', 'rvs', 'entropy']:
method = wishart_gen.__dict__[name]
method_frozen = wishart_frozen.__dict__[name]
method_frozen.__doc__ = doccer.docformat(
method.__doc__, wishart_docdict_noparams)
method.__doc__ = doccer.docformat(method.__doc__, wishart_docdict_params)
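# Illustrative sketch (not part of the library API): `wishart.rvs` above is
# built on the Bartlett decomposition (a lower-triangular factor with
# chi-square diagonal and standard-normal off-diagonal entries, mapped through
# the Cholesky factor of the scale). This hypothetical helper draws samples and
# compares their empirical mean with the analytic mean df * scale; the scale
# matrix, df, and sample size are arbitrary demonstration values.
def _wishart_mean_check_example(df=5, n_samples=4000, seed=0):
    scale = np.array([[2.0, 0.3],
                      [0.3, 1.0]])
    samples = wishart.rvs(df, scale, size=n_samples, random_state=seed)
    empirical_mean = samples.mean(axis=0)
    analytic_mean = wishart.mean(df, scale)    # equals df * scale
    return empirical_mean, analytic_mean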
def _cho_inv_batch(a, check_finite=True):
"""
Invert the matrices a_i, using a Cholesky factorization of A, where
a_i resides in the last two dimensions of a and the other indices describe
the index i.
Overwrites the data in a.
Parameters
----------
a : array
Array of matrices to invert, where the matrices themselves are stored
in the last two dimensions.
check_finite : bool, optional
Whether to check that the input matrices contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Returns
-------
x : array
Array of inverses of the matrices ``a_i``.
See Also
--------
scipy.linalg.cholesky : Cholesky factorization of a matrix
"""
if check_finite:
a1 = asarray_chkfinite(a)
else:
a1 = asarray(a)
if len(a1.shape) < 2 or a1.shape[-2] != a1.shape[-1]:
raise ValueError('expected square matrix in last two dimensions')
potrf, potri = get_lapack_funcs(('potrf', 'potri'), (a1,))
triu_rows, triu_cols = np.triu_indices(a.shape[-2], k=1)
for index in np.ndindex(a1.shape[:-2]):
# Cholesky decomposition
a1[index], info = potrf(a1[index], lower=True, overwrite_a=False,
clean=False)
if info > 0:
raise LinAlgError("%d-th leading minor not positive definite"
% info)
if info < 0:
raise ValueError('illegal value in %d-th argument of internal'
' potrf' % -info)
# Inversion
a1[index], info = potri(a1[index], lower=True, overwrite_c=False)
if info > 0:
raise LinAlgError("the inverse could not be computed")
if info < 0:
raise ValueError('illegal value in %d-th argument of internal'
' potri' % -info)
# Make symmetric (dpotri only fills in the lower triangle)
a1[index][triu_rows, triu_cols] = a1[index][triu_cols, triu_rows]
return a1
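# Illustrative sketch (not part of the library API): `_cho_inv_batch` inverts a
# stack of symmetric positive definite matrices in place via potrf/potri. This
# hypothetical helper builds a small random SPD batch and compares the result
# against `np.linalg.inv`; the batch size, dimension, and seed are arbitrary.
def _cho_inv_batch_example(n_matrices=3, dim=4, seed=42):
    rng = np.random.RandomState(seed)
    a = rng.normal(size=(n_matrices, dim, dim))
    # a @ a.T is positive semi-definite; adding a multiple of I makes it SPD.
    spd = a @ np.transpose(a, (0, 2, 1)) + dim * np.eye(dim)
    inv_batch = _cho_inv_batch(spd.copy())     # copy: the input is overwritten
    inv_ref = np.linalg.inv(spd)
    return np.max(np.abs(inv_batch - inv_ref))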
class invwishart_gen(wishart_gen):
r"""An inverse Wishart random variable.
The `df` keyword specifies the degrees of freedom. The `scale` keyword
specifies the scale matrix, which must be symmetric and positive definite.
In this context, the scale matrix is often interpreted in terms of a
multivariate normal covariance matrix.
Methods
-------
``pdf(x, df, scale)``
Probability density function.
``logpdf(x, df, scale)``
Log of the probability density function.
``rvs(df, scale, size=1, random_state=None)``
Draw random samples from an inverse Wishart distribution.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_doc_default_callparams)s
%(_doc_random_state)s
Alternatively, the object may be called (as a function) to fix the degrees
of freedom and scale parameters, returning a "frozen" inverse Wishart
random variable:
rv = invwishart(df=1, scale=1)
- Frozen object with the same methods but holding the given
degrees of freedom and scale fixed.
See Also
--------
wishart
Notes
-----
%(_doc_callparams_note)s
The scale matrix `scale` must be a symmetric positive definite
matrix. Singular matrices, including the symmetric positive semi-definite
case, are not supported.
The inverse Wishart distribution is often denoted
.. math::
W_p^{-1}(\nu, \Psi)
where :math:`\nu` is the degrees of freedom and :math:`\Psi` is the
:math:`p \times p` scale matrix.
The probability density function for `invwishart` has support over positive
definite matrices :math:`S`; if :math:`S \sim W^{-1}_p(\nu, \Sigma)`,
then its PDF is given by:
.. math::
f(S) = \frac{|\Sigma|^\frac{\nu}{2}}{2^{ \frac{\nu p}{2} }
|S|^{\frac{\nu + p + 1}{2}} \Gamma_p \left(\frac{\nu}{2} \right)}
\exp\left( -tr(\Sigma S^{-1}) / 2 \right)
If :math:`S \sim W_p^{-1}(\nu, \Psi)` (inverse Wishart) then
:math:`S^{-1} \sim W_p(\nu, \Psi^{-1})` (Wishart).
If the scale matrix is 1-dimensional and equal to one, then the inverse
Wishart distribution :math:`W_1^{-1}(\nu, 1)` collapses to the
inverse Gamma distribution with parameters shape = :math:`\frac{\nu}{2}`
and scale = :math:`\frac{1}{2}`.
.. versionadded:: 0.16.0
References
----------
.. [1] M.L. Eaton, "Multivariate Statistics: A Vector Space Approach",
Wiley, 1983.
.. [2] M.C. Jones, "Generating Inverse Wishart Matrices", Communications
in Statistics - Simulation and Computation, vol. 14.2, pp.511-514,
1985.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy.stats import invwishart, invgamma
>>> x = np.linspace(0.01, 1, 100)
>>> iw = invwishart.pdf(x, df=6, scale=1)
>>> iw[:3]
array([ 1.20546865e-15, 5.42497807e-06, 4.45813929e-03])
>>> ig = invgamma.pdf(x, 6/2., scale=1./2)
>>> ig[:3]
array([ 1.20546865e-15, 5.42497807e-06, 4.45813929e-03])
>>> plt.plot(x, iw)
The input quantiles can be any shape of array, as long as the last
axis labels the components.
"""
def __init__(self, seed=None):
super().__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__, wishart_docdict_params)
def __call__(self, df=None, scale=None, seed=None):
"""Create a frozen inverse Wishart distribution.
See `invwishart_frozen` for more information.
"""
return invwishart_frozen(df, scale, seed)
def _logpdf(self, x, dim, df, scale, log_det_scale):
"""Log of the inverse Wishart probability density function.
Parameters
----------
x : ndarray
Points at which to evaluate the log of the probability
density function.
dim : int
Dimension of the scale matrix
df : int
Degrees of freedom
scale : ndarray
Scale matrix
log_det_scale : float
Logarithm of the determinant of the scale matrix
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'logpdf' instead.
"""
log_det_x = np.empty(x.shape[-1])
x_inv = np.copy(x).T
if dim > 1:
_cho_inv_batch(x_inv) # works in-place
else:
x_inv = 1./x_inv
tr_scale_x_inv = np.empty(x.shape[-1])
for i in range(x.shape[-1]):
C, lower = scipy.linalg.cho_factor(x[:, :, i], lower=True)
log_det_x[i] = 2 * np.sum(np.log(C.diagonal()))
tr_scale_x_inv[i] = np.dot(scale, x_inv[i]).trace()
# Log PDF
out = ((0.5 * df * log_det_scale - 0.5 * tr_scale_x_inv) -
(0.5 * df * dim * _LOG_2 + 0.5 * (df + dim + 1) * log_det_x) -
multigammaln(0.5*df, dim))
return out
def logpdf(self, x, df, scale):
"""Log of the inverse Wishart probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
Each quantile must be a symmetric positive definite matrix.
%(_doc_default_callparams)s
Returns
-------
pdf : ndarray
Log of the probability density function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
"""
dim, df, scale = self._process_parameters(df, scale)
x = self._process_quantiles(x, dim)
_, log_det_scale = self._cholesky_logdet(scale)
out = self._logpdf(x, dim, df, scale, log_det_scale)
return _squeeze_output(out)
def pdf(self, x, df, scale):
"""Inverse Wishart probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
Each quantile must be a symmetric positive definite matrix.
%(_doc_default_callparams)s
Returns
-------
pdf : ndarray
Probability density function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
"""
return np.exp(self.logpdf(x, df, scale))
def _mean(self, dim, df, scale):
"""Mean of the inverse Wishart distribution.
Parameters
----------
dim : int
Dimension of the scale matrix
%(_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'mean' instead.
"""
if df > dim + 1:
out = scale / (df - dim - 1)
else:
out = None
return out
def mean(self, df, scale):
"""Mean of the inverse Wishart distribution.
Only valid if the degrees of freedom are greater than the dimension of
the scale matrix plus one.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
mean : float or None
The mean of the distribution
"""
dim, df, scale = self._process_parameters(df, scale)
out = self._mean(dim, df, scale)
return _squeeze_output(out) if out is not None else out
def _mode(self, dim, df, scale):
"""Mode of the inverse Wishart distribution.
Parameters
----------
dim : int
Dimension of the scale matrix
%(_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'mode' instead.
"""
return scale / (df + dim + 1)
def mode(self, df, scale):
"""Mode of the inverse Wishart distribution.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
mode : float
The Mode of the distribution
"""
dim, df, scale = self._process_parameters(df, scale)
out = self._mode(dim, df, scale)
return _squeeze_output(out)
def _var(self, dim, df, scale):
"""Variance of the inverse Wishart distribution.
Parameters
----------
dim : int
Dimension of the scale matrix
%(_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'var' instead.
"""
if df > dim + 3:
var = (df - dim + 1) * scale**2
diag = scale.diagonal() # 1 x dim array
var += (df - dim - 1) * np.outer(diag, diag)
var /= (df - dim) * (df - dim - 1)**2 * (df - dim - 3)
else:
var = None
return var
def var(self, df, scale):
"""Variance of the inverse Wishart distribution.
Only valid if the degrees of freedom are greater than the dimension of
the scale matrix plus three.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
var : float
The variance of the distribution
"""
dim, df, scale = self._process_parameters(df, scale)
out = self._var(dim, df, scale)
return _squeeze_output(out) if out is not None else out
def _rvs(self, n, shape, dim, df, C, random_state):
"""Draw random samples from an inverse Wishart distribution.
Parameters
----------
n : integer
Number of variates to generate
shape : iterable
Shape of the variates to generate
dim : int
Dimension of the scale matrix
df : int
Degrees of freedom
C : ndarray
Cholesky factorization of the scale matrix, lower triangular.
%(_doc_random_state)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'rvs' instead.
"""
random_state = self._get_random_state(random_state)
# Get random draws A such that A ~ W(df, I)
A = super()._standard_rvs(n, shape, dim, df, random_state)
# Calculate SA = (CA)'^{-1} (CA)^{-1} ~ iW(df, scale)
eye = np.eye(dim)
trtrs = get_lapack_funcs(('trtrs'), (A,))
for index in np.ndindex(A.shape[:-2]):
# Calculate CA
CA = np.dot(C, A[index])
# Get (C A)^{-1} via triangular solver
if dim > 1:
CA, info = trtrs(CA, eye, lower=True)
if info > 0:
raise LinAlgError("Singular matrix.")
if info < 0:
raise ValueError('Illegal value in %d-th argument of'
' internal trtrs' % -info)
else:
CA = 1. / CA
# Get SA
A[index] = np.dot(CA.T, CA)
return A
def rvs(self, df, scale, size=1, random_state=None):
"""Draw random samples from an inverse Wishart distribution.
Parameters
----------
%(_doc_default_callparams)s
size : integer or iterable of integers, optional
Number of samples to draw (default 1).
%(_doc_random_state)s
Returns
-------
rvs : ndarray
Random variates of shape (`size`) + (`dim`, `dim`), where `dim` is
the dimension of the scale matrix.
Notes
-----
%(_doc_callparams_note)s
"""
n, shape = self._process_size(size)
dim, df, scale = self._process_parameters(df, scale)
# Invert the scale
eye = np.eye(dim)
L, lower = scipy.linalg.cho_factor(scale, lower=True)
inv_scale = scipy.linalg.cho_solve((L, lower), eye)
# Cholesky decomposition of inverted scale
C = scipy.linalg.cholesky(inv_scale, lower=True)
out = self._rvs(n, shape, dim, df, C, random_state)
return _squeeze_output(out)
def entropy(self):
# Need to find reference for inverse Wishart entropy
raise AttributeError
invwishart = invwishart_gen()
class invwishart_frozen(multi_rv_frozen):
def __init__(self, df, scale, seed=None):
"""Create a frozen inverse Wishart distribution.
Parameters
----------
df : array_like
Degrees of freedom of the distribution
scale : array_like
Scale matrix of the distribution
seed : {None, int, `numpy.random.Generator`}, optional
If `seed` is None the `numpy.random.Generator` singleton is used.
If `seed` is an int, a new ``Generator`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` instance then that instance is
used.
"""
self._dist = invwishart_gen(seed)
self.dim, self.df, self.scale = self._dist._process_parameters(
df, scale
)
# Get the determinant via Cholesky factorization
C, lower = scipy.linalg.cho_factor(self.scale, lower=True)
self.log_det_scale = 2 * np.sum(np.log(C.diagonal()))
# Get the inverse using the Cholesky factorization
eye = np.eye(self.dim)
self.inv_scale = scipy.linalg.cho_solve((C, lower), eye)
# Get the Cholesky factorization of the inverse scale
self.C = scipy.linalg.cholesky(self.inv_scale, lower=True)
def logpdf(self, x):
x = self._dist._process_quantiles(x, self.dim)
out = self._dist._logpdf(x, self.dim, self.df, self.scale,
self.log_det_scale)
return _squeeze_output(out)
def pdf(self, x):
return np.exp(self.logpdf(x))
def mean(self):
out = self._dist._mean(self.dim, self.df, self.scale)
return _squeeze_output(out) if out is not None else out
def mode(self):
out = self._dist._mode(self.dim, self.df, self.scale)
return _squeeze_output(out)
def var(self):
out = self._dist._var(self.dim, self.df, self.scale)
return _squeeze_output(out) if out is not None else out
def rvs(self, size=1, random_state=None):
n, shape = self._dist._process_size(size)
out = self._dist._rvs(n, shape, self.dim, self.df,
self.C, random_state)
return _squeeze_output(out)
def entropy(self):
# Need to find reference for inverse Wishart entropy
raise AttributeError
# Set frozen generator docstrings from corresponding docstrings in
# inverse Wishart and fill in default strings in class docstrings
for name in ['logpdf', 'pdf', 'mean', 'mode', 'var', 'rvs']:
method = invwishart_gen.__dict__[name]
method_frozen = invwishart_frozen.__dict__[name]
method_frozen.__doc__ = doccer.docformat(
method.__doc__, wishart_docdict_noparams)
method.__doc__ = doccer.docformat(method.__doc__, wishart_docdict_params)
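# Illustrative sketch (not part of the library API): a hypothetical helper that
# checks the analytic mean of `invwishart`, scale / (df - dim - 1) for
# df > dim + 1, against the empirical mean of a batch of draws. The scale
# matrix, df, and sample size below are arbitrary demonstration values.
def _invwishart_mean_check_example(df=10, n_samples=4000, seed=1):
    scale = np.array([[1.0, 0.2],
                      [0.2, 2.0]])
    dim = scale.shape[0]
    samples = invwishart.rvs(df, scale, size=n_samples, random_state=seed)
    empirical_mean = samples.mean(axis=0)
    analytic_mean = scale / (df - dim - 1)   # same as invwishart.mean(df, scale)
    return empirical_mean, analytic_mean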
_multinomial_doc_default_callparams = """\
n : int
Number of trials
p : array_like
Probability of a trial falling into each category; should sum to 1
"""
_multinomial_doc_callparams_note = \
"""`n` should be a positive integer. Each element of `p` should be in the
interval :math:`[0,1]` and the elements should sum to 1. If they do not sum to
1, the last element of the `p` array is not used and is replaced with the
remaining probability left over from the earlier elements.
"""
_multinomial_doc_frozen_callparams = ""
_multinomial_doc_frozen_callparams_note = \
"""See class definition for a detailed description of parameters."""
multinomial_docdict_params = {
'_doc_default_callparams': _multinomial_doc_default_callparams,
'_doc_callparams_note': _multinomial_doc_callparams_note,
'_doc_random_state': _doc_random_state
}
multinomial_docdict_noparams = {
'_doc_default_callparams': _multinomial_doc_frozen_callparams,
'_doc_callparams_note': _multinomial_doc_frozen_callparams_note,
'_doc_random_state': _doc_random_state
}
class multinomial_gen(multi_rv_generic):
r"""A multinomial random variable.
Methods
-------
``pmf(x, n, p)``
Probability mass function.
``logpmf(x, n, p)``
Log of the probability mass function.
``rvs(n, p, size=1, random_state=None)``
Draw random samples from a multinomial distribution.
``entropy(n, p)``
Compute the entropy of the multinomial distribution.
``cov(n, p)``
Compute the covariance matrix of the multinomial distribution.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_doc_default_callparams)s
%(_doc_random_state)s
Notes
-----
%(_doc_callparams_note)s
Alternatively, the object may be called (as a function) to fix the `n` and
`p` parameters, returning a "frozen" multinomial random variable:
The probability mass function for `multinomial` is
.. math::
f(x) = \frac{n!}{x_1! \cdots x_k!} p_1^{x_1} \cdots p_k^{x_k},
supported on :math:`x=(x_1, \ldots, x_k)` where each :math:`x_i` is a
nonnegative integer and their sum is :math:`n`.
.. versionadded:: 0.19.0
Examples
--------
>>> from scipy.stats import multinomial
>>> rv = multinomial(8, [0.3, 0.2, 0.5])
>>> rv.pmf([1, 3, 4])
0.042000000000000072
The multinomial distribution for :math:`k=2` is identical to the
corresponding binomial distribution (tiny numerical differences
notwithstanding):
>>> from scipy.stats import binom
>>> multinomial.pmf([3, 4], n=7, p=[0.4, 0.6])
0.29030399999999973
>>> binom.pmf(3, 7, 0.4)
0.29030400000000012
The functions ``pmf``, ``logpmf``, ``entropy``, and ``cov`` support
broadcasting, under the convention that the vector parameters (``x`` and
``p``) are interpreted as if each row along the last axis is a single
object. For instance:
>>> multinomial.pmf([[3, 4], [3, 5]], n=[7, 8], p=[.3, .7])
array([0.2268945, 0.25412184])
Here, ``x.shape == (2, 2)``, ``n.shape == (2,)``, and ``p.shape == (2,)``,
but following the rules mentioned above they behave as if the rows
``[3, 4]`` and ``[3, 5]`` in ``x`` and ``[.3, .7]`` in ``p`` were a single
object, and as if we had ``x.shape = (2,)``, ``n.shape = (2,)``, and
``p.shape = ()``. To obtain the individual elements without broadcasting,
we would do this:
>>> multinomial.pmf([3, 4], n=7, p=[.3, .7])
0.2268945
>>> multinomial.pmf([3, 5], 8, p=[.3, .7])
0.25412184
This broadcasting also works for ``cov``, where the output objects are
square matrices of size ``p.shape[-1]``. For example:
>>> multinomial.cov([4, 5], [[.3, .7], [.4, .6]])
array([[[ 0.84, -0.84],
[-0.84, 0.84]],
[[ 1.2 , -1.2 ],
[-1.2 , 1.2 ]]])
In this example, ``n.shape == (2,)`` and ``p.shape == (2, 2)``, and
following the rules above, these broadcast as if ``p.shape == (2,)``.
Thus the result should also be of shape ``(2,)``, but since each output is
a :math:`2 \times 2` matrix, the result in fact has shape ``(2, 2, 2)``,
where ``result[0]`` is equal to ``multinomial.cov(n=4, p=[.3, .7])`` and
``result[1]`` is equal to ``multinomial.cov(n=5, p=[.4, .6])``.
See also
--------
scipy.stats.binom : The binomial distribution.
numpy.random.Generator.multinomial : Sampling from the multinomial distribution.
scipy.stats.multivariate_hypergeom :
The multivariate hypergeometric distribution.
""" # noqa: E501
def __init__(self, seed=None):
super().__init__(seed)
self.__doc__ = \
doccer.docformat(self.__doc__, multinomial_docdict_params)
def __call__(self, n, p, seed=None):
"""Create a frozen multinomial distribution.
See `multinomial_frozen` for more information.
"""
return multinomial_frozen(n, p, seed)
def _process_parameters(self, n, p):
"""Returns: n_, p_, npcond.
n_ and p_ are arrays of the correct shape; npcond is a boolean array
flagging values out of the domain.
"""
p = np.array(p, dtype=np.float64, copy=True)
p[..., -1] = 1. - p[..., :-1].sum(axis=-1)
# true for bad p
pcond = np.any(p < 0, axis=-1)
pcond |= np.any(p > 1, axis=-1)
n = np.array(n, dtype=np.int_, copy=True)
# true for bad n
ncond = n <= 0
return n, p, ncond | pcond
def _process_quantiles(self, x, n, p):
"""Returns: x_, xcond.
x_ is an int array; xcond is a boolean array flagging values out of the
domain.
"""
xx = np.asarray(x, dtype=np.int_)
if xx.ndim == 0:
raise ValueError("x must be an array.")
if xx.size != 0 and not xx.shape[-1] == p.shape[-1]:
raise ValueError("Size of each quantile should be size of p: "
"received %d, but expected %d." %
(xx.shape[-1], p.shape[-1]))
# true for x out of the domain
cond = np.any(xx != x, axis=-1)
cond |= np.any(xx < 0, axis=-1)
cond = cond | (np.sum(xx, axis=-1) != n)
return xx, cond
def _checkresult(self, result, cond, bad_value):
result = np.asarray(result)
if cond.ndim != 0:
result[cond] = bad_value
elif cond:
if result.ndim == 0:
return bad_value
result[...] = bad_value
return result
def _logpmf(self, x, n, p):
return gammaln(n+1) + np.sum(xlogy(x, p) - gammaln(x+1), axis=-1)
def logpmf(self, x, n, p):
"""Log of the Multinomial probability mass function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_doc_default_callparams)s
Returns
-------
logpmf : ndarray or scalar
Log of the probability mass function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
"""
n, p, npcond = self._process_parameters(n, p)
x, xcond = self._process_quantiles(x, n, p)
result = self._logpmf(x, n, p)
# replace values for which x was out of the domain; broadcast
# xcond to the right shape
xcond_ = xcond | np.zeros(npcond.shape, dtype=np.bool_)
result = self._checkresult(result, xcond_, np.NINF)
# replace values bad for n or p; broadcast npcond to the right shape
npcond_ = npcond | np.zeros(xcond.shape, dtype=np.bool_)
return self._checkresult(result, npcond_, np.NAN)
def pmf(self, x, n, p):
"""Multinomial probability mass function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_doc_default_callparams)s
Returns
-------
pmf : ndarray or scalar
Probability density function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
"""
return np.exp(self.logpmf(x, n, p))
def mean(self, n, p):
"""Mean of the Multinomial distribution.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
mean : float
The mean of the distribution
"""
n, p, npcond = self._process_parameters(n, p)
result = n[..., np.newaxis]*p
return self._checkresult(result, npcond, np.NAN)
def cov(self, n, p):
"""Covariance matrix of the multinomial distribution.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
cov : ndarray
The covariance matrix of the distribution
"""
n, p, npcond = self._process_parameters(n, p)
nn = n[..., np.newaxis, np.newaxis]
result = nn * np.einsum('...j,...k->...jk', -p, p)
# change the diagonal
for i in range(p.shape[-1]):
result[..., i, i] += n*p[..., i]
return self._checkresult(result, npcond, np.nan)
def entropy(self, n, p):
r"""Compute the entropy of the multinomial distribution.
The entropy is computed using this expression:
.. math::
f(x) = - \log n! - n\sum_{i=1}^k p_i \log p_i +
\sum_{i=1}^k \sum_{x=0}^n \binom n x p_i^x(1-p_i)^{n-x} \log x!
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
h : scalar
Entropy of the Multinomial distribution
Notes
-----
%(_doc_callparams_note)s
"""
n, p, npcond = self._process_parameters(n, p)
x = np.r_[1:np.max(n)+1]
term1 = n*np.sum(entr(p), axis=-1)
term1 -= gammaln(n+1)
n = n[..., np.newaxis]
new_axes_needed = max(p.ndim, n.ndim) - x.ndim + 1
x.shape += (1,)*new_axes_needed
term2 = np.sum(binom.pmf(x, n, p)*gammaln(x+1),
axis=(-1, -1-new_axes_needed))
return self._checkresult(term1 + term2, npcond, np.nan)
def rvs(self, n, p, size=None, random_state=None):
"""Draw random samples from a Multinomial distribution.
Parameters
----------
%(_doc_default_callparams)s
size : integer or iterable of integers, optional
Number of samples to draw (default 1).
%(_doc_random_state)s
Returns
-------
rvs : ndarray or scalar
Random variates of shape (`size`, `len(p)`)
Notes
-----
%(_doc_callparams_note)s
"""
n, p, npcond = self._process_parameters(n, p)
random_state = self._get_random_state(random_state)
return random_state.multinomial(n, p, size)
multinomial = multinomial_gen()
class multinomial_frozen(multi_rv_frozen):
r"""Create a frozen Multinomial distribution.
Parameters
----------
n : int
number of trials
p: array_like
probability of a trial falling into each category; should sum to 1
seed : {None, int, `numpy.random.Generator`,
`numpy.random.RandomState`}, optional
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance then
that instance is used.
"""
def __init__(self, n, p, seed=None):
self._dist = multinomial_gen(seed)
self.n, self.p, self.npcond = self._dist._process_parameters(n, p)
# monkey patch self._dist
def _process_parameters(n, p):
return self.n, self.p, self.npcond
self._dist._process_parameters = _process_parameters
def logpmf(self, x):
return self._dist.logpmf(x, self.n, self.p)
def pmf(self, x):
return self._dist.pmf(x, self.n, self.p)
def mean(self):
return self._dist.mean(self.n, self.p)
def cov(self):
return self._dist.cov(self.n, self.p)
def entropy(self):
return self._dist.entropy(self.n, self.p)
def rvs(self, size=1, random_state=None):
return self._dist.rvs(self.n, self.p, size, random_state)
# Set frozen generator docstrings from corresponding docstrings in
# multinomial and fill in default strings in class docstrings
for name in ['logpmf', 'pmf', 'mean', 'cov', 'rvs']:
method = multinomial_gen.__dict__[name]
method_frozen = multinomial_frozen.__dict__[name]
method_frozen.__doc__ = doccer.docformat(
method.__doc__, multinomial_docdict_noparams)
method.__doc__ = doccer.docformat(method.__doc__,
multinomial_docdict_params)
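# Illustrative sketch (not part of the library API): a hypothetical helper that
# enumerates the full support of a small multinomial and checks that the pmf
# sums to one, alongside the analytic mean n * p. The default n and p are
# arbitrary demonstration values.
def _multinomial_pmf_check_example(n=4, p=(0.2, 0.3, 0.5)):
    p = np.asarray(p, dtype=float)
    support = [(i, j, n - i - j)
               for i in range(n + 1) for j in range(n + 1 - i)]
    total = multinomial.pmf(np.array(support), n, p).sum()   # ~1.0
    return total, multinomial.mean(n, p)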
class special_ortho_group_gen(multi_rv_generic):
r"""A matrix-valued SO(N) random variable.
Return a random rotation matrix, drawn from the Haar distribution
(the only uniform distribution on SO(n)).
The `dim` keyword specifies the dimension N.
Methods
-------
``rvs(dim=None, size=1, random_state=None)``
Draw random samples from SO(N).
Parameters
----------
dim : scalar
Dimension of matrices
Notes
-----
This class is wrapping the random_rot code from the MDP Toolkit,
https://github.com/mdp-toolkit/mdp-toolkit
Return a random rotation matrix, drawn from the Haar distribution
(the only uniform distribution on SO(n)).
The algorithm is described in the paper
Stewart, G.W., "The efficient generation of random orthogonal
matrices with an application to condition estimators", SIAM Journal
on Numerical Analysis, 17(3), pp. 403-409, 1980.
For more information see
https://en.wikipedia.org/wiki/Orthogonal_matrix#Randomization
See also the similar `ortho_group`. For a random rotation in three
dimensions, see `scipy.spatial.transform.Rotation.random`.
Examples
--------
>>> from scipy.stats import special_ortho_group
>>> x = special_ortho_group.rvs(3)
>>> np.dot(x, x.T)
array([[ 1.00000000e+00, 1.13231364e-17, -2.86852790e-16],
[ 1.13231364e-17, 1.00000000e+00, -1.46845020e-16],
[ -2.86852790e-16, -1.46845020e-16, 1.00000000e+00]])
>>> import scipy.linalg
>>> scipy.linalg.det(x)
1.0
This generates one random matrix from SO(3). It is orthogonal and
has a determinant of 1.
See Also
--------
ortho_group, scipy.spatial.transform.Rotation.random
"""
def __init__(self, seed=None):
super().__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__)
def __call__(self, dim=None, seed=None):
"""Create a frozen SO(N) distribution.
See `special_ortho_group_frozen` for more information.
"""
return special_ortho_group_frozen(dim, seed=seed)
def _process_parameters(self, dim):
"""Dimension N must be specified; it cannot be inferred."""
if dim is None or not np.isscalar(dim) or dim <= 1 or dim != int(dim):
raise ValueError("""Dimension of rotation must be specified,
and must be a scalar greater than 1.""")
return dim
def rvs(self, dim, size=1, random_state=None):
"""Draw random samples from SO(N).
Parameters
----------
dim : integer
Dimension of rotation space (N).
size : integer, optional
Number of samples to draw (default 1).
Returns
-------
rvs : ndarray or scalar
Random size N-dimensional matrices, dimension (size, dim, dim)
"""
random_state = self._get_random_state(random_state)
size = int(size)
if size > 1:
return np.array([self.rvs(dim, size=1, random_state=random_state)
for i in range(size)])
dim = self._process_parameters(dim)
H = np.eye(dim)
D = np.empty((dim,))
for n in range(dim-1):
x = random_state.normal(size=(dim-n,))
norm2 = np.dot(x, x)
x0 = x[0].item()
D[n] = np.sign(x[0]) if x[0] != 0 else 1
x[0] += D[n]*np.sqrt(norm2)
x /= np.sqrt((norm2 - x0**2 + x[0]**2) / 2.)
# Householder transformation
H[:, n:] -= np.outer(np.dot(H[:, n:], x), x)
D[-1] = (-1)**(dim-1)*D[:-1].prod()
# Equivalent to np.dot(np.diag(D), H) but faster, apparently
H = (D*H.T).T
return H
special_ortho_group = special_ortho_group_gen()
class special_ortho_group_frozen(multi_rv_frozen):
def __init__(self, dim=None, seed=None):
"""Create a frozen SO(N) distribution.
Parameters
----------
dim : scalar
Dimension of matrices
seed : {None, int, `numpy.random.Generator`,
`numpy.random.RandomState`}, optional
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance
then that instance is used.
Examples
--------
>>> from scipy.stats import special_ortho_group
>>> g = special_ortho_group(5)
>>> x = g.rvs()
"""
self._dist = special_ortho_group_gen(seed)
self.dim = self._dist._process_parameters(dim)
def rvs(self, size=1, random_state=None):
return self._dist.rvs(self.dim, size, random_state)
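# Illustrative sketch (not part of the library API): a hypothetical helper that
# verifies the two defining properties of a `special_ortho_group` draw --
# orthogonality (R @ R.T == I up to round-off) and determinant +1. The
# dimension and seed are arbitrary demonstration values.
def _so_n_property_check_example(dim=4, seed=7):
    R = special_ortho_group.rvs(dim, random_state=seed)
    orthogonality_error = np.max(np.abs(R @ R.T - np.eye(dim)))
    return orthogonality_error, np.linalg.det(R)   # det should be ~ +1.0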
class ortho_group_gen(multi_rv_generic):
r"""A matrix-valued O(N) random variable.
Return a random orthogonal matrix, drawn from the O(N) Haar
distribution (the only uniform distribution on O(N)).
The `dim` keyword specifies the dimension N.
Methods
-------
``rvs(dim=None, size=1, random_state=None)``
Draw random samples from O(N).
Parameters
----------
dim : scalar
Dimension of matrices
Notes
-----
This class is closely related to `special_ortho_group`.
Some care is taken to avoid numerical error, as per the paper by Mezzadri.
References
----------
.. [1] F. Mezzadri, "How to generate random matrices from the classical
compact groups", :arXiv:`math-ph/0609050v2`.
Examples
--------
>>> from scipy.stats import ortho_group
>>> x = ortho_group.rvs(3)
>>> np.dot(x, x.T)
array([[ 1.00000000e+00, 1.13231364e-17, -2.86852790e-16],
[ 1.13231364e-17, 1.00000000e+00, -1.46845020e-16],
[ -2.86852790e-16, -1.46845020e-16, 1.00000000e+00]])
>>> import scipy.linalg
>>> np.fabs(scipy.linalg.det(x))
1.0
This generates one random matrix from O(3). It is orthogonal and
has a determinant of +1 or -1.
"""
def __init__(self, seed=None):
super().__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__)
def _process_parameters(self, dim):
"""Dimension N must be specified; it cannot be inferred."""
if dim is None or not np.isscalar(dim) or dim <= 1 or dim != int(dim):
raise ValueError("Dimension of rotation must be specified,"
"and must be a scalar greater than 1.")
return dim
def rvs(self, dim, size=1, random_state=None):
"""Draw random samples from O(N).
Parameters
----------
dim : integer
Dimension of rotation space (N).
size : integer, optional
Number of samples to draw (default 1).
Returns
-------
rvs : ndarray or scalar
Random size N-dimensional matrices, dimension (size, dim, dim)
"""
random_state = self._get_random_state(random_state)
size = int(size)
if size > 1:
return np.array([self.rvs(dim, size=1, random_state=random_state)
for i in range(size)])
dim = self._process_parameters(dim)
H = np.eye(dim)
for n in range(dim):
x = random_state.normal(size=(dim-n,))
norm2 = np.dot(x, x)
x0 = x[0].item()
# random sign, 50/50, but chosen carefully to avoid roundoff error
D = np.sign(x[0]) if x[0] != 0 else 1
x[0] += D * np.sqrt(norm2)
x /= np.sqrt((norm2 - x0**2 + x[0]**2) / 2.)
# Householder transformation
H[:, n:] = -D * (H[:, n:] - np.outer(np.dot(H[:, n:], x), x))
return H
ortho_group = ortho_group_gen()
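# Illustrative sketch (not part of the library API): unlike `special_ortho_group`,
# draws from `ortho_group` cover both connected components of O(N), so the
# determinant is +1 or -1. This hypothetical helper tallies the signs over a
# handful of draws; the dimension, sample count, and seed are arbitrary.
def _o_n_determinant_sign_example(dim=3, n_samples=10, seed=3):
    samples = ortho_group.rvs(dim, size=n_samples, random_state=seed)
    return np.sign(np.linalg.det(samples))   # a mix of +1.0 and -1.0 entries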
class random_correlation_gen(multi_rv_generic):
r"""A random correlation matrix.
Return a random correlation matrix, given a vector of eigenvalues.
The `eigs` keyword specifies the eigenvalues of the correlation matrix,
and implies the dimension.
Methods
-------
``rvs(eigs=None, random_state=None)``
Draw random correlation matrices, all with eigenvalues eigs.
Parameters
----------
eigs : 1d ndarray
Eigenvalues of correlation matrix.
Notes
-----
Generates a random correlation matrix following a numerically stable
algorithm spelled out by Davies & Higham. This algorithm uses a single O(N)
similarity transformation to construct a symmetric positive semi-definite
matrix, and applies a series of Givens rotations to scale it to have ones
on the diagonal.
References
----------
.. [1] Davies, Philip I; Higham, Nicholas J; "Numerically stable generation
of correlation matrices and their factors", BIT 2000, Vol. 40,
No. 4, pp. 640-651
Examples
--------
>>> from scipy.stats import random_correlation
>>> rng = np.random.default_rng()
>>> x = random_correlation.rvs((.5, .8, 1.2, 1.5), random_state=rng)
>>> x
array([[ 1. , -0.07198934, -0.20411041, -0.24385796],
[-0.07198934, 1. , 0.12968613, -0.29471382],
[-0.20411041, 0.12968613, 1. , 0.2828693 ],
[-0.24385796, -0.29471382, 0.2828693 , 1. ]])
>>> import scipy.linalg
>>> e, v = scipy.linalg.eigh(x)
>>> e
array([ 0.5, 0.8, 1.2, 1.5])
"""
def __init__(self, seed=None):
super().__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__)
def _process_parameters(self, eigs, tol):
eigs = np.asarray(eigs, dtype=float)
dim = eigs.size
if eigs.ndim != 1 or eigs.shape[0] != dim or dim <= 1:
raise ValueError("Array 'eigs' must be a vector of length "
"greater than 1.")
if np.fabs(np.sum(eigs) - dim) > tol:
raise ValueError("Sum of eigenvalues must equal dimensionality.")
for x in eigs:
if x < -tol:
raise ValueError("All eigenvalues must be non-negative.")
return dim, eigs
def _givens_to_1(self, aii, ajj, aij):
"""Computes a 2x2 Givens matrix to put 1's on the diagonal.
The input matrix is a 2x2 symmetric matrix M = [ aii aij ; aij ajj ].
The output matrix g is a 2x2 Givens rotation matrix of the form
[ c s ; -s c ]; the elements c and s are returned.
Applying the output matrix to the input matrix (as b=g.T M g)
results in a matrix with bii=1, provided tr(M) - det(M) >= 1
and floating point issues do not occur. Otherwise, some other
valid rotation is returned. When tr(M)==2, also bjj=1.
"""
aiid = aii - 1.
ajjd = ajj - 1.
if ajjd == 0:
# ajj==1, so swap aii and ajj to avoid division by zero
return 0., 1.
dd = math.sqrt(max(aij**2 - aiid*ajjd, 0))
# The choice of t should be chosen to avoid cancellation [1]
t = (aij + math.copysign(dd, aij)) / ajjd
c = 1. / math.sqrt(1. + t*t)
if c == 0:
# Underflow
s = 1.0
else:
s = c*t
return c, s
def _to_corr(self, m):
"""
Given a psd matrix m, rotate to put ones on the diagonal, turning it
into a correlation matrix. This also requires the trace to equal the
dimensionality. Note: modifies the input matrix in place.
"""
# Check requirements for in-place Givens
if not (m.flags.c_contiguous and m.dtype == np.float64 and
m.shape[0] == m.shape[1]):
raise ValueError()
d = m.shape[0]
for i in range(d-1):
if m[i, i] == 1:
continue
elif m[i, i] > 1:
for j in range(i+1, d):
if m[j, j] < 1:
break
else:
for j in range(i+1, d):
if m[j, j] > 1:
break
c, s = self._givens_to_1(m[i, i], m[j, j], m[i, j])
# Use BLAS to apply Givens rotations in-place. Equivalent to:
# g = np.eye(d)
# g[i, i] = g[j,j] = c
# g[j, i] = -s; g[i, j] = s
# m = np.dot(g.T, np.dot(m, g))
mv = m.ravel()
drot(mv, mv, c, -s, n=d,
offx=i*d, incx=1, offy=j*d, incy=1,
overwrite_x=True, overwrite_y=True)
drot(mv, mv, c, -s, n=d,
offx=i, incx=d, offy=j, incy=d,
overwrite_x=True, overwrite_y=True)
return m
def rvs(self, eigs, random_state=None, tol=1e-13, diag_tol=1e-7):
"""Draw random correlation matrices.
Parameters
----------
eigs : 1d ndarray
Eigenvalues of correlation matrix
tol : float, optional
Tolerance for input parameter checks
diag_tol : float, optional
Tolerance for deviation of the diagonal of the resulting
matrix. Default: 1e-7
Raises
------
RuntimeError
Floating point error prevented generating a valid correlation
matrix.
Returns
-------
rvs : ndarray or scalar
Random size N-dimensional matrices, dimension (size, dim, dim),
each having eigenvalues eigs.
"""
dim, eigs = self._process_parameters(eigs, tol=tol)
random_state = self._get_random_state(random_state)
m = ortho_group.rvs(dim, random_state=random_state)
m = np.dot(np.dot(m, np.diag(eigs)), m.T) # Set the trace of m
m = self._to_corr(m) # Carefully rotate to unit diagonal
# Check diagonal
if abs(m.diagonal() - 1).max() > diag_tol:
raise RuntimeError("Failed to generate a valid correlation matrix")
return m
random_correlation = random_correlation_gen()
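# Illustrative sketch (not part of the library API): a hypothetical helper that
# checks the two invariants of `random_correlation.rvs` -- a unit diagonal and
# the requested eigenvalue spectrum (which must sum to the dimension). The
# eigenvalues and seed below are arbitrary demonstration values.
def _random_correlation_check_example(seed=11):
    eigs = np.array([0.5, 0.8, 1.2, 1.5])      # sums to 4 == dimension
    c = random_correlation.rvs(eigs, random_state=seed)
    diag_error = np.max(np.abs(np.diag(c) - 1.0))
    spectrum = np.sort(np.linalg.eigvalsh(c))  # should match the sorted eigs
    return diag_error, spectrum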
class unitary_group_gen(multi_rv_generic):
r"""A matrix-valued U(N) random variable.
Return a random unitary matrix.
The `dim` keyword specifies the dimension N.
Methods
-------
``rvs(dim=None, size=1, random_state=None)``
Draw random samples from U(N).
Parameters
----------
dim : scalar
Dimension of matrices
Notes
-----
This class is similar to `ortho_group`.
References
----------
.. [1] F. Mezzadri, "How to generate random matrices from the classical
compact groups", :arXiv:`math-ph/0609050v2`.
Examples
--------
>>> from scipy.stats import unitary_group
>>> x = unitary_group.rvs(3)
>>> np.dot(x, x.conj().T)
array([[ 1.00000000e+00, 1.13231364e-17, -2.86852790e-16],
[ 1.13231364e-17, 1.00000000e+00, -1.46845020e-16],
[ -2.86852790e-16, -1.46845020e-16, 1.00000000e+00]])
This generates one random matrix from U(3). The dot product confirms that
it is unitary up to machine precision.
"""
def __init__(self, seed=None):
super().__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__)
def _process_parameters(self, dim):
"""Dimension N must be specified; it cannot be inferred."""
if dim is None or not np.isscalar(dim) or dim <= 1 or dim != int(dim):
raise ValueError("Dimension of rotation must be specified,"
"and must be a scalar greater than 1.")
return dim
def rvs(self, dim, size=1, random_state=None):
"""Draw random samples from U(N).
Parameters
----------
dim : integer
Dimension of space (N).
size : integer, optional
Number of samples to draw (default 1).
Returns
-------
rvs : ndarray or scalar
Random size N-dimensional matrices, dimension (size, dim, dim)
"""
random_state = self._get_random_state(random_state)
size = int(size)
if size > 1:
return np.array([self.rvs(dim, size=1, random_state=random_state)
for i in range(size)])
dim = self._process_parameters(dim)
z = 1/math.sqrt(2)*(random_state.normal(size=(dim, dim)) +
1j*random_state.normal(size=(dim, dim)))
q, r = scipy.linalg.qr(z)
d = r.diagonal()
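# Rescale each column of q by the unit phase of the corresponding diagonal
# entry of r; this fixes the phase ambiguity of the QR factorization and
# makes q Haar-distributed over U(N) (see Mezzadri [1]).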
q *= d/abs(d)
return q
unitary_group = unitary_group_gen()
_mvt_doc_default_callparams = \
"""
loc : array_like, optional
Location of the distribution. (default ``0``)
shape : array_like, optional
Positive semidefinite matrix of the distribution. (default ``1``)
df : float, optional
Degrees of freedom of the distribution; must be greater than zero.
If ``np.inf`` then results are multivariate normal. The default is ``1``.
allow_singular : bool, optional
Whether to allow a singular matrix. (default ``False``)
"""
_mvt_doc_callparams_note = \
"""Setting the parameter `loc` to ``None`` is equivalent to having `loc`
be the zero-vector. The parameter `shape` can be a scalar, in which case
the shape matrix is the identity times that value, a vector of
diagonal entries for the shape matrix, or a two-dimensional array_like.
"""
_mvt_doc_frozen_callparams_note = \
"""See class definition for a detailed description of parameters."""
mvt_docdict_params = {
'_mvt_doc_default_callparams': _mvt_doc_default_callparams,
'_mvt_doc_callparams_note': _mvt_doc_callparams_note,
'_doc_random_state': _doc_random_state
}
mvt_docdict_noparams = {
'_mvt_doc_default_callparams': "",
'_mvt_doc_callparams_note': _mvt_doc_frozen_callparams_note,
'_doc_random_state': _doc_random_state
}
class multivariate_t_gen(multi_rv_generic):
r"""A multivariate t-distributed random variable.
The `loc` parameter specifies the location. The `shape` parameter specifies
the positive semidefinite shape matrix. The `df` parameter specifies the
degrees of freedom.
In addition to calling the methods below, the object itself may be called
as a function to fix the location, shape matrix, and degrees of freedom
parameters, returning a "frozen" multivariate t-distribution random.
Methods
-------
``pdf(x, loc=None, shape=1, df=1, allow_singular=False)``
Probability density function.
``logpdf(x, loc=None, shape=1, df=1, allow_singular=False)``
Log of the probability density function.
``rvs(loc=None, shape=1, df=1, size=1, random_state=None)``
Draw random samples from a multivariate t-distribution.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_mvt_doc_default_callparams)s
%(_doc_random_state)s
Notes
-----
%(_mvt_doc_callparams_note)s
The matrix `shape` must be a (symmetric) positive semidefinite matrix. The
determinant and inverse of `shape` are computed as the pseudo-determinant
and pseudo-inverse, respectively, so that `shape` does not need to have
full rank.
The probability density function for `multivariate_t` is
.. math::
f(x) = \frac{\Gamma\left(\frac{\nu + p}{2}\right)}{\Gamma\left(\frac{\nu}{2}\right)\nu^{p/2}\pi^{p/2}|\Sigma|^{1/2}}
\left[1 + \frac{1}{\nu} (\mathbf{x} - \boldsymbol{\mu})^{\top}
\boldsymbol{\Sigma}^{-1}
(\mathbf{x} - \boldsymbol{\mu}) \right]^{-(\nu + p)/2},
where :math:`p` is the dimension of :math:`\mathbf{x}`,
:math:`\boldsymbol{\mu}` is the :math:`p`-dimensional location,
:math:`\boldsymbol{\Sigma}` the :math:`p \times p`-dimensional shape
matrix, and :math:`\nu` is the degrees of freedom.
.. versionadded:: 1.6.0
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy.stats import multivariate_t
>>> x, y = np.mgrid[-1:3:.01, -2:1.5:.01]
>>> pos = np.dstack((x, y))
>>> rv = multivariate_t([1.0, -0.5], [[2.1, 0.3], [0.3, 1.5]], df=2)
>>> fig, ax = plt.subplots(1, 1)
>>> ax.set_aspect('equal')
>>> plt.contourf(x, y, rv.pdf(pos))
"""
def __init__(self, seed=None):
"""Initialize a multivariate t-distributed random variable.
Parameters
----------
seed : Random state.
"""
super().__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__, mvt_docdict_params)
self._random_state = check_random_state(seed)
def __call__(self, loc=None, shape=1, df=1, allow_singular=False,
seed=None):
"""Create a frozen multivariate t-distribution.
See `multivariate_t_frozen` for parameters.
"""
if df == np.inf:
return multivariate_normal_frozen(mean=loc, cov=shape,
allow_singular=allow_singular,
seed=seed)
return multivariate_t_frozen(loc=loc, shape=shape, df=df,
allow_singular=allow_singular, seed=seed)
def pdf(self, x, loc=None, shape=1, df=1, allow_singular=False):
"""Multivariate t-distribution probability density function.
Parameters
----------
x : array_like
Points at which to evaluate the probability density function.
%(_mvt_doc_default_callparams)s
Returns
-------
pdf : Probability density function evaluated at `x`.
Examples
--------
>>> from scipy.stats import multivariate_t
>>> x = [0.4, 5]
>>> loc = [0, 1]
>>> shape = [[1, 0.1], [0.1, 1]]
>>> df = 7
>>> multivariate_t.pdf(x, loc, shape, df)
array([0.00075713])
"""
dim, loc, shape, df = self._process_parameters(loc, shape, df)
x = self._process_quantiles(x, dim)
shape_info = _PSD(shape, allow_singular=allow_singular)
logpdf = self._logpdf(x, loc, shape_info.U, shape_info.log_pdet, df,
dim, shape_info.rank)
return np.exp(logpdf)
def logpdf(self, x, loc=None, shape=1, df=1):
"""Log of the multivariate t-distribution probability density function.
Parameters
----------
x : array_like
Points at which to evaluate the log of the probability density
function.
%(_mvt_doc_default_callparams)s
Returns
-------
logpdf : Log of the probability density function evaluated at `x`.
Examples
--------
>>> from scipy.stats import multivariate_t
>>> x = [0.4, 5]
>>> loc = [0, 1]
>>> shape = [[1, 0.1], [0.1, 1]]
>>> df = 7
>>> multivariate_t.logpdf(x, loc, shape, df)
array([-7.1859802])
See Also
--------
pdf : Probability density function.
"""
dim, loc, shape, df = self._process_parameters(loc, shape, df)
x = self._process_quantiles(x, dim)
shape_info = _PSD(shape)
return self._logpdf(x, loc, shape_info.U, shape_info.log_pdet, df, dim,
shape_info.rank)
def _logpdf(self, x, loc, prec_U, log_pdet, df, dim, rank):
"""Utility method `pdf`, `logpdf` for parameters.
Parameters
----------
x : ndarray
Points at which to evaluate the log of the probability density
function.
loc : ndarray
Location of the distribution.
prec_U : ndarray
A decomposition such that `np.dot(prec_U, prec_U.T)` is the inverse
of the shape matrix.
log_pdet : float
Logarithm of the determinant of the shape matrix.
df : float
Degrees of freedom of the distribution.
dim : int
Dimension of the quantiles x.
rank : int
Rank of the shape matrix.
Notes
-----
As this function does no argument checking, it should not be called
directly; use 'logpdf' instead.
"""
if df == np.inf:
return multivariate_normal._logpdf(x, loc, prec_U, log_pdet, rank)
dev = x - loc
maha = np.square(np.dot(dev, prec_U)).sum(axis=-1)
t = 0.5 * (df + dim)
A = gammaln(t)
B = gammaln(0.5 * df)
C = dim/2. * np.log(df * np.pi)
D = 0.5 * log_pdet
E = -t * np.log(1 + (1./df) * maha)
return _squeeze_output(A - B - C - D + E)
def rvs(self, loc=None, shape=1, df=1, size=1, random_state=None):
"""Draw random samples from a multivariate t-distribution.
Parameters
----------
%(_mvt_doc_default_callparams)s
size : integer, optional
Number of samples to draw (default 1).
%(_doc_random_state)s
Returns
-------
rvs : ndarray or scalar
Random variates of size (`size`, `P`), where `P` is the
dimension of the random variable.
Examples
--------
>>> from scipy.stats import multivariate_t
>>> x = [0.4, 5]
>>> loc = [0, 1]
>>> shape = [[1, 0.1], [0.1, 1]]
>>> df = 7
>>> multivariate_t.rvs(loc, shape, df)
array([[0.93477495, 3.00408716]])
"""
# For implementation details, see equation (3):
#
# Hofert, "On Sampling from the Multivariatet Distribution", 2013
# http://rjournal.github.io/archive/2013-2/hofert.pdf
#
dim, loc, shape, df = self._process_parameters(loc, shape, df)
if random_state is not None:
rng = check_random_state(random_state)
else:
rng = self._random_state
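# A variate is generated as loc + z / sqrt(w) with z ~ N(0, shape) and
# w ~ chi2(df) / df; for df = inf the scaling factor is identically 1 and
# the draw reduces to the multivariate normal case.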
if np.isinf(df):
x = np.ones(size)
else:
x = rng.chisquare(df, size=size) / df
z = rng.multivariate_normal(np.zeros(dim), shape, size=size)
samples = loc + z / np.sqrt(x)[..., None]
return _squeeze_output(samples)
def _process_quantiles(self, x, dim):
"""
Adjust quantiles array so that last axis labels the components of
each data point.
"""
x = np.asarray(x, dtype=float)
if x.ndim == 0:
x = x[np.newaxis]
elif x.ndim == 1:
if dim == 1:
x = x[:, np.newaxis]
else:
x = x[np.newaxis, :]
return x
def _process_parameters(self, loc, shape, df):
"""
Infer dimensionality from location array and shape matrix, handle
defaults, and ensure compatible dimensions.
"""
if loc is None and shape is None:
loc = np.asarray(0, dtype=float)
shape = np.asarray(1, dtype=float)
dim = 1
elif loc is None:
shape = np.asarray(shape, dtype=float)
if shape.ndim < 2:
dim = 1
else:
dim = shape.shape[0]
loc = np.zeros(dim)
elif shape is None:
loc = np.asarray(loc, dtype=float)
dim = loc.size
shape = np.eye(dim)
else:
shape = np.asarray(shape, dtype=float)
loc = np.asarray(loc, dtype=float)
dim = loc.size
if dim == 1:
loc.shape = (1,)
shape.shape = (1, 1)
if loc.ndim != 1 or loc.shape[0] != dim:
raise ValueError("Array 'loc' must be a vector of length %d." %
dim)
if shape.ndim == 0:
shape = shape * np.eye(dim)
elif shape.ndim == 1:
shape = np.diag(shape)
elif shape.ndim == 2 and shape.shape != (dim, dim):
rows, cols = shape.shape
if rows != cols:
msg = ("Array 'cov' must be square if it is two dimensional,"
" but cov.shape = %s." % str(shape.shape))
else:
msg = ("Dimension mismatch: array 'cov' is of shape %s,"
" but 'loc' is a vector of length %d.")
msg = msg % (str(shape.shape), len(loc))
raise ValueError(msg)
elif shape.ndim > 2:
raise ValueError("Array 'cov' must be at most two-dimensional,"
" but cov.ndim = %d" % shape.ndim)
# Process degrees of freedom.
if df is None:
df = 1
elif df <= 0:
raise ValueError("'df' must be greater than zero.")
elif np.isnan(df):
raise ValueError("'df' is 'nan' but must be greater than zero or 'np.inf'.")
return dim, loc, shape, df
class multivariate_t_frozen(multi_rv_frozen):
def __init__(self, loc=None, shape=1, df=1, allow_singular=False,
seed=None):
"""Create a frozen multivariate t distribution.
Parameters
----------
%(_mvt_doc_default_callparams)s
Examples
--------
>>> loc = np.zeros(3)
>>> shape = np.eye(3)
>>> df = 10
>>> dist = multivariate_t(loc, shape, df)
>>> dist.rvs()
array([[ 0.81412036, -1.53612361, 0.42199647]])
>>> dist.pdf([1, 1, 1])
array([0.01237803])
"""
self._dist = multivariate_t_gen(seed)
dim, loc, shape, df = self._dist._process_parameters(loc, shape, df)
self.dim, self.loc, self.shape, self.df = dim, loc, shape, df
self.shape_info = _PSD(shape, allow_singular=allow_singular)
def logpdf(self, x):
x = self._dist._process_quantiles(x, self.dim)
U = self.shape_info.U
log_pdet = self.shape_info.log_pdet
return self._dist._logpdf(x, self.loc, U, log_pdet, self.df, self.dim,
self.shape_info.rank)
def pdf(self, x):
return np.exp(self.logpdf(x))
def rvs(self, size=1, random_state=None):
return self._dist.rvs(loc=self.loc,
shape=self.shape,
df=self.df,
size=size,
random_state=random_state)
multivariate_t = multivariate_t_gen()
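# Illustrative usage sketch (not part of the original module): freeze the
# distribution once and reuse it; note that ``df=np.inf`` is special-cased in
# ``__call__`` above and returns a frozen multivariate normal instead.
#
#     >>> rv = multivariate_t([1.0, -0.5], [[2.1, 0.3], [0.3, 1.5]], df=2)
#     >>> rv.pdf([0.0, 0.0])                         # doctest: +SKIP
#     >>> gauss = multivariate_t([0.0, 0.0], np.eye(2), df=np.inf)   # frozen multivariate normal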
# Set frozen generator docstrings from corresponding docstrings in
# matrix_normal_gen and fill in default strings in class docstrings
for name in ['logpdf', 'pdf', 'rvs']:
method = multivariate_t_gen.__dict__[name]
method_frozen = multivariate_t_frozen.__dict__[name]
method_frozen.__doc__ = doccer.docformat(method.__doc__,
mvt_docdict_noparams)
method.__doc__ = doccer.docformat(method.__doc__, mvt_docdict_params)
_mhg_doc_default_callparams = """\
m : array_like
The number of each type of object in the population.
That is, :math:`m[i]` is the number of objects of
type :math:`i`.
n : array_like
The number of samples taken from the population.
"""
_mhg_doc_callparams_note = """\
`m` must be an array of positive integers. If the quantile
:math:`i` contains values out of the range :math:`[0, m_i]`
where :math:`m_i` is the number of objects of type :math:`i`
in the population or if the parameters are inconsistent with one
another (e.g. ``x.sum() != n``), methods return the appropriate
value (e.g. ``0`` for ``pmf``). If `m` or `n` contain negative
values, the result will contain ``nan`` there.
"""
_mhg_doc_frozen_callparams = ""
_mhg_doc_frozen_callparams_note = \
"""See class definition for a detailed description of parameters."""
mhg_docdict_params = {
'_doc_default_callparams': _mhg_doc_default_callparams,
'_doc_callparams_note': _mhg_doc_callparams_note,
'_doc_random_state': _doc_random_state
}
mhg_docdict_noparams = {
'_doc_default_callparams': _mhg_doc_frozen_callparams,
'_doc_callparams_note': _mhg_doc_frozen_callparams_note,
'_doc_random_state': _doc_random_state
}
class multivariate_hypergeom_gen(multi_rv_generic):
r"""A multivariate hypergeometric random variable.
Methods
-------
``pmf(x, m, n)``
Probability mass function.
``logpmf(x, m, n)``
Log of the probability mass function.
``rvs(m, n, size=1, random_state=None)``
Draw random samples from a multivariate hypergeometric
distribution.
``mean(m, n)``
Mean of the multivariate hypergeometric distribution.
``var(m, n)``
Variance of the multivariate hypergeometric distribution.
``cov(m, n)``
Compute the covariance matrix of the multivariate
hypergeometric distribution.
Parameters
----------
%(_doc_default_callparams)s
%(_doc_random_state)s
Notes
-----
%(_doc_callparams_note)s
The probability mass function for `multivariate_hypergeom` is
.. math::
P(X_1 = x_1, X_2 = x_2, \ldots, X_k = x_k) = \frac{\binom{m_1}{x_1}
\binom{m_2}{x_2} \cdots \binom{m_k}{x_k}}{\binom{M}{n}}, \\ \quad
(x_1, x_2, \ldots, x_k) \in \mathbb{N}^k \text{ with }
\sum_{i=1}^k x_i = n
where :math:`m_i` are the number of objects of type :math:`i`, :math:`M`
is the total number of objects in the population (sum of all the
:math:`m_i`), and :math:`n` is the size of the sample to be taken
from the population.
.. versionadded:: 1.6.0
Examples
--------
To evaluate the probability mass function of the multivariate
hypergeometric distribution, with a dichotomous population of size
:math:`10` and :math:`20`, at a sample of size :math:`12` with
:math:`8` objects of the first type and :math:`4` objects of the
second type, use:
>>> from scipy.stats import multivariate_hypergeom
>>> multivariate_hypergeom.pmf(x=[8, 4], m=[10, 20], n=12)
0.0025207176631464523
The `multivariate_hypergeom` distribution is identical to the
corresponding `hypergeom` distribution (tiny numerical differences
notwithstanding) when only two types (good and bad) of objects
are present in the population as in the example above. Consider
another example for a comparison with the hypergeometric distribution:
>>> from scipy.stats import hypergeom
>>> multivariate_hypergeom.pmf(x=[3, 1], m=[10, 5], n=4)
0.4395604395604395
>>> hypergeom.pmf(k=3, M=15, n=4, N=10)
0.43956043956044005
The functions ``pmf``, ``logpmf``, ``mean``, ``var``, ``cov``, and ``rvs``
support broadcasting, under the convention that the vector parameters
(``x``, ``m``, and ``n``) are interpreted as if each row along the last
axis is a single object. For instance, we can combine the previous two
calls to `multivariate_hypergeom` as
>>> multivariate_hypergeom.pmf(x=[[8, 4], [3, 1]], m=[[10, 20], [10, 5]],
... n=[12, 4])
array([0.00252072, 0.43956044])
This broadcasting also works for ``cov``, where the output objects are
square matrices of size ``m.shape[-1]``. For example:
>>> multivariate_hypergeom.cov(m=[[7, 9], [10, 15]], n=[8, 12])
array([[[ 1.05, -1.05],
[-1.05, 1.05]],
[[ 1.56, -1.56],
[-1.56, 1.56]]])
That is, ``result[0]`` is equal to
``multivariate_hypergeom.cov(m=[7, 9], n=8)`` and ``result[1]`` is equal
to ``multivariate_hypergeom.cov(m=[10, 15], n=12)``.
Alternatively, the object may be called (as a function) to fix the `m`
and `n` parameters, returning a "frozen" multivariate hypergeometric
random variable.
>>> rv = multivariate_hypergeom(m=[10, 20], n=12)
>>> rv.pmf(x=[8, 4])
0.0025207176631464523
See Also
--------
scipy.stats.hypergeom : The hypergeometric distribution.
scipy.stats.multinomial : The multinomial distribution.
References
----------
.. [1] The Multivariate Hypergeometric Distribution,
http://www.randomservices.org/random/urn/MultiHypergeometric.html
.. [2] Thomas J. Sargent and John Stachurski, 2020,
Multivariate Hypergeometric Distribution
https://python.quantecon.org/_downloads/pdf/multi_hyper.pdf
"""
def __init__(self, seed=None):
super().__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__, mhg_docdict_params)
def __call__(self, m, n, seed=None):
"""Create a frozen multivariate_hypergeom distribution.
See `multivariate_hypergeom_frozen` for more information.
"""
return multivariate_hypergeom_frozen(m, n, seed=seed)
def _process_parameters(self, m, n):
m = np.asarray(m)
n = np.asarray(n)
if m.size == 0:
m = m.astype(int)
if n.size == 0:
n = n.astype(int)
if not np.issubdtype(m.dtype, np.integer):
raise TypeError("'m' must an array of integers.")
if not np.issubdtype(n.dtype, np.integer):
raise TypeError("'n' must an array of integers.")
if m.ndim == 0:
raise ValueError("'m' must be an array with"
" at least one dimension.")
# check for empty arrays
if m.size != 0:
n = n[..., np.newaxis]
m, n = np.broadcast_arrays(m, n)
# check for empty arrays
if m.size != 0:
n = n[..., 0]
mcond = m < 0
M = m.sum(axis=-1)
ncond = (n < 0) | (n > M)
return M, m, n, mcond, ncond, np.any(mcond, axis=-1) | ncond
def _process_quantiles(self, x, M, m, n):
x = np.asarray(x)
if not np.issubdtype(x.dtype, np.integer):
raise TypeError("'x' must an array of integers.")
if x.ndim == 0:
raise ValueError("'x' must be an array with"
" at least one dimension.")
if not x.shape[-1] == m.shape[-1]:
raise ValueError(f"Size of each quantile must be size of 'm': "
f"received {x.shape[-1]}, "
f"but expected {m.shape[-1]}.")
# check for empty arrays
if m.size != 0:
n = n[..., np.newaxis]
M = M[..., np.newaxis]
x, m, n, M = np.broadcast_arrays(x, m, n, M)
# check for empty arrays
if m.size != 0:
n, M = n[..., 0], M[..., 0]
xcond = (x < 0) | (x > m)
return (x, M, m, n, xcond,
np.any(xcond, axis=-1) | (x.sum(axis=-1) != n))
def _checkresult(self, result, cond, bad_value):
result = np.asarray(result)
if cond.ndim != 0:
result[cond] = bad_value
elif cond:
return bad_value
if result.ndim == 0:
return result[()]
return result
def _logpmf(self, x, M, m, n, mxcond, ncond):
# This form of the pmf follows from the identity
# C(n, r) = beta(n+1, 1) / beta(r+1, n-r+1)
num = np.zeros_like(m, dtype=np.float_)
den = np.zeros_like(n, dtype=np.float_)
m, x = m[~mxcond], x[~mxcond]
M, n = M[~ncond], n[~ncond]
num[~mxcond] = (betaln(m+1, 1) - betaln(x+1, m-x+1))
den[~ncond] = (betaln(M+1, 1) - betaln(n+1, M-n+1))
num[mxcond] = np.nan
den[ncond] = np.nan
num = num.sum(axis=-1)
return num - den
def logpmf(self, x, m, n):
"""Log of the multivariate hypergeometric probability mass function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_doc_default_callparams)s
Returns
-------
logpmf : ndarray or scalar
Log of the probability mass function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
"""
M, m, n, mcond, ncond, mncond = self._process_parameters(m, n)
(x, M, m, n, xcond,
xcond_reduced) = self._process_quantiles(x, M, m, n)
mxcond = mcond | xcond
ncond = ncond | np.zeros(n.shape, dtype=np.bool_)
result = self._logpmf(x, M, m, n, mxcond, ncond)
# replace values for which x was out of the domain; broadcast
# xcond to the right shape
xcond_ = xcond_reduced | np.zeros(mncond.shape, dtype=np.bool_)
result = self._checkresult(result, xcond_, np.NINF)
# replace values bad for n or m; broadcast
# mncond to the right shape
mncond_ = mncond | np.zeros(xcond_reduced.shape, dtype=np.bool_)
return self._checkresult(result, mncond_, np.nan)
def pmf(self, x, m, n):
"""Multivariate hypergeometric probability mass function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_doc_default_callparams)s
Returns
-------
pmf : ndarray or scalar
Probability mass function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
"""
out = np.exp(self.logpmf(x, m, n))
return out
def mean(self, m, n):
"""Mean of the multivariate hypergeometric distribution.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
mean : array_like or scalar
The mean of the distribution
"""
M, m, n, _, _, mncond = self._process_parameters(m, n)
# check for empty arrays
if m.size != 0:
M, n = M[..., np.newaxis], n[..., np.newaxis]
cond = (M == 0)
M = np.ma.masked_array(M, mask=cond)
mu = n*(m/M)
if m.size != 0:
mncond = (mncond[..., np.newaxis] |
np.zeros(mu.shape, dtype=np.bool_))
return self._checkresult(mu, mncond, np.nan)
def var(self, m, n):
"""Variance of the multivariate hypergeometric distribution.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
array_like
The variances of the components of the distribution. This is
the diagonal of the covariance matrix of the distribution
"""
M, m, n, _, _, mncond = self._process_parameters(m, n)
# check for empty arrays
if m.size != 0:
M, n = M[..., np.newaxis], n[..., np.newaxis]
cond = (M == 0) & (M-1 == 0)
M = np.ma.masked_array(M, mask=cond)
output = n * m/M * (M-m)/M * (M-n)/(M-1)
if m.size != 0:
mncond = (mncond[..., np.newaxis] |
np.zeros(output.shape, dtype=np.bool_))
return self._checkresult(output, mncond, np.nan)
def cov(self, m, n):
"""Covariance matrix of the multivariate hypergeometric distribution.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
cov : array_like
The covariance matrix of the distribution
"""
# see [1]_ for the formula and [2]_ for implementation
# cov( x_i,x_j ) = -n * (M-n)/(M-1) * (K_i*K_j) / (M**2)
M, m, n, _, _, mncond = self._process_parameters(m, n)
# check for empty arrays
if m.size != 0:
M = M[..., np.newaxis, np.newaxis]
n = n[..., np.newaxis, np.newaxis]
cond = (M == 0) & (M-1 == 0)
M = np.ma.masked_array(M, mask=cond)
output = (-n * (M-n)/(M-1) *
np.einsum("...i,...j->...ij", m, m) / (M**2))
# check for empty arrays
if m.size != 0:
M, n = M[..., 0, 0], n[..., 0, 0]
cond = cond[..., 0, 0]
dim = m.shape[-1]
# diagonal entries need to be computed differently
for i in range(dim):
output[..., i, i] = (n * (M-n) * m[..., i]*(M-m[..., i]))
output[..., i, i] = output[..., i, i] / (M-1)
output[..., i, i] = output[..., i, i] / (M**2)
if m.size != 0:
mncond = (mncond[..., np.newaxis, np.newaxis] |
np.zeros(output.shape, dtype=np.bool_))
return self._checkresult(output, mncond, np.nan)
def rvs(self, m, n, size=None, random_state=None):
"""Draw random samples from a multivariate hypergeometric distribution.
Parameters
----------
%(_doc_default_callparams)s
size : integer or iterable of integers, optional
Number of samples to draw. Default is ``None``, in which case a
single variate is returned as an array with shape ``m.shape``.
%(_doc_random_state)s
Returns
-------
rvs : array_like
Random variates of shape ``size`` or ``m.shape``
(if ``size=None``).
Notes
-----
%(_doc_callparams_note)s
Also note that NumPy's `multivariate_hypergeometric` sampler is not
used as it doesn't support broadcasting.
"""
M, m, n, _, _, _ = self._process_parameters(m, n)
random_state = self._get_random_state(random_state)
if size is not None and isinstance(size, int):
size = (size, )
if size is None:
rvs = np.empty(m.shape, dtype=m.dtype)
else:
rvs = np.empty(size + (m.shape[-1], ), dtype=m.dtype)
rem = M
# This sampler has been taken from numpy gh-13794
# https://github.com/numpy/numpy/pull/13794
for c in range(m.shape[-1] - 1):
rem = rem - m[..., c]
rvs[..., c] = ((n != 0) *
random_state.hypergeometric(m[..., c], rem,
n + (n == 0),
size=size))
n = n - rvs[..., c]
rvs[..., m.shape[-1] - 1] = n
return rvs
multivariate_hypergeom = multivariate_hypergeom_gen()
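# Illustrative usage sketch (not part of the original module): with
# ``size=3`` the sampler returns an array of shape ``(3, len(m))`` and each
# row sums to ``n``, since the last component receives the remainder.
#
#     >>> draws = multivariate_hypergeom.rvs(m=[10, 20], n=12, size=3)
#     >>> draws.shape
#     (3, 2)
#     >>> draws.sum(axis=-1)                         # doctest: +SKIP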
class multivariate_hypergeom_frozen(multi_rv_frozen):
def __init__(self, m, n, seed=None):
self._dist = multivariate_hypergeom_gen(seed)
(self.M, self.m, self.n,
self.mcond, self.ncond,
self.mncond) = self._dist._process_parameters(m, n)
# monkey patch self._dist
def _process_parameters(m, n):
return (self.M, self.m, self.n,
self.mcond, self.ncond,
self.mncond)
self._dist._process_parameters = _process_parameters
def logpmf(self, x):
return self._dist.logpmf(x, self.m, self.n)
def pmf(self, x):
return self._dist.pmf(x, self.m, self.n)
def mean(self):
return self._dist.mean(self.m, self.n)
def var(self):
return self._dist.var(self.m, self.n)
def cov(self):
return self._dist.cov(self.m, self.n)
def rvs(self, size=1, random_state=None):
return self._dist.rvs(self.m, self.n,
size=size,
random_state=random_state)
# Set frozen generator docstrings from corresponding docstrings in
# multivariate_hypergeom and fill in default strings in class docstrings
for name in ['logpmf', 'pmf', 'mean', 'var', 'cov', 'rvs']:
method = multivariate_hypergeom_gen.__dict__[name]
method_frozen = multivariate_hypergeom_frozen.__dict__[name]
method_frozen.__doc__ = doccer.docformat(
method.__doc__, mhg_docdict_noparams)
method.__doc__ = doccer.docformat(method.__doc__,
mhg_docdict_params)
| bsd-3-clause |
TheChymera/pyASF | ASFpca.py | 1 | 12891 | #!/usr/bin/env python
from __future__ import division
__author__ = 'Horea Christian'
from matplotlib.ticker import MultipleLocator, FuncFormatter, FormatStrFormatter
import mdp
import os
import matplotlib.pyplot as plt
import numpy as np
from loadsums import ca_loadsum
from pylab import close, xlabel, ylabel, show, legend, savefig
from mpl_toolkits.axes_grid1 import ImageGrid
from load_vdaq_conds import vdaq_conds
from get_data import unpack_block_file
stim_start = 15
stim_length = 5
en_masse = True # whether or not to show the result for a single block file or export the results of a list of block files
if en_masse == False:
from get_data import data_name_get
data_name = data_name_get()
lenheader, nframesperstim, framewidth, frameheight, _, _, _, _ = unpack_block_file(data_name)
nframesperstim = np.array(nframesperstim)
timepts = np.array(nframesperstim)
img_range_zero = np.arange(0, nframesperstim) # put reading head at the beginning of the baseline condition images (and set limit)
img_range_one = np.arange(nframesperstim, nframesperstim * 2) # move reading point to the trial condition images (and set limit)
data_type, bytes_per_pixel = vdaq_conds(data_name)
fimg_ca_one = ca_loadsum(data_name, img_range_one, True, data_type, framewidth, frameheight, lenheader, bytes_per_pixel)
fimg_ca_zero = ca_loadsum(data_name, img_range_zero, True, data_type, framewidth, frameheight, lenheader, bytes_per_pixel)
fimg_ca = fimg_ca_one / fimg_ca_zero # normalize trial condition to baseline condition
pca = mdp.nodes.PCANode()
pca_result = pca(fimg_ca)
pca_time = pca.get_projmatrix()
ica = mdp.nodes.CuBICANode(white_comp=10)
ica_result = ica(fimg_ca)
ica_time = ica.get_projmatrix()
fig1 = plt.figure(1,(22,10),facecolor='#eeeeee')
fig1.suptitle('Components according to: PCA (first panel) and ICA (second panel)')
x0=fig1.add_subplot(3,2,1)
x0.plot(pca_time[:,0]-np.mean(pca_time[:stim_start,0]), 'c', alpha = 1, label="Component 1")
x0.plot(pca_time[:,1]-np.mean(pca_time[:stim_start,1]), 'm', alpha = 0.9, label="Component 2")
x0.plot(pca_time[:,2]-np.mean(pca_time[:stim_start,2]), 'y', alpha = 0.8, label="Component 3")
x0.plot(pca_time[:,3]-np.mean(pca_time[:stim_start,3]), 'k', alpha = 0.7, label="Component 4")
ylabel('Component Activity', fontsize='12')
xlabel('Time [s]', fontsize='12')
x0.xaxis.set_major_formatter(FuncFormatter(lambda x,i: "%.0f" % (x/10)))
x0.xaxis.set_minor_locator(MultipleLocator(5))
x0.yaxis.set_major_formatter(FormatStrFormatter('%0.1f'))
legend(bbox_to_anchor=(1.04, 1.25), ncol=4)
x0.set_xlim(0, timepts-1)
C = pca_result[:,0]
height = np.sqrt(len(C))
a0 = np.reshape(C,(height,height))
C = pca_result[:,1]
a1 = np.reshape(C,(height,height))
C = pca_result[:,2]
a2 = np.reshape(C,(height,height))
C = pca_result[:,3]
a3 = np.reshape(C,(height,height))
C = pca_result[:,4]
a4 = np.reshape(C,(height,height))
C = pca_result[:,5]
a5 = np.reshape(C,(height,height))
ZS = [a0, a1, a2]
grid = ImageGrid(fig1, 323,
nrows_ncols = (1, 3),
direction="row",
axes_pad = 0.4,
cbar_location="top",
cbar_mode="each",
cbar_size="10%",
cbar_pad=0.05
)
for ax, z in zip(grid, ZS):
im = ax.imshow(z,origin="upper",interpolation="none")
ax.cax.colorbar(im)
ZS = [a3, a4, a5]
grid = ImageGrid(fig1, 325,
nrows_ncols = (1, 3),
direction="row",
axes_pad = 0.4,
cbar_location="top",
cbar_mode="each",
cbar_size="10%",
cbar_pad=0.05
)
for ax, z in zip(grid, ZS):
im = ax.imshow(z,origin="upper",interpolation="none")
ax.cax.colorbar(im)
x0=fig1.add_subplot(3,2,2)
x0.plot(ica_time[:,0]-np.mean(ica_time[:stim_start,0]), 'c', alpha = 1, label="Component 1")
x0.plot(ica_time[:,1]-np.mean(ica_time[:stim_start,1]), 'm', alpha = 0.9, label="Component 2")
x0.plot(ica_time[:,2]-np.mean(ica_time[:stim_start,2]), 'y', alpha = 0.8, label="Component 3")
x0.plot(ica_time[:,3]-np.mean(ica_time[:stim_start,3]), 'k', alpha = 0.7, label="Component 4")
ylabel('Component Activity', fontsize='12')
xlabel('Time [s]', fontsize='12')
x0.xaxis.set_major_formatter(FuncFormatter(lambda x,i: "%.0f" % (x/10)))
x0.xaxis.set_minor_locator(MultipleLocator(5))
x0.yaxis.set_major_formatter(FormatStrFormatter('%0.1f'))
legend(bbox_to_anchor=(1.04, 1.25), ncol=4)
x0.set_xlim(0, timepts-1)
C = ica_result[:,0]
height = int(np.sqrt(len(C)))  # np.reshape needs an integer dimension
a0 = np.reshape(C,(height,height))
C = ica_result[:,1]
a1 = np.reshape(C,(height,height))
C = ica_result[:,2]
a2 = np.reshape(C,(height,height))
C = ica_result[:,3]
a3 = np.reshape(C,(height,height))
C = ica_result[:,4]
a4 = np.reshape(C,(height,height))
C = ica_result[:,5]
a5 = np.reshape(C,(height,height))
ZS = [a0, a1, a2]
grid = ImageGrid(fig1, 324,
nrows_ncols = (1, 3),
direction="row",
axes_pad = 0.4,
cbar_location="top",
cbar_mode="each",
cbar_size="10%",
cbar_pad=0.05
)
for ax, z in zip(grid, ZS):
im = ax.imshow(z,origin="upper",interpolation="none")
ax.cax.colorbar(im)
ZS = [a3, a4, a5]
grid = ImageGrid(fig1, 326,
nrows_ncols = (1, 3),
direction="row",
axes_pad = 0.4,
cbar_location="top",
cbar_mode="each",
cbar_size="10%",
cbar_pad=0.05
)
for ax, z in zip(grid, ZS):
im = ax.imshow(z,origin="upper",interpolation="none")
ax.cax.colorbar(im)
show()
else:
from get_data import data_names_get
data_names = data_names_get()
for i in data_names:
lenheader, nframesperstim, framewidth, frameheight, _, _, _, _ = unpack_block_file(i)
nframesperstim = np.array(nframesperstim)
timepts = np.array(nframesperstim)
img_range_zero = np.arange(0, nframesperstim) # put reading head at the beginning of the baseline condition images (and set limit)
img_range_one = np.arange(nframesperstim, nframesperstim * 2) # move reading point to the trial condition images (and set limit)
data_type, bytes_per_pixel = vdaq_conds(i)
fimg_ca_one = ca_loadsum(i, img_range_one, True, data_type, framewidth, frameheight, lenheader, bytes_per_pixel)
fimg_ca_zero = ca_loadsum(i, img_range_zero, True, data_type, framewidth, frameheight, lenheader, bytes_per_pixel)
fimg_ca = fimg_ca_one / fimg_ca_zero # normalize trial condition to baseline condition
pca = mdp.nodes.PCANode()
pca_result = pca(fimg_ca)
pca_time = pca.get_projmatrix()
ica = mdp.nodes.CuBICANode(white_comp=10)
ica_result = ica(fimg_ca)
ica_time = ica.get_projmatrix()
fig1 = plt.figure(1,(22,10),facecolor='#eeeeee')
fig1.suptitle('Components according to: PCA (first panel) and ICA (second panel)')
x0=fig1.add_subplot(3,2,1)
x0.plot(pca_time[:,0]-np.mean(pca_time[:stim_start,0]), 'c', alpha = 1, label="Component 1")
x0.plot(pca_time[:,1]-np.mean(pca_time[:stim_start,1]), 'm', alpha = 0.9, label="Component 2")
x0.plot(pca_time[:,2]-np.mean(pca_time[:stim_start,2]), 'y', alpha = 0.8, label="Component 3")
x0.plot(pca_time[:,3]-np.mean(pca_time[:stim_start,3]), 'k', alpha = 0.7, label="Component 4")
ylabel('Component Activity', fontsize='12')
xlabel('Time [s]', fontsize='12')
x0.xaxis.set_major_formatter(FuncFormatter(lambda x,i: "%.0f" % (x/10)))
x0.xaxis.set_minor_locator(MultipleLocator(5))
x0.yaxis.set_major_formatter(FormatStrFormatter('%0.1f'))
legend(bbox_to_anchor=(1.04, 1.25), ncol=4)
x0.set_xlim(0, timepts-1)
C = pca_result[:,0]
height = int(np.sqrt(len(C)))  # np.reshape needs an integer dimension
a0 = np.reshape(C,(height,height))
C = pca_result[:,1]
a1 = np.reshape(C,(height,height))
C = pca_result[:,2]
a2 = np.reshape(C,(height,height))
C = pca_result[:,3]
a3 = np.reshape(C,(height,height))
C = pca_result[:,4]
a4 = np.reshape(C,(height,height))
C = pca_result[:,5]
a5 = np.reshape(C,(height,height))
ZS = [a0, a1, a2]
grid = ImageGrid(fig1, 323,
nrows_ncols = (1, 3),
direction="row",
axes_pad = 0.4,
cbar_location="top",
cbar_mode="each",
cbar_size="10%",
cbar_pad=0.05
)
for ax, z in zip(grid, ZS):
im = ax.imshow(z,origin="upper",interpolation="none")
ax.cax.colorbar(im)
ZS = [a3, a4, a5]
grid = ImageGrid(fig1, 325,
nrows_ncols = (1, 3),
direction="row",
axes_pad = 0.4,
cbar_location="top",
cbar_mode="each",
cbar_size="10%",
cbar_pad=0.05
)
for ax, z in zip(grid, ZS):
im = ax.imshow(z,origin="upper",interpolation="none")
ax.cax.colorbar(im)
x0=fig1.add_subplot(3,2,2)
x0.plot(ica_time[:,0]-np.mean(ica_time[:stim_start,0]), 'c', alpha = 1, label="Component 1")
x0.plot(ica_time[:,1]-np.mean(ica_time[:stim_start,1]), 'm', alpha = 0.9, label="Component 2")
x0.plot(ica_time[:,2]-np.mean(ica_time[:stim_start,2]), 'y', alpha = 0.8, label="Component 3")
x0.plot(ica_time[:,3]-np.mean(ica_time[:stim_start,3]), 'k', alpha = 0.7, label="Component 4")
ylabel('Component Activity', fontsize='12')
xlabel('Time [s]', fontsize='12')
x0.xaxis.set_major_formatter(FuncFormatter(lambda x,i: "%.0f" % (x/10)))
x0.xaxis.set_minor_locator(MultipleLocator(5))
x0.yaxis.set_major_formatter(FormatStrFormatter('%0.1f'))
legend(bbox_to_anchor=(1.04, 1.25), ncol=4)
x0.set_xlim(0, timepts-1)
C = ica_result[:,0]
height = int(np.sqrt(len(C)))  # np.reshape needs an integer dimension
a0 = np.reshape(C,(height,height))
C = ica_result[:,1]
a1 = np.reshape(C,(height,height))
C = ica_result[:,2]
a2 = np.reshape(C,(height,height))
C = ica_result[:,3]
a3 = np.reshape(C,(height,height))
C = ica_result[:,4]
a4 = np.reshape(C,(height,height))
C = ica_result[:,5]
a5 = np.reshape(C,(height,height))
ZS = [a0, a1, a2]
grid = ImageGrid(fig1, 324,
nrows_ncols = (1, 3),
direction="row",
axes_pad = 0.4,
cbar_location="top",
cbar_mode="each",
cbar_size="10%",
cbar_pad=0.05
)
for ax, z in zip(grid, ZS):
im = ax.imshow(z,origin="upper",interpolation="none")
ax.cax.colorbar(im)
ZS = [a3, a4, a5]
grid = ImageGrid(fig1, 326,
nrows_ncols = (1, 3),
direction="row",
axes_pad = 0.4,
cbar_location="top",
cbar_mode="each",
cbar_size="10%",
cbar_pad=0.05
)
for ax, z in zip(grid, ZS):
im = ax.imshow(z,origin="upper",interpolation="none")
ax.cax.colorbar(im)
le_path, le_file = os.path.split(i)
le_file, le_extension = os.path.splitext(le_file)
if os.path.isdir(le_path+'/ca-figs/'):
pass
else: os.mkdir(le_path+'/ca-figs/')
savefig(le_path+'/ca-figs/'+le_file+'-ca.png', bbox_inches=0)
close() | gpl-3.0 |
Barmaley-exe/scikit-learn | sklearn/feature_selection/tests/test_chi2.py | 5 | 2418 | """
Tests for chi2, currently the only feature selection function designed
specifically to work with sparse matrices.
"""
import numpy as np
from scipy.sparse import coo_matrix, csr_matrix
import scipy.stats
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.feature_selection.univariate_selection import _chisquare
from nose.tools import assert_raises
from numpy.testing import assert_equal, assert_array_almost_equal
# Feature 0 is highly informative for class 1;
# feature 1 is the same everywhere;
# feature 2 is a bit informative for class 2.
X = [[2, 1, 2],
[9, 1, 1],
[6, 1, 2],
[0, 1, 2]]
y = [0, 1, 2, 2]
def mkchi2(k):
"""Make k-best chi2 selector"""
return SelectKBest(chi2, k=k)
def test_chi2():
"""Test Chi2 feature extraction"""
chi2 = mkchi2(k=1).fit(X, y)
chi2 = mkchi2(k=1).fit(X, y)
assert_equal(chi2.get_support(indices=True), [0])
assert_equal(chi2.transform(X), np.array(X)[:, [0]])
chi2 = mkchi2(k=2).fit(X, y)
assert_equal(sorted(chi2.get_support(indices=True)), [0, 2])
Xsp = csr_matrix(X, dtype=np.float)
chi2 = mkchi2(k=2).fit(Xsp, y)
assert_equal(sorted(chi2.get_support(indices=True)), [0, 2])
Xtrans = chi2.transform(Xsp)
assert_equal(Xtrans.shape, [Xsp.shape[0], 2])
# == doesn't work on scipy.sparse matrices
Xtrans = Xtrans.toarray()
Xtrans2 = mkchi2(k=2).fit_transform(Xsp, y).toarray()
assert_equal(Xtrans, Xtrans2)
def test_chi2_coo():
"""Check that chi2 works with a COO matrix
(as returned by CountVectorizer, DictVectorizer)
"""
Xcoo = coo_matrix(X)
mkchi2(k=2).fit_transform(Xcoo, y)
# if we got here without an exception, we're safe
def test_chi2_negative():
"""Check for proper error on negative numbers in the input X."""
X, y = [[0, 1], [-1e-20, 1]], [0, 1]
for X in (X, np.array(X), csr_matrix(X)):
assert_raises(ValueError, chi2, X, y)
def test_chisquare():
"""Test replacement for scipy.stats.chisquare against the original."""
obs = np.array([[2., 2.],
[1., 1.]])
exp = np.array([[1.5, 1.5],
[1.5, 1.5]])
# call SciPy first because our version overwrites obs
chi_scp, p_scp = scipy.stats.chisquare(obs, exp)
chi_our, p_our = _chisquare(obs, exp)
assert_array_almost_equal(chi_scp, chi_our)
assert_array_almost_equal(p_scp, p_our)
| bsd-3-clause |
AIML/scikit-learn | examples/decomposition/plot_pca_vs_fa_model_selection.py | 142 | 4467 | """
===============================================================
Model selection with Probabilistic PCA and Factor Analysis (FA)
===============================================================
Probabilistic PCA and Factor Analysis are probabilistic models.
The consequence is that the likelihood of new data can be used
for model selection and covariance estimation.
Here we compare PCA and FA with cross-validation on low rank data corrupted
with homoscedastic noise (noise variance
is the same for each feature) or heteroscedastic noise (noise variance
is different for each feature). In a second step we compare the model
likelihood to the likelihoods obtained from shrinkage covariance estimators.
One can observe that with homoscedastic noise both FA and PCA succeed
in recovering the size of the low rank subspace. The likelihood with PCA
is higher than FA in this case. However PCA fails and overestimates
the rank when heteroscedastic noise is present. Under appropriate
circumstances the low rank models are more likely than shrinkage models.
The automatic estimation from
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604
by Thomas P. Minka is also compared.
"""
print(__doc__)
# Authors: Alexandre Gramfort
# Denis A. Engemann
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg
from sklearn.decomposition import PCA, FactorAnalysis
from sklearn.covariance import ShrunkCovariance, LedoitWolf
from sklearn.cross_validation import cross_val_score
from sklearn.grid_search import GridSearchCV
###############################################################################
# Create the data
n_samples, n_features, rank = 1000, 50, 10
sigma = 1.
rng = np.random.RandomState(42)
U, _, _ = linalg.svd(rng.randn(n_features, n_features))
X = np.dot(rng.randn(n_samples, rank), U[:, :rank].T)
# Adding homoscedastic noise
X_homo = X + sigma * rng.randn(n_samples, n_features)
# Adding heteroscedastic noise
sigmas = sigma * rng.rand(n_features) + sigma / 2.
X_hetero = X + rng.randn(n_samples, n_features) * sigmas
###############################################################################
# Fit the models
n_components = np.arange(0, n_features, 5) # options for n_components
def compute_scores(X):
pca = PCA()
fa = FactorAnalysis()
pca_scores, fa_scores = [], []
for n in n_components:
pca.n_components = n
fa.n_components = n
pca_scores.append(np.mean(cross_val_score(pca, X)))
fa_scores.append(np.mean(cross_val_score(fa, X)))
return pca_scores, fa_scores
def shrunk_cov_score(X):
shrinkages = np.logspace(-2, 0, 30)
cv = GridSearchCV(ShrunkCovariance(), {'shrinkage': shrinkages})
return np.mean(cross_val_score(cv.fit(X).best_estimator_, X))
def lw_score(X):
return np.mean(cross_val_score(LedoitWolf(), X))
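# Note (added, not part of the original example): for all of these estimators
# ``cross_val_score`` uses the default ``score`` method, which returns the
# average log-likelihood of the held-out data, so higher scores mean a better
# density model.  A minimal sketch of a single call:
#
#     # np.mean(cross_val_score(PCA(n_components=10), X_homo))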
for X, title in [(X_homo, 'Homoscedastic Noise'),
(X_hetero, 'Heteroscedastic Noise')]:
pca_scores, fa_scores = compute_scores(X)
n_components_pca = n_components[np.argmax(pca_scores)]
n_components_fa = n_components[np.argmax(fa_scores)]
pca = PCA(n_components='mle')
pca.fit(X)
n_components_pca_mle = pca.n_components_
print("best n_components by PCA CV = %d" % n_components_pca)
print("best n_components by FactorAnalysis CV = %d" % n_components_fa)
print("best n_components by PCA MLE = %d" % n_components_pca_mle)
plt.figure()
plt.plot(n_components, pca_scores, 'b', label='PCA scores')
plt.plot(n_components, fa_scores, 'r', label='FA scores')
plt.axvline(rank, color='g', label='TRUTH: %d' % rank, linestyle='-')
plt.axvline(n_components_pca, color='b',
label='PCA CV: %d' % n_components_pca, linestyle='--')
plt.axvline(n_components_fa, color='r',
label='FactorAnalysis CV: %d' % n_components_fa, linestyle='--')
plt.axvline(n_components_pca_mle, color='k',
label='PCA MLE: %d' % n_components_pca_mle, linestyle='--')
# compare with other covariance estimators
plt.axhline(shrunk_cov_score(X), color='violet',
label='Shrunk Covariance MLE', linestyle='-.')
plt.axhline(lw_score(X), color='orange',
label='LedoitWolf MLE', linestyle='-.')
plt.xlabel('nb of components')
plt.ylabel('CV scores')
plt.legend(loc='lower right')
plt.title(title)
plt.show()
| bsd-3-clause |
cdsi/grima | src/python/grima/plot.py | 1 | 25680 | from __future__ import division
from __future__ import with_statement
# standard python libraries
try:
import json
except:
import simplejson as json
import re
import os
import time
# matplotlib.sf.net
import matplotlib
import numpy
# www.gtk.org
import gtk
# our own libraries
from elrond.static import *
from elrond.util import APINotImplemented, Object, Property, clamp  # NOTE: clamp (used by IBackend.stripchart) is assumed to come from elrond.util
def parse(f):
x = []
y = []
fd = open(f, 'r')
lines = [l.strip() for l in fd.readlines()]
fd.close()
for i, line in enumerate(lines):
data = filter(lambda x: x != '', re.split('[, ]', line.strip()))
try:
y.append(float(data[1]))
x.append(float(data[0]))
except IndexError:
y.append(float(data[0]))
x.append(i)
return x, y
##
## Backends...
##
class IBackend(Object):
"""The IBackend class is the base implementation for any class that can produce plots.
e.g. ASCII art or fancy GUI backends like matplotlib.
"""
def stripchart(self, filename):
x_list, y_list = parse(filename)
self.clear()
self.prefs.ymin = 0
self.prefs.ymax = 100
step = 100
x_first = x_list[0: clamp(step, u=len(x_list))]
y_first = y_list[0: clamp(step, u=len(y_list))]
self.prefs.xmin = 0
self.prefs.xmax = len(x_first)
for i in range(0, len(x_first)):
self.plotl(x_first[0:i + 1], y_first[0:i + 1])
self.draw()
self.plotl(x_list, y_list)
for i in range(0, len(x_list)):
self.prefs.xmin = i + 1
self.prefs.xmax = i + 1 + step
self.draw()
def open(self, filename, i=None):
self.clear()
with open(filename, 'r') as f:
storage = json.load(f)
print 'File: %s' % (filename)
print 'Timestamp: %s' % (storage['timestamp'])
for data in storage['data']:
self.plotl(data['x'], data['y'], i=i, xlabel=data['xlabel'], ylabel=data['ylabel'],
style=data['style'], color=int(data['color'], 0))
self.draw()
if not self.prefs.overlay:
self.__storage['data'] = []
self.__storage['data'].extend(storage['data'])
def save(self, filename):
self.__storage['timestamp'] = time.ctime(time.time())
with open(filename, 'w') as f:
json.dump(self.__storage, f, indent=8)
# TODO:
def stats(self, x, y):
print ' len =', len(y)
print ' mean =', numpy.mean(y)
print ' sum =', sum(y)
print ' std =', numpy.std(y)
ymin = numpy.min(y)
print ' ymin =', ymin
print ' xmin =', x[y.index(ymin)]
ymax = numpy.max(y)
print ' ymax =', ymax
print ' xmax =', x[y.index(ymax)]
def __plot__(self, x, y, style=None, color=0xFF0000, xlabel=None, ylabel=None):
self.stats(x, y)
data = {
'xlabel': xlabel,
'x': x,
'ylabel': ylabel,
'y': y,
'style': style,
'color': '0x%06X' % (color)
}
if not self.prefs.overlay:
self.__storage['data'] = []
self.__storage['data'].append(data)
def plotr(self, *args, **kwargs):
self.__plot__(*args, **kwargs)
def plotl(self, *args, **kwargs):
self.__plot__(*args, **kwargs)
def plotlh(self, *args, **kwargs):
self.__plot__(*args, **kwargs)
def plotlv(self, *args, **kwargs):
self.__plot__(*args, **kwargs)
def plotrh(self, *args, **kwargs):
self.__plot__(*args, **kwargs)
def plotrv(self, *args, **kwargs):
self.__plot__(*args, **kwargs)
def draw(self, *args, **kwargs):
pass
def clear(self, *args, **kwargs):
pass
def show(self, *args, **kwargs):
pass
def hide(self, *args, **kwargs):
pass
def run(self, *args, **kwargs):
pass
def __init__(self):
Object.__init__(self)
self.__storage = {
'data': []
}
class ConsoleBackend(IBackend):
"""This is the simplest of backends. This simply prints to the console. This backend
must be used within a ConsoleContainer.
"""
def __plot__(self, x, y, style=None, color=0xFF0000, xlabel=None, ylabel=None):
IBackend.__plot__(self, x, y, style=style, color=color, xlabel=xlabel, ylabel=ylabel)
for i in range(0, len(x)):
print 'x,y[%d] = %.4f, %4f' % (i, x[i], y[i])
class IMatplotlibBackend(IBackend):
"""This backend uses matplotlib to prodce plots. An ImageContainer or WindowContainer in-turn
contains this backed to either render the plot to and image or to a GUI.
"""
def __plot__(self, x, y, i=None, axes=None, style='-', color=0xFF0000, xlabel=None, ylabel=None):
IBackend.__plot__(self, x, y, style=style, color=color, xlabel=xlabel, ylabel=ylabel)
if i is None or axes is None:
# TODO: raise an exception
return
if not xlabel is None:
# TODO: axes.set_xlabel(xlabel)
pass
if not ylabel is None:
# TODO: axes.set_ylabel(ylabel)
pass
if i > self.__nsubplots - 1:
self.subplot_new()
subplot = self.__subplots[i][axes]
subplot.plot(x, y, style, color='#%06X' % (color))
subplot.grid(True)
def plotl(self, *args, **kwargs):
if not 'i' in kwargs:
kwargs['i'] = self.__isubplot
kwargs['axes'] = 'axl'
self.__plot__(*args, **kwargs)
@APINotImplemented
def plotr(self, *args, **kwargs):
if not 'i' in kwargs:
kwargs['i'] = self.__isubplot
kwargs['axes'] = 'axr'
self.__plot__(*args, **kwargs)
def plotlh(self, y, i=None, style='--', color=0xFF0000):
# TODO: must call self.__plot__
if i is None:
i = self.__isubplot
self.__subplots[i]['axl'].axhline(y, ls=style, color='#%06X' % (color))
self.__subplots[i]['axl'].grid(True)
def plotlv(self, x, i=None, style='--', color=0xFF0000):
# TODO: must call self.__plot__
if i is None:
i = self.__isubplot
self.__subplots[i]['axl'].axvline(x, ls=style, color='#%06X' % (color))
self.__subplots[i]['axl'].grid(True)
@APINotImplemented
def plotrh(self, y, i=None, style='--', color=0xFF0000):
# TODO: must call self.__plot__
if i is None:
i = self.__isubplot
self.__subplots[i]['axr'].axhline(y, ls=style, color='#%06X' % (color))
self.__subplots[i]['axr'].grid(True)
@APINotImplemented
def plotrv(self, x, i=None, style='--', color=0xFF0000):
# TODO: must call self.__plot__
if i is None:
i = self.__isubplot
self.__subplots[i]['axr'].axvline(x, ls=style, color='#%06X' % (color))
self.__subplots[i]['axr'].grid(True)
def __draw(self, subplot, limits):
subplot.axis('auto')
if filter(lambda x: x != 0, limits):
subplot.axis(limits)
def draw(self):
limits = [self.prefs.xmin, self.prefs.xmax, self.prefs.yminl, self.prefs.ymaxl]
for subplot in self.__subplots:
self.__draw(subplot['axl'], limits)
# limits = [self.prefs.xmin, self.prefs.xmax, self.prefs.yminr, self.prefs.ymaxr]
# for subplot in self.__subplots:
# self.__draw(subplot['axr'], limits)
self.canvas.draw()
def __reset(self):
for i, subplot in enumerate(self.__subplots):
axl = subplot['axl']
axr = subplot['axr']
axl.grid(True)
axl.yaxis.set_label_position('left')
axl.yaxis.tick_left()
# axr.grid(True)
# axr.yaxis.set_label_position('right')
# axr.yaxis.tick_right()
axl.change_geometry(self.__nsubplots, 1, self.__nsubplots - i)
self.figure.subplots_adjust()
def clear(self):
if not self.prefs.overlay:
for subplot in self.__subplots:
try:
subplot['axl'].clear()
subplot['axr'].clear()
except:
pass
self.__reset()
def subplot_new(self):
self.__isubplot = len(self.__subplots)
self.__nsubplots = self.__isubplot + 1
axl = self.figure.add_subplot(self.__nsubplots, 1, self.__nsubplots)
axr = None # axl.twinx()
self.__subplots.append({'axl': axl, 'axr': axr})
self.__reset()
def __init__(self):
IBackend.__init__(self)
from matplotlib.figure import Figure
self.figure = Figure()
self.__subplots = []
self.subplot_new()
class MatplotlibImageBackend(IMatplotlibBackend):
def render(self, filename):
self.figure.savefig(filename)
def __init__(self):
IMatplotlibBackend.__init__(self)
from matplotlib.backends.backend_cairo \
import FigureCanvasCairo as FigureCanvas
self.canvas = FigureCanvas(self.figure)
class MatplotlibWindowBackend(IMatplotlibBackend):
@Property
def widget():
def fget(self):
self.__widget = gtk.VBox()
self.__widget.pack_start(self.canvas)
self.__widget.pack_start(self.toolbar, False, False)
return self.__widget
def fset(self, widget):
self.__widget = widget
return locals()
def show(self):
self.__widget.show()
self.canvas.show()
self.toolbar.show()
def hide(self):
self.toolbar.hide()
self.canvas.hide()
self.__widget.hide()
def __init__(self):
IMatplotlibBackend.__init__(self)
from matplotlib.backends.backend_gtk \
import FigureCanvasGTK as FigureCanvas
self.canvas = FigureCanvas(self.figure)
from matplotlib.backends.backend_gtk \
import NavigationToolbar2GTK as NavigationToolbar
self.toolbar = NavigationToolbar(self.canvas, None)
##
## Containers...
##
class IContainer(Object):
"""The IContainer class is the base implementation for any class that contains IBackends.
e.g. console wrappers, image only wrappers, or fancy GUI toolkits like GTK+.
"""
@Property
def prefs():
def fget(self):
return self.backend.prefs
def fset(self, prefs):
self.backend.prefs = prefs
return locals()
def plotr(self, *args, **kwargs):
self.backend.plotr(*args, **kwargs)
def plotl(self, *args, **kwargs):
self.backend.plotl(*args, **kwargs)
def plotlh(self, *args, **kwargs):
self.backend.plotlh(*args, **kwargs)
def plotlv(self, *args, **kwargs):
self.backend.plotlv(*args, **kwargs)
def plotrh(self, *args, **kwargs):
self.backend.plotrh(*args, **kwargs)
def plotrv(self, *args, **kwargs):
self.backend.plotrv(*args, **kwargs)
def draw(self, *args, **kwargs):
self.backend.draw(*args, **kwargs)
def clear(self, *args, **kwargs):
self.backend.clear(*args, **kwargs)
def show(self, *args, **kwargs):
self.backend.show(*args, **kwargs)
def hide(self, *args, **kwargs):
self.backend.hide(*args, **kwargs)
def run(self, *args, **kwargs):
self.backend.run(*args, **kwargs)
def stripchart(self, *args, **kwargs):
self.backend.stripchart(*args, **kwargs)
def open(self, *args, **kwargs):
self.backend.open(*args, **kwargs)
def save(self, *args, **kwargs):
self.backend.save(*args, **kwargs)
class ConsoleContainer(IContainer):
def __init__(self):
IContainer.__init__(self)
self.backend = ConsoleBackend()
class ImageContainer(IContainer):
def draw(self, *args, **kwargs):
IContainer.draw(self, *args, **kwargs)
self.backend.render('foobar.png')
def __init__(self):
IContainer.__init__(self)
self.backend = MatplotlibImageBackend()
class WindowContainer(IContainer):
@Property
def prefs():
def fget(self):
return self.backend.prefs
def fset(self, prefs):
self.backend.prefs = prefs
widget = self.__builder.get_object('preferences_xmin_entry')
widget.set_text(str(self.backend.prefs.xmin))
widget = self.__builder.get_object('preferences_xmax_entry')
widget.set_text(str(self.backend.prefs.xmax))
widget = self.__builder.get_object('preferences_yminl_entry')
widget.set_text(str(self.backend.prefs.yminl))
widget = self.__builder.get_object('preferences_ymaxl_entry')
widget.set_text(str(self.backend.prefs.ymaxl))
widget = self.__builder.get_object('preferences_yminr_entry')
widget.set_text(str(self.backend.prefs.yminr))
widget = self.__builder.get_object('preferences_ymaxr_entry')
widget.set_text(str(self.backend.prefs.ymaxr))
return locals()
@Property
def title():
def fget(self):
return self.__title
def fset(self, title):
self.__title = title
if not self.__title:
return
self.__container.set_title(self.__title)
return locals()
def clear(self, *args, **kwargs):
IContainer.clear(self, *args, **kwargs)
def show(self, *args, **kwargs):
IContainer.show(self, *args, **kwargs)
self.__container.show()
def hide(self, *args, **kwargs):
IContainer.hide(self, *args, **kwargs)
self.__container.hide()
def run(self):
gtk.main()
def on_open_ok_button_clicked(self, widget, data=None):
self.__open.hide()
filename = self.__open.get_filename()
if not filename:
return
self.__open.set_filename(filename)
self.open(filename)
def on_open_cancel_button_clicked(self, widget, data=None):
self.__open.hide()
def on_open_chooser_delete_event(self, widget, data=None):
self.__open.hide()
return True
def on_plot_open_button_clicked(self, widget, data=None):
self.__open = self.__builder.get_object('open_chooser')
self.__open.show()
def on_plot_save_button_clicked(self, widget, data=None):
if not self.filename:
self.on_plot_saveas_button_clicked(self, None)
if self.filename:
self.save(self.filename)
def on_saveas_ok_button_clicked(self, widget, data=None):
self.__saveas.hide()
filename = self.__saveas.get_filename()
if not filename:
return
self.__saveas.set_filename(filename)
self.filename = filename
self.on_plot_save_button_clicked(self, None)
def on_saveas_cancel_button_clicked(self, widget, data=None):
self.__saveas.hide()
def on_saveas_chooser_delete_event(self, widget, data=None):
self.__saveas.hide()
return True
def on_plot_saveas_button_clicked(self, widget, data=None):
self.__saveas = self.__builder.get_object('saveas_chooser')
self.__saveas.show()
def on_preferences_ok_button_clicked(self, widget, data=None):
self.__preferences.hide()
widget = self.__builder.get_object('preferences_xmin_entry')
self.prefs.xmin = float(widget.get_text())
widget = self.__builder.get_object('preferences_xmax_entry')
self.prefs.xmax = float(widget.get_text())
widget = self.__builder.get_object('preferences_yminl_entry')
self.prefs.yminl = float(widget.get_text())
widget = self.__builder.get_object('preferences_ymaxl_entry')
self.prefs.ymaxl = float(widget.get_text())
widget = self.__builder.get_object('preferences_yminr_entry')
self.prefs.yminr = float(widget.get_text())
widget = self.__builder.get_object('preferences_ymaxr_entry')
self.prefs.ymaxr = float(widget.get_text())
self.draw()
def on_preferences_cancel_button_clicked(self, widget, data=None):
self.__preferences.hide()
def on_plot_preferences_button_clicked(self, widget, data=None):
self.__preferences = self.__builder.get_object('preferences_dialog')
self.__preferences.show()
def on_preferences_dialog_delete_event(self, widget, data=None):
self.__preferences.hide()
return True
def on_plot_overlay_button_toggled(self, widget, data=None):
self.prefs.overlay = widget.get_active()
def on_plot_window_destroy(self, widget, data=None):
gtk.main_quit()
def __init__(self, container):
IContainer.__init__(self)
self.backend = MatplotlibWindowBackend()
buildername = os.environ['GRIMA_ETC'] + os.sep + 'grima-plot.ui'
self.__builder = gtk.Builder()
self.__builder.add_from_file(buildername)
self.__builder.connect_signals(self)
if container:
self.__container = container
widget = self.__builder.get_object('plot_embeddable')
container = self.__builder.get_object('plot_container')
container.remove(widget)
self.__container.add(widget)
else:
self.__container = self.__builder.get_object('plot_window')
# TODO: this should not be needed, but somehow the widget show'ing order
# is all screwed up and the window doesn't display correctly without this
self.__container.set_default_size(700, 500)
widget = self.__builder.get_object('plot_backend')
widget.add(self.backend.widget)
# TODO:
self.filename = None
##
## This is the public API...
##
class Plot(Object):
def __create_display(self):
if not self.__enabled:
return
self.__display = None
if self.type == 'console':
self.__display = ConsoleContainer()
if self.type == 'image':
self.__display = ImageContainer()
if self.type == 'window':
self.__display = WindowContainer(self.container)
try:
self.__display.prefs = self
self.__display.title = self.title
except:
self.__enabled = False
@Property
def enabled():
def fget(self):
return self.__enabled
def fset(self, enabled):
self.__enabled = enabled
self.__create_display()
return locals()
@Property
def type():
def fget(self):
return self.__type
def fset(self, tipe):
self.__type = tipe
self.__create_display()
return locals()
@Property
def title():
def fget(self):
return self.__title
def fset(self, title):
self.__title = title
return locals()
def plotr(self, *args, **kwargs):
if not self.enabled:
return
self.__display.plotr(*args, **kwargs)
def plotl(self, *args, **kwargs):
if not self.enabled:
return
self.__display.plotl(*args, **kwargs)
def plotlh(self, *args, **kwargs):
if not self.enabled:
return
self.__display.plotlh(*args, **kwargs)
def plotlv(self, *args, **kwargs):
if not self.enabled:
return
self.__display.plotlv(*args, **kwargs)
def plotrh(self, *args, **kwargs):
if not self.enabled:
return
self.__display.plotrh(*args, **kwargs)
def plotrv(self, *args, **kwargs):
if not self.enabled:
return
self.__display.plotrv(*args, **kwargs)
def draw(self, *args, **kwargs):
if not self.enabled:
return
self.__display.draw(*args, **kwargs)
def clear(self, *args, **kwargs):
if not self.enabled:
return
self.__display.clear(*args, **kwargs)
def show(self, *args, **kwargs):
if not self.enabled:
return
self.__display.show(*args, **kwargs)
def hide(self, *args, **kwargs):
if not self.enabled:
return
self.__display.hide(*args, **kwargs)
def run(self, *args, **kwargs):
if not self.enabled:
return
self.__display.run(*args, **kwargs)
def stripchart(self, *args, **kwargs):
if not self.enabled:
return
self.__display.stripchart(*args, **kwargs)
def open(self, *args, **kwargs):
if not self.enabled:
return
self.__display.open(*args, **kwargs)
def save(self, *args, **kwargs):
if not self.enabled:
return
self.__display.save(*args, **kwargs)
def __init__(self):
Object.__init__(self)
self.enabled = False
self.container = None
self.type = 'console'
self.title = None
# TODO: use preferences
self.xmin = 0
self.xmax = 0
self.yminl = 0
self.ymaxl = 0
self.yminr = 0
self.ymaxr = 0
self.overlay = False
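# A minimal, hypothetical usage sketch of the Plot API defined above. The
# argument shapes passed to plotl() are assumptions (they are forwarded
# untouched to the backend), and a working GTK display is required, so the
# sketch only runs when this module is executed directly.
if __name__ == '__main__':
        demo = Plot()
        demo.type = 'window'          # one of 'console', 'image', 'window'
        demo.title = 'Plot demo'
        demo.enabled = True           # creates the display container
        demo.plotl([0, 1, 2, 3], [0.0, 1.0, 4.0, 9.0])
        demo.draw()
        demo.run()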
# $Id:$
#
# Local Variables:
# indent-tabs-mode: nil
# python-continuation-offset: 2
# python-indent: 8
# End:
# vim: ai et si sw=8 ts=8
| mit |
ben-hopps/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_cocoaagg.py | 70 | 8970 | from __future__ import division
"""
backend_cocoaagg.py
A native Cocoa backend via PyObjC in OSX.
Author: Charles Moad ([email protected])
Notes:
- Requires PyObjC (currently testing v1.3.7)
- The Tk backend works nicely on OSX. This code
primarily serves as an example of embedding a
matplotlib rendering context into a cocoa app
using a NSImageView.
"""
import os, sys
try:
import objc
except:
print >>sys.stderr, 'The CocoaAgg backend requires PyObjC to be installed!'
print >>sys.stderr, ' (currently testing v1.3.7)'
sys.exit()
from Foundation import *
from AppKit import *
from PyObjCTools import NibClassBuilder, AppHelper
import matplotlib
from matplotlib.figure import Figure
from matplotlib.backend_bases import FigureManagerBase
from backend_agg import FigureCanvasAgg
from matplotlib._pylab_helpers import Gcf
mplBundle = NSBundle.bundleWithPath_(os.path.dirname(__file__))
def new_figure_manager(num, *args, **kwargs):
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass( *args, **kwargs )
canvas = FigureCanvasCocoaAgg(thisFig)
return FigureManagerCocoaAgg(canvas, num)
def show():
for manager in Gcf.get_all_fig_managers():
manager.show()
def draw_if_interactive():
if matplotlib.is_interactive():
figManager = Gcf.get_active()
if figManager is not None:
figManager.show()
class FigureCanvasCocoaAgg(FigureCanvasAgg):
def draw(self):
FigureCanvasAgg.draw(self)
def blit(self, bbox):
pass
def start_event_loop(self,timeout):
FigureCanvasBase.start_event_loop_default(self,timeout)
start_event_loop.__doc__=FigureCanvasBase.start_event_loop_default.__doc__
def stop_event_loop(self):
FigureCanvasBase.stop_event_loop_default(self)
stop_event_loop.__doc__=FigureCanvasBase.stop_event_loop_default.__doc__
NibClassBuilder.extractClasses('Matplotlib.nib', mplBundle)
class MatplotlibController(NibClassBuilder.AutoBaseClass):
# available outlets:
# NSWindow plotWindow
# PlotView plotView
def awakeFromNib(self):
# Get a reference to the active canvas
NSApp().setDelegate_(self)
self.app = NSApp()
self.canvas = Gcf.get_active().canvas
self.plotView.canvas = self.canvas
self.canvas.plotView = self.plotView
self.plotWindow.setAcceptsMouseMovedEvents_(True)
self.plotWindow.makeKeyAndOrderFront_(self)
self.plotWindow.setDelegate_(self)#.plotView)
self.plotView.setImageFrameStyle_(NSImageFrameGroove)
self.plotView.image_ = NSImage.alloc().initWithSize_((0,0))
self.plotView.setImage_(self.plotView.image_)
# Make imageview first responder for key events
self.plotWindow.makeFirstResponder_(self.plotView)
# Force the first update
self.plotView.windowDidResize_(self)
def windowDidResize_(self, sender):
self.plotView.windowDidResize_(sender)
def windowShouldClose_(self, sender):
#NSApplication.sharedApplication().stop_(self)
self.app.stop_(self)
return objc.YES
def saveFigure_(self, sender):
p = NSSavePanel.savePanel()
if(p.runModal() == NSFileHandlingPanelOKButton):
self.canvas.print_figure(p.filename())
def printFigure_(self, sender):
op = NSPrintOperation.printOperationWithView_(self.plotView)
op.runOperation()
class PlotWindow(NibClassBuilder.AutoBaseClass):
pass
class PlotView(NibClassBuilder.AutoBaseClass):
def updatePlot(self):
w,h = self.canvas.get_width_height()
# Remove all previous images
for i in xrange(self.image_.representations().count()):
self.image_.removeRepresentation_(self.image_.representations().objectAtIndex_(i))
self.image_.setSize_((w,h))
brep = NSBitmapImageRep.alloc().initWithBitmapDataPlanes_pixelsWide_pixelsHigh_bitsPerSample_samplesPerPixel_hasAlpha_isPlanar_colorSpaceName_bytesPerRow_bitsPerPixel_(
(self.canvas.buffer_rgba(0,0),'','','',''), # Image data
w, # width
h, # height
            8, # bits per sample
            4, # samples per pixel
True, # has alpha?
False, # is planar?
NSCalibratedRGBColorSpace, # color space
w*4, # row bytes
32) # bits per pixel
self.image_.addRepresentation_(brep)
self.setNeedsDisplay_(True)
def windowDidResize_(self, sender):
w,h = self.bounds().size
dpi = self.canvas.figure.dpi
self.canvas.figure.set_size_inches(w / dpi, h / dpi)
self.canvas.draw()
self.updatePlot()
def mouseDown_(self, event):
loc = self.convertPoint_fromView_(event.locationInWindow(), None)
type = event.type()
if (type == NSLeftMouseDown):
button = 1
else:
print >>sys.stderr, 'Unknown mouse event type:', type
button = -1
self.canvas.button_press_event(loc.x, loc.y, button)
self.updatePlot()
def mouseDragged_(self, event):
loc = self.convertPoint_fromView_(event.locationInWindow(), None)
self.canvas.motion_notify_event(loc.x, loc.y)
self.updatePlot()
def mouseUp_(self, event):
loc = self.convertPoint_fromView_(event.locationInWindow(), None)
type = event.type()
if (type == NSLeftMouseUp):
button = 1
else:
print >>sys.stderr, 'Unknown mouse event type:', type
button = -1
self.canvas.button_release_event(loc.x, loc.y, button)
self.updatePlot()
def keyDown_(self, event):
self.canvas.key_press_event(event.characters())
self.updatePlot()
def keyUp_(self, event):
self.canvas.key_release_event(event.characters())
self.updatePlot()
class MPLBootstrap(NSObject):
# Loads the nib containing the PlotWindow and PlotView
def startWithBundle_(self, bundle):
#NSApplicationLoad()
if not bundle.loadNibFile_externalNameTable_withZone_('Matplotlib.nib', {}, None):
print >>sys.stderr, 'Unable to load Matplotlib Cocoa UI!'
sys.exit()
class FigureManagerCocoaAgg(FigureManagerBase):
def __init__(self, canvas, num):
FigureManagerBase.__init__(self, canvas, num)
try:
WMEnable('Matplotlib')
except:
# MULTIPLE FIGURES ARE BUGGY!
pass # If there are multiple figures we only need to enable once
#self.bootstrap = MPLBootstrap.alloc().init().performSelectorOnMainThread_withObject_waitUntilDone_(
# 'startWithBundle:',
# mplBundle,
# False)
def show(self):
# Load a new PlotWindow
self.bootstrap = MPLBootstrap.alloc().init().performSelectorOnMainThread_withObject_waitUntilDone_(
'startWithBundle:',
mplBundle,
False)
NSApplication.sharedApplication().run()
FigureManager = FigureManagerCocoaAgg
#### Everything below taken from PyObjC examples
#### This is a hack to allow python scripts to access
#### the window manager without running pythonw.
def S(*args):
return ''.join(args)
OSErr = objc._C_SHT
OUTPSN = 'o^{ProcessSerialNumber=LL}'
INPSN = 'n^{ProcessSerialNumber=LL}'
FUNCTIONS=[
# These two are public API
( u'GetCurrentProcess', S(OSErr, OUTPSN) ),
( u'SetFrontProcess', S(OSErr, INPSN) ),
# This is undocumented SPI
( u'CPSSetProcessName', S(OSErr, INPSN, objc._C_CHARPTR) ),
( u'CPSEnableForegroundOperation', S(OSErr, INPSN) ),
]
def WMEnable(name='Python'):
if isinstance(name, unicode):
name = name.encode('utf8')
mainBundle = NSBundle.mainBundle()
bPath = os.path.split(os.path.split(os.path.split(sys.executable)[0])[0])[0]
if mainBundle.bundlePath() == bPath:
return True
bndl = NSBundle.bundleWithPath_(objc.pathForFramework('/System/Library/Frameworks/ApplicationServices.framework'))
if bndl is None:
print >>sys.stderr, 'ApplicationServices missing'
return False
d = {}
objc.loadBundleFunctions(bndl, d, FUNCTIONS)
for (fn, sig) in FUNCTIONS:
if fn not in d:
print >>sys.stderr, 'Missing', fn
return False
err, psn = d['GetCurrentProcess']()
if err:
print >>sys.stderr, 'GetCurrentProcess', (err, psn)
return False
err = d['CPSSetProcessName'](psn, name)
if err:
print >>sys.stderr, 'CPSSetProcessName', (err, psn)
return False
err = d['CPSEnableForegroundOperation'](psn)
if err:
#print >>sys.stderr, 'CPSEnableForegroundOperation', (err, psn)
return False
err = d['SetFrontProcess'](psn)
if err:
print >>sys.stderr, 'SetFrontProcess', (err, psn)
return False
return True
| agpl-3.0 |
aetilley/scikit-learn | sklearn/neighbors/classification.py | 106 | 13987 | """Nearest Neighbor Classification"""
# Authors: Jake Vanderplas <[email protected]>
# Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Sparseness support by Lars Buitinck <[email protected]>
# Multi-output support by Arnaud Joly <[email protected]>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import numpy as np
from scipy import stats
from ..utils.extmath import weighted_mode
from .base import \
_check_weights, _get_weights, \
NeighborsBase, KNeighborsMixin,\
RadiusNeighborsMixin, SupervisedIntegerMixin
from ..base import ClassifierMixin
from ..utils import check_array
class KNeighborsClassifier(NeighborsBase, KNeighborsMixin,
SupervisedIntegerMixin, ClassifierMixin):
"""Classifier implementing the k-nearest neighbors vote.
Read more in the :ref:`User Guide <classification>`.
Parameters
----------
n_neighbors : int, optional (default = 5)
Number of neighbors to use by default for :meth:`kneighbors` queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or DistanceMetric object (default = 'minkowski')
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params: dict, optional (default = None)
additional keyword arguments for the metric function.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import KNeighborsClassifier
>>> neigh = KNeighborsClassifier(n_neighbors=3)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
KNeighborsClassifier(...)
>>> print(neigh.predict([[1.1]]))
[0]
>>> print(neigh.predict_proba([[0.9]]))
[[ 0.66666667 0.33333333]]
See also
--------
RadiusNeighborsClassifier
KNeighborsRegressor
RadiusNeighborsRegressor
NearestNeighbors
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
.. warning::
Regarding the Nearest Neighbors algorithms, if it is found that two
neighbors, neighbor `k+1` and `k`, have identical distances but
different labels, the results will depend on the ordering of the
training data.
http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, n_neighbors=5,
weights='uniform', algorithm='auto', leaf_size=30,
p=2, metric='minkowski', metric_params=None, **kwargs):
self._init_params(n_neighbors=n_neighbors,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params, **kwargs)
self.weights = _check_weights(weights)
def predict(self, X):
"""Predict the class labels for the provided data
Parameters
----------
X : array of shape [n_samples, n_features]
A 2-D array representing the test points.
Returns
-------
y : array of shape [n_samples] or [n_samples, n_outputs]
Class labels for each data sample.
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.kneighbors(X)
classes_ = self.classes_
_y = self._y
if not self.outputs_2d_:
_y = self._y.reshape((-1, 1))
classes_ = [self.classes_]
n_outputs = len(classes_)
n_samples = X.shape[0]
weights = _get_weights(neigh_dist, self.weights)
y_pred = np.empty((n_samples, n_outputs), dtype=classes_[0].dtype)
for k, classes_k in enumerate(classes_):
if weights is None:
mode, _ = stats.mode(_y[neigh_ind, k], axis=1)
else:
mode, _ = weighted_mode(_y[neigh_ind, k], weights, axis=1)
mode = np.asarray(mode.ravel(), dtype=np.intp)
y_pred[:, k] = classes_k.take(mode)
if not self.outputs_2d_:
y_pred = y_pred.ravel()
return y_pred
def predict_proba(self, X):
"""Return probability estimates for the test data X.
Parameters
----------
X : array, shape = (n_samples, n_features)
A 2-D array representing the test points.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
of such arrays if n_outputs > 1.
The class probabilities of the input samples. Classes are ordered
by lexicographic order.
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.kneighbors(X)
classes_ = self.classes_
_y = self._y
if not self.outputs_2d_:
_y = self._y.reshape((-1, 1))
classes_ = [self.classes_]
n_samples = X.shape[0]
weights = _get_weights(neigh_dist, self.weights)
if weights is None:
weights = np.ones_like(neigh_ind)
all_rows = np.arange(X.shape[0])
probabilities = []
for k, classes_k in enumerate(classes_):
pred_labels = _y[:, k][neigh_ind]
proba_k = np.zeros((n_samples, classes_k.size))
# a simple ':' index doesn't work right
for i, idx in enumerate(pred_labels.T): # loop is O(n_neighbors)
proba_k[all_rows, idx] += weights[:, i]
# normalize 'votes' into real [0,1] probabilities
normalizer = proba_k.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba_k /= normalizer
probabilities.append(proba_k)
if not self.outputs_2d_:
probabilities = probabilities[0]
return probabilities
class RadiusNeighborsClassifier(NeighborsBase, RadiusNeighborsMixin,
SupervisedIntegerMixin, ClassifierMixin):
"""Classifier implementing a vote among neighbors within a given radius
Read more in the :ref:`User Guide <classification>`.
Parameters
----------
radius : float, optional (default = 1.0)
Range of parameter space to use by default for :meth:`radius_neighbors`
queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDtree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or DistanceMetric object (default='minkowski')
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
outlier_label : int, optional (default = None)
Label, which is given for outlier samples (samples with no
neighbors on given radius).
If set to None, a ValueError is raised when an outlier is detected.
metric_params: dict, optional (default = None)
additional keyword arguments for the metric function.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import RadiusNeighborsClassifier
>>> neigh = RadiusNeighborsClassifier(radius=1.0)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
RadiusNeighborsClassifier(...)
>>> print(neigh.predict([[1.5]]))
[0]
See also
--------
KNeighborsClassifier
RadiusNeighborsRegressor
KNeighborsRegressor
NearestNeighbors
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, radius=1.0, weights='uniform',
algorithm='auto', leaf_size=30, p=2, metric='minkowski',
outlier_label=None, metric_params=None, **kwargs):
self._init_params(radius=radius,
algorithm=algorithm,
leaf_size=leaf_size,
metric=metric, p=p, metric_params=metric_params,
**kwargs)
self.weights = _check_weights(weights)
self.outlier_label = outlier_label
def predict(self, X):
"""Predict the class labels for the provided data
Parameters
----------
X : array of shape [n_samples, n_features]
A 2-D array representing the test points.
Returns
-------
y : array of shape [n_samples] or [n_samples, n_outputs]
Class labels for each data sample.
"""
X = check_array(X, accept_sparse='csr')
n_samples = X.shape[0]
neigh_dist, neigh_ind = self.radius_neighbors(X)
inliers = [i for i, nind in enumerate(neigh_ind) if len(nind) != 0]
outliers = [i for i, nind in enumerate(neigh_ind) if len(nind) == 0]
classes_ = self.classes_
_y = self._y
if not self.outputs_2d_:
_y = self._y.reshape((-1, 1))
classes_ = [self.classes_]
n_outputs = len(classes_)
if self.outlier_label is not None:
neigh_dist[outliers] = 1e-6
elif outliers:
raise ValueError('No neighbors found for test samples %r, '
'you can try using larger radius, '
'give a label for outliers, '
'or consider removing them from your dataset.'
% outliers)
weights = _get_weights(neigh_dist, self.weights)
y_pred = np.empty((n_samples, n_outputs), dtype=classes_[0].dtype)
for k, classes_k in enumerate(classes_):
pred_labels = np.array([_y[ind, k] for ind in neigh_ind],
dtype=object)
if weights is None:
mode = np.array([stats.mode(pl)[0]
for pl in pred_labels[inliers]], dtype=np.int)
else:
mode = np.array([weighted_mode(pl, w)[0]
for (pl, w)
in zip(pred_labels[inliers], weights)],
dtype=np.int)
mode = mode.ravel()
y_pred[inliers, k] = classes_k.take(mode)
if outliers:
y_pred[outliers, :] = self.outlier_label
if not self.outputs_2d_:
y_pred = y_pred.ravel()
return y_pred
| bsd-3-clause |
Cophy08/ggplot | ggplot/themes/theme_seaborn.py | 12 | 8893 | from .theme import theme
import matplotlib as mpl
import matplotlib.pyplot as plt
from numpy import isreal
class theme_seaborn(theme):
"""
Theme for seaborn.
Copied from mwaskom's seaborn:
https://github.com/mwaskom/seaborn/blob/master/seaborn/rcmod.py
Parameters
----------
style: whitegrid | darkgrid | nogrid | ticks
Style of axis background.
context: notebook | talk | paper | poster
Intended context for resulting figures.
gridweight: extra heavy | heavy | medium | light
Width of the grid lines. None
"""
def __init__(self, style="whitegrid", gridweight=None, context="notebook"):
super(theme_seaborn, self).__init__(complete=True)
self.style = style
self.gridweight = gridweight
self.context = context
self._set_theme_seaborn_rcparams(self._rcParams, self.style, self.gridweight, self.context)
def _set_theme_seaborn_rcparams(self, rcParams, style, gridweight, context):
"""helper method to set the default rcParams and other theming relevant
things
"""
# select grid line width:
gridweights = {
'extra heavy': 1.5,
'heavy': 1.1,
'medium': 0.8,
'light': 0.5,
}
if gridweight is None:
if context == "paper":
glw = gridweights["medium"]
else:
glw = gridweights['extra heavy']
elif isreal(gridweight):
glw = gridweight
else:
glw = gridweights[gridweight]
if style == "darkgrid":
lw = .8 if context == "paper" else 1.5
ax_params = {"axes.facecolor": "#EAEAF2",
"axes.edgecolor": "white",
"axes.linewidth": 0,
"axes.grid": True,
"axes.axisbelow": True,
"grid.color": "w",
"grid.linestyle": "-",
"grid.linewidth": glw}
elif style == "whitegrid":
lw = 1.0 if context == "paper" else 1.7
ax_params = {"axes.facecolor": "white",
"axes.edgecolor": "#CCCCCC",
"axes.linewidth": lw,
"axes.grid": True,
"axes.axisbelow": True,
"grid.color": "#DDDDDD",
"grid.linestyle": "-",
"grid.linewidth": glw}
elif style == "nogrid":
ax_params = {"axes.grid": False,
"axes.facecolor": "white",
"axes.edgecolor": "black",
"axes.linewidth": 1}
elif style == "ticks":
ticksize = 3. if context == "paper" else 6.
tickwidth = .5 if context == "paper" else 1
ax_params = {"axes.grid": False,
"axes.facecolor": "white",
"axes.edgecolor": "black",
"axes.linewidth": 1,
"xtick.direction": "out",
"ytick.direction": "out",
"xtick.major.width": tickwidth,
"ytick.major.width": tickwidth,
"xtick.minor.width": tickwidth,
"xtick.minor.width": tickwidth,
"xtick.major.size": ticksize,
"xtick.minor.size": ticksize / 2,
"ytick.major.size": ticksize,
"ytick.minor.size": ticksize / 2}
rcParams.update(ax_params)
# Determine the font sizes
if context == "talk":
font_params = {"axes.labelsize": 16,
"axes.titlesize": 19,
"xtick.labelsize": 14,
"ytick.labelsize": 14,
"legend.fontsize": 13,
}
elif context == "notebook":
font_params = {"axes.labelsize": 11,
"axes.titlesize": 12,
"xtick.labelsize": 10,
"ytick.labelsize": 10,
"legend.fontsize": 10,
}
elif context == "poster":
font_params = {"axes.labelsize": 18,
"axes.titlesize": 22,
"xtick.labelsize": 16,
"ytick.labelsize": 16,
"legend.fontsize": 16,
}
elif context == "paper":
font_params = {"axes.labelsize": 8,
"axes.titlesize": 12,
"xtick.labelsize": 8,
"ytick.labelsize": 8,
"legend.fontsize": 8,
}
rcParams.update(font_params)
# Set other parameters
rcParams.update({
"lines.linewidth": 1.1 if context == "paper" else 1.4,
"patch.linewidth": .1 if context == "paper" else .3,
"xtick.major.pad": 3.5 if context == "paper" else 7,
"ytick.major.pad": 3.5 if context == "paper" else 7,
})
# # Set the constant defaults
# mpl.rc("font", family=font)
# mpl.rc("legend", frameon=False, numpoints=1)
# mpl.rc("lines", markeredgewidth=0, solid_capstyle="round")
# mpl.rc("figure", figsize=(8, 5.5))
# mpl.rc("image", cmap="cubehelix")
rcParams["timezone"] = "UTC"
# rcParams["lines.linewidth"] = "1.0"
# rcParams["lines.antialiased"] = "True"
# rcParams["patch.linewidth"] = "0.5"
# rcParams["patch.facecolor"] = "348ABD"
# rcParams["patch.edgecolor"] = "#E5E5E5"
rcParams["patch.antialiased"] = "True"
rcParams["font.family"] = "sans-serif"
rcParams["font.size"] = "12.0"
rcParams["font.serif"] = ["Times", "Palatino", "New Century Schoolbook",
"Bookman", "Computer Modern Roman",
"Times New Roman"]
rcParams["font.sans-serif"] = ["Helvetica", "Avant Garde",
"Computer Modern Sans serif", "Arial"]
# rcParams["axes.facecolor"] = "#E5E5E5"
# rcParams["axes.edgecolor"] = "bcbcbc"
# rcParams["axes.linewidth"] = "1"
# rcParams["axes.grid"] = "True"
# rcParams["axes.titlesize"] = "x-large"
# rcParams["axes.labelsize"] = "large"
# rcParams["axes.labelcolor"] = "black"
# rcParams["axes.axisbelow"] = "True"
rcParams["axes.color_cycle"] = ["#333333", "348ABD", "7A68A6", "A60628",
"467821", "CF4457", "188487", "E24A33"]
# rcParams["grid.color"] = "white"
# rcParams["grid.linewidth"] = "1.4"
# rcParams["grid.linestyle"] = "solid"
# rcParams["xtick.major.size"] = "0"
# rcParams["xtick.minor.size"] = "0"
# rcParams["xtick.major.pad"] = "6"
# rcParams["xtick.minor.pad"] = "6"
# rcParams["xtick.color"] = "#7F7F7F"
# rcParams["xtick.direction"] = "out" # pointing out of axis
# rcParams["ytick.major.size"] = "0"
# rcParams["ytick.minor.size"] = "0"
# rcParams["ytick.major.pad"] = "6"
# rcParams["ytick.minor.pad"] = "6"
# rcParams["ytick.color"] = "#7F7F7F"
# rcParams["ytick.direction"] = "out" # pointing out of axis
rcParams["legend.fancybox"] = "True"
rcParams["figure.figsize"] = "11, 8"
rcParams["figure.facecolor"] = "1.0"
rcParams["figure.edgecolor"] = "0.50"
rcParams["figure.subplot.hspace"] = "0.5"
def apply_theme(self, ax):
""""Styles x,y axes to appear like ggplot2
Must be called after all plot and axis manipulation operations have
been carried out (needs to know final tick spacing)
From: https://github.com/wrobstory/climatic/blob/master/climatic/stylers.py
"""
#Remove axis border
for child in ax.get_children():
if isinstance(child, mpl.spines.Spine):
child.set_alpha(0)
#Restyle the tick lines
for line in ax.get_xticklines() + ax.get_yticklines():
line.set_markersize(5)
line.set_markeredgewidth(1.4)
#Only show bottom left ticks
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
#Set minor grid lines
ax.grid(True, 'minor', color='#F2F2F2', linestyle='-', linewidth=0.7)
if not isinstance(ax.xaxis.get_major_locator(), mpl.ticker.LogLocator):
ax.xaxis.set_minor_locator(mpl.ticker.AutoMinorLocator(2))
if not isinstance(ax.yaxis.get_major_locator(), mpl.ticker.LogLocator):
ax.yaxis.set_minor_locator(mpl.ticker.AutoMinorLocator(2))
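# A minimal usage sketch. The ggplot/aes/geom_point names and the mtcars
# sample data are assumed to be exported by the surrounding ggplot package
# (they are not defined in this module), so this only runs when the file is
# executed directly.
if __name__ == "__main__":
    from ggplot import ggplot, aes, geom_point, mtcars
    p = (ggplot(aes(x='wt', y='mpg'), data=mtcars)
         + geom_point()
         + theme_seaborn(style="whitegrid", context="talk"))
    print(p)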
| bsd-2-clause |
alexgleith/Quantum-GIS | python/plugins/sextante/algs/MeanAndStdDevPlot.py | 3 | 3304 | # -*- coding: utf-8 -*-
"""
***************************************************************************
MeanAndStdDevPlot.py
---------------------
Date : January 2013
Copyright : (C) 2013 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'January 2013'
__copyright__ = '(C) 2013, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import matplotlib.pyplot as plt
import matplotlib.pylab as lab
import numpy as np
from PyQt4.QtCore import *
from qgis.core import *
from sextante.parameters.ParameterTable import ParameterTable
from sextante.parameters.ParameterTableField import ParameterTableField
from sextante.core.GeoAlgorithm import GeoAlgorithm
from sextante.outputs.OutputHTML import OutputHTML
from sextante.tools import *
from sextante.core.QGisLayers import QGisLayers
class MeanAndStdDevPlot(GeoAlgorithm):
INPUT = "INPUT"
OUTPUT = "OUTPUT"
NAME_FIELD = "NAME_FIELD"
MEAN_FIELD = "MEAN_FIELD"
STDDEV_FIELD = "STDDEV_FIELD"
def processAlgorithm(self, progress):
uri = self.getParameterValue(self.INPUT)
layer = QGisLayers.getObjectFromUri(uri)
namefieldname = self.getParameterValue(self.NAME_FIELD)
meanfieldname = self.getParameterValue(self.MEAN_FIELD)
stddevfieldname = self.getParameterValue(self.STDDEV_FIELD)
output = self.getOutputValue(self.OUTPUT)
values = vector.getAttributeValues(layer, namefieldname, meanfieldname, stddevfieldname)
plt.close()
ind = np.arange(len(values[namefieldname]))
width = 0.8
plt.bar(ind, values[meanfieldname], width,
color='r',
yerr=values[stddevfieldname],
error_kw=dict(ecolor='yellow'))
plt.xticks(ind, values[namefieldname], rotation = 45)
plotFilename = output +".png"
lab.savefig(plotFilename)
f = open(output, "w")
f.write("<img src=\"" + plotFilename + "\"/>")
f.close()
def defineCharacteristics(self):
self.name = "Mean and standard deviation plot"
self.group = "Graphics"
self.addParameter(ParameterTable(self.INPUT, "Input table"))
self.addParameter(ParameterTableField(self.NAME_FIELD, "Category name field", self.INPUT,ParameterTableField.DATA_TYPE_ANY))
self.addParameter(ParameterTableField(self.MEAN_FIELD, "Mean field", self.INPUT))
self.addParameter(ParameterTableField(self.STDDEV_FIELD, "StdDev field", self.INPUT))
self.addOutput(OutputHTML(self.OUTPUT, "Output"))
| gpl-2.0 |
nmayorov/scikit-learn | examples/ensemble/plot_forest_importances.py | 168 | 1793 | """
=========================================
Feature importances with forests of trees
=========================================
This example shows the use of forests of trees to evaluate the importance of
features on an artificial classification task. The red bars are the feature
importances of the forest, along with their inter-trees variability.
As expected, the plot suggests that 3 features are informative, while the
remaining are not.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.ensemble import ExtraTreesClassifier
# Build a classification task using 3 informative features
X, y = make_classification(n_samples=1000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
n_classes=2,
random_state=0,
shuffle=False)
# Build a forest and compute the feature importances
forest = ExtraTreesClassifier(n_estimators=250,
random_state=0)
forest.fit(X, y)
importances = forest.feature_importances_
std = np.std([tree.feature_importances_ for tree in forest.estimators_],
axis=0)
indices = np.argsort(importances)[::-1]
# Print the feature ranking
print("Feature ranking:")
for f in range(X.shape[1]):
print("%d. feature %d (%f)" % (f + 1, indices[f], importances[indices[f]]))
# Plot the feature importances of the forest
plt.figure()
plt.title("Feature importances")
plt.bar(range(X.shape[1]), importances[indices],
color="r", yerr=std[indices], align="center")
plt.xticks(range(X.shape[1]), indices)
plt.xlim([-1, X.shape[1]])
plt.show()
| bsd-3-clause |
newville/scikit-image | doc/ext/notebook.py | 44 | 3042 | __all__ = ['python_to_notebook', 'Notebook']
import json
import copy
import warnings
# Skeleton notebook in JSON format
skeleton_nb = """{
"metadata": {
"name":""
},
"nbformat": 3,
"nbformat_minor": 0,
"worksheets": [
{
"cells": [
{
"cell_type": "code",
"collapsed": false,
"input": [
"%matplotlib inline"
],
"language": "python",
"metadata": {},
"outputs": []
}
],
"metadata": {}
}
]
}"""
class Notebook(object):
"""
Notebook object for building an IPython notebook cell-by-cell.
"""
def __init__(self):
# cell type code
self.cell_code = {
'cell_type': 'code',
'collapsed': False,
'input': [
'# Code Goes Here'
],
'language': 'python',
'metadata': {},
'outputs': []
}
# cell type markdown
self.cell_md = {
'cell_type': 'markdown',
'metadata': {},
'source': [
'Markdown Goes Here'
]
}
self.template = json.loads(skeleton_nb)
self.cell_type = {'input': self.cell_code, 'source': self.cell_md}
self.valuetype_to_celltype = {'code': 'input', 'markdown': 'source'}
def add_cell(self, value, cell_type='code'):
"""Add a notebook cell.
Parameters
----------
value : str
Cell content.
cell_type : {'code', 'markdown'}
Type of content (default is 'code').
"""
if cell_type in ['markdown', 'code']:
key = self.valuetype_to_celltype[cell_type]
cells = self.template['worksheets'][0]['cells']
cells.append(copy.deepcopy(self.cell_type[key]))
# assign value to the last cell
cells[-1][key] = value
else:
warnings.warn('Ignoring unsupported cell type (%s)' % cell_type)
def json(self):
"""Return a JSON representation of the notebook.
Returns
-------
str
JSON notebook.
"""
return json.dumps(self.template, indent=2)
def test_notebook_basic():
nb = Notebook()
assert(json.loads(nb.json()) == json.loads(skeleton_nb))
def test_notebook_add():
nb = Notebook()
str1 = 'hello world'
str2 = 'f = lambda x: x * x'
nb.add_cell(str1, cell_type='markdown')
nb.add_cell(str2, cell_type='code')
d = json.loads(nb.json())
cells = d['worksheets'][0]['cells']
values = [c['input'] if c['cell_type'] == 'code' else c['source']
for c in cells]
assert values[1] == str1
assert values[2] == str2
assert cells[1]['cell_type'] == 'markdown'
assert cells[2]['cell_type'] == 'code'
if __name__ == "__main__":
import numpy.testing as npt
npt.run_module_suite()
| bsd-3-clause |
kiwicopple/MyMDb | venv/Lib/site-packages/sphinx/ext/inheritance_diagram.py | 8 | 14080 | # -*- coding: utf-8 -*-
r"""
sphinx.ext.inheritance_diagram
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Defines a docutils directive for inserting inheritance diagrams.
Provide the directive with one or more classes or modules (separated
by whitespace). For modules, all of the classes in that module will
be used.
Example::
Given the following classes:
class A: pass
class B(A): pass
class C(A): pass
class D(B, C): pass
class E(B): pass
.. inheritance-diagram:: D E
Produces a graph like the following:
A
/ \
B C
/ \ /
E D
The graph is inserted as a PNG+image map into HTML and a PDF in
LaTeX.
:copyright: Copyright 2007-2014 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
import sys
import inspect
import __builtin__ as __builtin__ # as __builtin__ is for lib2to3 compatibility
try:
from hashlib import md5
except ImportError:
from md5 import md5
from docutils import nodes
from docutils.parsers.rst import directives
from sphinx.ext.graphviz import render_dot_html, render_dot_latex, \
render_dot_texinfo
from sphinx.pycode import ModuleAnalyzer
from sphinx.util import force_decode
from sphinx.util.compat import Directive
class_sig_re = re.compile(r'''^([\w.]*\.)? # module names
(\w+) \s* $ # class/final module name
''', re.VERBOSE)
class InheritanceException(Exception):
pass
class InheritanceGraph(object):
"""
Given a list of classes, determines the set of classes that they inherit
from all the way to the root "object", and then is able to generate a
graphviz dot graph from them.
"""
def __init__(self, class_names, currmodule, show_builtins=False,
private_bases=False, parts=0):
"""*class_names* is a list of child classes to show bases from.
If *show_builtins* is True, then Python builtins will be shown
in the graph.
"""
self.class_names = class_names
classes = self._import_classes(class_names, currmodule)
self.class_info = self._class_info(classes, show_builtins,
private_bases, parts)
if not self.class_info:
raise InheritanceException('No classes found for '
'inheritance diagram')
def _import_class_or_module(self, name, currmodule):
"""Import a class using its fully-qualified *name*."""
try:
path, base = class_sig_re.match(name).groups()
except (AttributeError, ValueError):
raise InheritanceException('Invalid class or module %r specified '
'for inheritance diagram' % name)
fullname = (path or '') + base
path = (path and path.rstrip('.') or '')
# two possibilities: either it is a module, then import it
try:
__import__(fullname)
todoc = sys.modules[fullname]
except ImportError:
# else it is a class, then import the module
if not path:
if currmodule:
# try the current module
path = currmodule
else:
raise InheritanceException(
'Could not import class %r specified for '
'inheritance diagram' % base)
try:
__import__(path)
todoc = getattr(sys.modules[path], base)
except (ImportError, AttributeError):
raise InheritanceException(
'Could not import class or module %r specified for '
'inheritance diagram' % (path + '.' + base))
# If a class, just return it
if inspect.isclass(todoc):
return [todoc]
elif inspect.ismodule(todoc):
classes = []
for cls in todoc.__dict__.values():
if inspect.isclass(cls) and cls.__module__ == todoc.__name__:
classes.append(cls)
return classes
raise InheritanceException('%r specified for inheritance diagram is '
'not a class or module' % name)
def _import_classes(self, class_names, currmodule):
"""Import a list of classes."""
classes = []
for name in class_names:
classes.extend(self._import_class_or_module(name, currmodule))
return classes
def _class_info(self, classes, show_builtins, private_bases, parts):
"""Return name and bases for all classes that are ancestors of
*classes*.
*parts* gives the number of dotted name parts that is removed from the
displayed node names.
"""
all_classes = {}
builtins = vars(__builtin__).values()
def recurse(cls):
if not show_builtins and cls in builtins:
return
if not private_bases and cls.__name__.startswith('_'):
return
nodename = self.class_name(cls, parts)
fullname = self.class_name(cls, 0)
# Use first line of docstring as tooltip, if available
tooltip = None
try:
if cls.__doc__:
enc = ModuleAnalyzer.for_module(cls.__module__).encoding
doc = cls.__doc__.strip().split("\n")[0]
if not isinstance(doc, unicode):
doc = force_decode(doc, enc)
if doc:
tooltip = '"%s"' % doc.replace('"', '\\"')
except Exception: # might raise AttributeError for strange classes
pass
baselist = []
all_classes[cls] = (nodename, fullname, baselist, tooltip)
for base in cls.__bases__:
if not show_builtins and base in builtins:
continue
if not private_bases and base.__name__.startswith('_'):
continue
baselist.append(self.class_name(base, parts))
if base not in all_classes:
recurse(base)
for cls in classes:
recurse(cls)
return all_classes.values()
def class_name(self, cls, parts=0):
"""Given a class object, return a fully-qualified name.
This works for things I've tested in matplotlib so far, but may not be
completely general.
"""
module = cls.__module__
if module == '__builtin__':
fullname = cls.__name__
else:
fullname = '%s.%s' % (module, cls.__name__)
if parts == 0:
return fullname
name_parts = fullname.split('.')
return '.'.join(name_parts[-parts:])
def get_all_class_names(self):
"""Get all of the class names involved in the graph."""
return [fullname for (_, fullname, _, _) in self.class_info]
# These are the default attrs for graphviz
default_graph_attrs = {
'rankdir': 'LR',
'size': '"8.0, 12.0"',
}
default_node_attrs = {
'shape': 'box',
'fontsize': 10,
'height': 0.25,
'fontname': '"Vera Sans, DejaVu Sans, Liberation Sans, '
'Arial, Helvetica, sans"',
'style': '"setlinewidth(0.5)"',
}
default_edge_attrs = {
'arrowsize': 0.5,
'style': '"setlinewidth(0.5)"',
}
def _format_node_attrs(self, attrs):
return ','.join(['%s=%s' % x for x in attrs.items()])
def _format_graph_attrs(self, attrs):
return ''.join(['%s=%s;\n' % x for x in attrs.items()])
def generate_dot(self, name, urls={}, env=None,
graph_attrs={}, node_attrs={}, edge_attrs={}):
"""Generate a graphviz dot graph from the classes that were passed in
to __init__.
*name* is the name of the graph.
*urls* is a dictionary mapping class names to HTTP URLs.
*graph_attrs*, *node_attrs*, *edge_attrs* are dictionaries containing
key/value pairs to pass on as graphviz properties.
"""
g_attrs = self.default_graph_attrs.copy()
n_attrs = self.default_node_attrs.copy()
e_attrs = self.default_edge_attrs.copy()
g_attrs.update(graph_attrs)
n_attrs.update(node_attrs)
e_attrs.update(edge_attrs)
if env:
g_attrs.update(env.config.inheritance_graph_attrs)
n_attrs.update(env.config.inheritance_node_attrs)
e_attrs.update(env.config.inheritance_edge_attrs)
res = []
res.append('digraph %s {\n' % name)
res.append(self._format_graph_attrs(g_attrs))
for name, fullname, bases, tooltip in sorted(self.class_info):
# Write the node
this_node_attrs = n_attrs.copy()
if fullname in urls:
this_node_attrs['URL'] = '"%s"' % urls[fullname]
if tooltip:
this_node_attrs['tooltip'] = tooltip
res.append(' "%s" [%s];\n' %
(name, self._format_node_attrs(this_node_attrs)))
# Write the edges
for base_name in bases:
res.append(' "%s" -> "%s" [%s];\n' %
(base_name, name,
self._format_node_attrs(e_attrs)))
res.append('}\n')
return ''.join(res)
class inheritance_diagram(nodes.General, nodes.Element):
"""
A docutils node to use as a placeholder for the inheritance diagram.
"""
pass
class InheritanceDiagram(Directive):
"""
Run when the inheritance_diagram directive is first encountered.
"""
has_content = False
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = True
option_spec = {
'parts': directives.nonnegative_int,
'private-bases': directives.flag,
}
def run(self):
node = inheritance_diagram()
node.document = self.state.document
env = self.state.document.settings.env
class_names = self.arguments[0].split()
class_role = env.get_domain('py').role('class')
# Store the original content for use as a hash
node['parts'] = self.options.get('parts', 0)
node['content'] = ', '.join(class_names)
# Create a graph starting with the list of classes
try:
graph = InheritanceGraph(
class_names, env.temp_data.get('py:module'),
parts=node['parts'],
private_bases='private-bases' in self.options)
except InheritanceException, err:
return [node.document.reporter.warning(err.args[0],
line=self.lineno)]
# Create xref nodes for each target of the graph's image map and
# add them to the doc tree so that Sphinx can resolve the
# references to real URLs later. These nodes will eventually be
# removed from the doctree after we're done with them.
for name in graph.get_all_class_names():
refnodes, x = class_role(
'class', ':class:`%s`' % name, name, 0, self.state)
node.extend(refnodes)
# Store the graph object so we can use it to generate the
# dot file later
node['graph'] = graph
return [node]
def get_graph_hash(node):
encoded = (node['content'] + str(node['parts'])).encode('utf-8')
return md5(encoded).hexdigest()[-10:]
def html_visit_inheritance_diagram(self, node):
"""
Output the graph for HTML. This will insert a PNG with clickable
image map.
"""
graph = node['graph']
graph_hash = get_graph_hash(node)
name = 'inheritance%s' % graph_hash
# Create a mapping from fully-qualified class names to URLs.
urls = {}
for child in node:
if child.get('refuri') is not None:
urls[child['reftitle']] = child.get('refuri')
elif child.get('refid') is not None:
urls[child['reftitle']] = '#' + child.get('refid')
dotcode = graph.generate_dot(name, urls, env=self.builder.env)
render_dot_html(self, node, dotcode, [], 'inheritance', 'inheritance',
alt='Inheritance diagram of ' + node['content'])
raise nodes.SkipNode
def latex_visit_inheritance_diagram(self, node):
"""
Output the graph for LaTeX. This will insert a PDF.
"""
graph = node['graph']
graph_hash = get_graph_hash(node)
name = 'inheritance%s' % graph_hash
dotcode = graph.generate_dot(name, env=self.builder.env,
graph_attrs={'size': '"6.0,6.0"'})
render_dot_latex(self, node, dotcode, [], 'inheritance')
raise nodes.SkipNode
def texinfo_visit_inheritance_diagram(self, node):
"""
Output the graph for Texinfo. This will insert a PNG.
"""
graph = node['graph']
graph_hash = get_graph_hash(node)
name = 'inheritance%s' % graph_hash
dotcode = graph.generate_dot(name, env=self.builder.env,
graph_attrs={'size': '"6.0,6.0"'})
render_dot_texinfo(self, node, dotcode, [], 'inheritance')
raise nodes.SkipNode
def skip(self, node):
raise nodes.SkipNode
def setup(app):
app.setup_extension('sphinx.ext.graphviz')
app.add_node(
inheritance_diagram,
latex=(latex_visit_inheritance_diagram, None),
html=(html_visit_inheritance_diagram, None),
text=(skip, None),
man=(skip, None),
texinfo=(texinfo_visit_inheritance_diagram, None))
app.add_directive('inheritance-diagram', InheritanceDiagram)
app.add_config_value('inheritance_graph_attrs', {}, False),
app.add_config_value('inheritance_node_attrs', {}, False),
app.add_config_value('inheritance_edge_attrs', {}, False),
| mit |
saiwing-yeung/scikit-learn | examples/covariance/plot_sparse_cov.py | 300 | 5078 | """
======================================
Sparse inverse covariance estimation
======================================
Using the GraphLasso estimator to learn a covariance and sparse precision
from a small number of samples.
To estimate a probabilistic model (e.g. a Gaussian model), estimating the
precision matrix, that is the inverse covariance matrix, is as important
as estimating the covariance matrix. Indeed a Gaussian model is
parametrized by the precision matrix.
To be in favorable recovery conditions, we sample the data from a model
with a sparse inverse covariance matrix. In addition, we ensure that the
data is not too correlated (limiting the largest coefficient of the
precision matrix) and that there are no small coefficients in the
precision matrix that cannot be recovered. In addition, with a small
number of observations, it is easier to recover a correlation matrix
rather than a covariance, thus we scale the time series.
Here, the number of samples is slightly larger than the number of
dimensions, thus the empirical covariance is still invertible. However,
as the observations are strongly correlated, the empirical covariance
matrix is ill-conditioned and as a result its inverse --the empirical
precision matrix-- is very far from the ground truth.
If we use l2 shrinkage, as with the Ledoit-Wolf estimator, the small number
of samples forces us to shrink a lot. As a result, the
Ledoit-Wolf precision is fairly close to the ground truth precision, that
is not far from being diagonal, but the off-diagonal structure is lost.
The l1-penalized estimator can recover part of this off-diagonal
structure. It learns a sparse precision. It is not able to
recover the exact sparsity pattern: it detects too many non-zero
coefficients. However, the highest non-zero coefficients of the l1
estimate correspond to the non-zero coefficients in the ground truth.
Finally, the coefficients of the l1 precision estimate are biased toward
zero: because of the penalty, they are all smaller than the corresponding
ground truth value, as can be seen on the figure.
Note that, the color range of the precision matrices is tweaked to
improve readability of the figure. The full range of values of the
empirical precision is not displayed.
The alpha parameter of the GraphLasso setting the sparsity of the model is
set by internal cross-validation in the GraphLassoCV. As can be
seen on figure 2, the grid to compute the cross-validation score is
iteratively refined in the neighborhood of the maximum.
"""
print(__doc__)
# author: Gael Varoquaux <[email protected]>
# License: BSD 3 clause
# Copyright: INRIA
import numpy as np
from scipy import linalg
from sklearn.datasets import make_sparse_spd_matrix
from sklearn.covariance import GraphLassoCV, ledoit_wolf
import matplotlib.pyplot as plt
##############################################################################
# Generate the data
n_samples = 60
n_features = 20
prng = np.random.RandomState(1)
prec = make_sparse_spd_matrix(n_features, alpha=.98,
smallest_coef=.4,
largest_coef=.7,
random_state=prng)
cov = linalg.inv(prec)
d = np.sqrt(np.diag(cov))
cov /= d
cov /= d[:, np.newaxis]
prec *= d
prec *= d[:, np.newaxis]
X = prng.multivariate_normal(np.zeros(n_features), cov, size=n_samples)
X -= X.mean(axis=0)
X /= X.std(axis=0)
##############################################################################
# Estimate the covariance
emp_cov = np.dot(X.T, X) / n_samples
model = GraphLassoCV()
model.fit(X)
cov_ = model.covariance_
prec_ = model.precision_
lw_cov_, _ = ledoit_wolf(X)
lw_prec_ = linalg.inv(lw_cov_)
##############################################################################
# Plot the results
plt.figure(figsize=(10, 6))
plt.subplots_adjust(left=0.02, right=0.98)
# plot the covariances
covs = [('Empirical', emp_cov), ('Ledoit-Wolf', lw_cov_),
('GraphLasso', cov_), ('True', cov)]
vmax = cov_.max()
for i, (name, this_cov) in enumerate(covs):
plt.subplot(2, 4, i + 1)
plt.imshow(this_cov, interpolation='nearest', vmin=-vmax, vmax=vmax,
cmap=plt.cm.RdBu_r)
plt.xticks(())
plt.yticks(())
plt.title('%s covariance' % name)
# plot the precisions
precs = [('Empirical', linalg.inv(emp_cov)), ('Ledoit-Wolf', lw_prec_),
('GraphLasso', prec_), ('True', prec)]
vmax = .9 * prec_.max()
for i, (name, this_prec) in enumerate(precs):
ax = plt.subplot(2, 4, i + 5)
plt.imshow(np.ma.masked_equal(this_prec, 0),
interpolation='nearest', vmin=-vmax, vmax=vmax,
cmap=plt.cm.RdBu_r)
plt.xticks(())
plt.yticks(())
plt.title('%s precision' % name)
ax.set_axis_bgcolor('.7')
# plot the model selection metric
plt.figure(figsize=(4, 3))
plt.axes([.2, .15, .75, .7])
plt.plot(model.cv_alphas_, np.mean(model.grid_scores, axis=1), 'o-')
plt.axvline(model.alpha_, color='.5')
plt.title('Model selection')
plt.ylabel('Cross-validation score')
plt.xlabel('alpha')
plt.show()
| bsd-3-clause |
mattilyra/scikit-learn | examples/classification/plot_digits_classification.py | 34 | 2409 | """
================================
Recognizing hand-written digits
================================
An example showing how the scikit-learn can be used to recognize images of
hand-written digits.
This example is commented in the
:ref:`tutorial section of the user manual <introduction>`.
"""
print(__doc__)
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# License: BSD 3 clause
# Standard scientific Python imports
import matplotlib.pyplot as plt
# Import datasets, classifiers and performance metrics
from sklearn import datasets, svm, metrics
# The digits dataset
digits = datasets.load_digits()
# The data that we are interested in is made of 8x8 images of digits, let's
# have a look at the first 4 images, stored in the `images` attribute of the
# dataset. If we were working from image files, we could load them using
# matplotlib.pyplot.imread. Note that each image must have the same size. For these
# images, we know which digit they represent: it is given in the 'target' of
# the dataset.
images_and_labels = list(zip(digits.images, digits.target))
for index, (image, label) in enumerate(images_and_labels[:4]):
plt.subplot(2, 4, index + 1)
plt.axis('off')
plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')
plt.title('Training: %i' % label)
# To apply a classifier on this data, we need to flatten the image, to
# turn the data in a (samples, feature) matrix:
n_samples = len(digits.images)
data = digits.images.reshape((n_samples, -1))
# Create a classifier: a support vector classifier
classifier = svm.SVC(gamma=0.001)
# We learn the digits on the first half of the digits
classifier.fit(data[:n_samples / 2], digits.target[:n_samples / 2])
# Now predict the value of the digit on the second half:
expected = digits.target[n_samples / 2:]
predicted = classifier.predict(data[n_samples / 2:])
print("Classification report for classifier %s:\n%s\n"
% (classifier, metrics.classification_report(expected, predicted)))
print("Confusion matrix:\n%s" % metrics.confusion_matrix(expected, predicted))
images_and_predictions = list(zip(digits.images[n_samples / 2:], predicted))
for index, (image, prediction) in enumerate(images_and_predictions[:4]):
plt.subplot(2, 4, index + 5)
plt.axis('off')
plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')
plt.title('Prediction: %i' % prediction)
plt.show()
| bsd-3-clause |
billy-inn/scikit-learn | sklearn/learning_curve.py | 110 | 13467 | """Utilities to evaluate models with respect to a variable
"""
# Author: Alexander Fabisch <[email protected]>
#
# License: BSD 3 clause
import warnings
import numpy as np
from .base import is_classifier, clone
from .cross_validation import check_cv
from .externals.joblib import Parallel, delayed
from .cross_validation import _safe_split, _score, _fit_and_score
from .metrics.scorer import check_scoring
from .utils import indexable
from .utils.fixes import astype
__all__ = ['learning_curve', 'validation_curve']
def learning_curve(estimator, X, y, train_sizes=np.linspace(0.1, 1.0, 5),
cv=None, scoring=None, exploit_incremental_learning=False,
n_jobs=1, pre_dispatch="all", verbose=0):
"""Learning curve.
Determines cross-validated training and test scores for different training
set sizes.
A cross-validation generator splits the whole dataset k times in training
and test data. Subsets of the training set with varying sizes will be used
to train the estimator and a score for each training subset size and the
test set will be computed. Afterwards, the scores will be averaged over
all k runs for each training subset size.
Read more in the :ref:`User Guide <learning_curves>`.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
train_sizes : array-like, shape (n_ticks,), dtype float or int
Relative or absolute numbers of training examples that will be used to
generate the learning curve. If the dtype is float, it is regarded as a
fraction of the maximum size of the training set (that is determined
by the selected validation method), i.e. it has to be within (0, 1].
Otherwise it is interpreted as absolute sizes of the training sets.
Note that for classification the number of samples usually have to
be big enough to contain at least one sample from each class.
(default: np.linspace(0.1, 1.0, 5))
cv : integer, cross-validation generator, optional
If an integer is passed, it is the number of folds (defaults to 3).
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
exploit_incremental_learning : boolean, optional, default: False
If the estimator supports incremental learning, this will be
used to speed up fitting for different training set sizes.
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
pre_dispatch : integer or string, optional
Number of predispatched jobs for parallel execution (default is
all). The option can reduce the allocated memory. The string can
be an expression like '2*n_jobs'.
verbose : integer, optional
Controls the verbosity: the higher, the more messages.
Returns
-------
train_sizes_abs : array, shape = (n_unique_ticks,), dtype int
Numbers of training examples that has been used to generate the
learning curve. Note that the number of ticks might be less
than n_ticks because duplicate entries will be removed.
train_scores : array, shape (n_ticks, n_cv_folds)
Scores on training sets.
test_scores : array, shape (n_ticks, n_cv_folds)
Scores on test set.
Notes
-----
See :ref:`examples/model_selection/plot_learning_curve.py
<example_model_selection_plot_learning_curve.py>`
"""
if exploit_incremental_learning and not hasattr(estimator, "partial_fit"):
raise ValueError("An estimator must support the partial_fit interface "
"to exploit incremental learning")
X, y = indexable(X, y)
# Make a list since we will be iterating multiple times over the folds
cv = list(check_cv(cv, X, y, classifier=is_classifier(estimator)))
scorer = check_scoring(estimator, scoring=scoring)
# HACK as long as boolean indices are allowed in cv generators
if cv[0][0].dtype == bool:
new_cv = []
for i in range(len(cv)):
new_cv.append((np.nonzero(cv[i][0])[0], np.nonzero(cv[i][1])[0]))
cv = new_cv
n_max_training_samples = len(cv[0][0])
# Because the lengths of folds can be significantly different, it is
# not guaranteed that we use all of the available training data when we
# use the first 'n_max_training_samples' samples.
train_sizes_abs = _translate_train_sizes(train_sizes,
n_max_training_samples)
n_unique_ticks = train_sizes_abs.shape[0]
if verbose > 0:
print("[learning_curve] Training set sizes: " + str(train_sizes_abs))
parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,
verbose=verbose)
if exploit_incremental_learning:
classes = np.unique(y) if is_classifier(estimator) else None
out = parallel(delayed(_incremental_fit_estimator)(
clone(estimator), X, y, classes, train, test, train_sizes_abs,
scorer, verbose) for train, test in cv)
else:
out = parallel(delayed(_fit_and_score)(
clone(estimator), X, y, scorer, train[:n_train_samples], test,
verbose, parameters=None, fit_params=None, return_train_score=True)
for train, test in cv for n_train_samples in train_sizes_abs)
out = np.array(out)[:, :2]
n_cv_folds = out.shape[0] // n_unique_ticks
out = out.reshape(n_cv_folds, n_unique_ticks, 2)
out = np.asarray(out).transpose((2, 1, 0))
return train_sizes_abs, out[0], out[1]
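# Illustrative usage sketch (not executed; assumes a scikit-learn installation
# of this era where this module is importable as ``sklearn.learning_curve``):
#
#     from sklearn.learning_curve import learning_curve
#     from sklearn.datasets import load_digits
#     from sklearn.svm import SVC
#
#     digits = load_digits()
#     sizes, train_scores, test_scores = learning_curve(
#         SVC(kernel="linear"), digits.data, digits.target,
#         train_sizes=np.linspace(0.1, 1.0, 5), cv=5)
#     # train_scores and test_scores have shape (n_ticks, n_cv_folds); average
#     # over axis 1 before plotting one curve per training set size.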
def _translate_train_sizes(train_sizes, n_max_training_samples):
"""Determine absolute sizes of training subsets and validate 'train_sizes'.
Examples:
_translate_train_sizes([0.5, 1.0], 10) -> [5, 10]
_translate_train_sizes([5, 10], 10) -> [5, 10]
Parameters
----------
train_sizes : array-like, shape (n_ticks,), dtype float or int
Numbers of training examples that will be used to generate the
learning curve. If the dtype is float, it is regarded as a
fraction of 'n_max_training_samples', i.e. it has to be within (0, 1].
n_max_training_samples : int
Maximum number of training samples (upper bound of 'train_sizes').
Returns
-------
train_sizes_abs : array, shape (n_unique_ticks,), dtype int
Numbers of training examples that will be used to generate the
learning curve. Note that the number of ticks might be less
than n_ticks because duplicate entries will be removed.
"""
train_sizes_abs = np.asarray(train_sizes)
n_ticks = train_sizes_abs.shape[0]
n_min_required_samples = np.min(train_sizes_abs)
n_max_required_samples = np.max(train_sizes_abs)
if np.issubdtype(train_sizes_abs.dtype, np.float):
if n_min_required_samples <= 0.0 or n_max_required_samples > 1.0:
raise ValueError("train_sizes has been interpreted as fractions "
"of the maximum number of training samples and "
"must be within (0, 1], but is within [%f, %f]."
% (n_min_required_samples,
n_max_required_samples))
train_sizes_abs = astype(train_sizes_abs * n_max_training_samples,
dtype=np.int, copy=False)
train_sizes_abs = np.clip(train_sizes_abs, 1,
n_max_training_samples)
else:
if (n_min_required_samples <= 0 or
n_max_required_samples > n_max_training_samples):
raise ValueError("train_sizes has been interpreted as absolute "
"numbers of training samples and must be within "
"(0, %d], but is within [%d, %d]."
% (n_max_training_samples,
n_min_required_samples,
n_max_required_samples))
train_sizes_abs = np.unique(train_sizes_abs)
if n_ticks > train_sizes_abs.shape[0]:
warnings.warn("Removed duplicate entries from 'train_sizes'. Number "
"of ticks will be less than than the size of "
"'train_sizes' %d instead of %d)."
% (train_sizes_abs.shape[0], n_ticks), RuntimeWarning)
return train_sizes_abs
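# Worked example (illustrative): _translate_train_sizes(np.linspace(0.1, 1.0, 5), 20)
# scales the fractions [0.1, 0.325, 0.55, 0.775, 1.0] by 20 and truncates to
# int, yielding the absolute training set sizes [2, 6, 11, 15, 20].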
def _incremental_fit_estimator(estimator, X, y, classes, train, test,
train_sizes, scorer, verbose):
"""Train estimator on training subsets incrementally and compute scores."""
train_scores, test_scores = [], []
partitions = zip(train_sizes, np.split(train, train_sizes)[:-1])
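    # Example of the partitioning (illustrative): with train_sizes = [5, 10] and
    # len(train) == 15, np.split gives [train[:5], train[5:10], train[10:]];
    # dropping the last chunk pairs 5 with train[:5] and 10 with train[5:10],
    # so each iteration feeds only the newly added samples to partial_fit.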
for n_train_samples, partial_train in partitions:
train_subset = train[:n_train_samples]
X_train, y_train = _safe_split(estimator, X, y, train_subset)
X_partial_train, y_partial_train = _safe_split(estimator, X, y,
partial_train)
X_test, y_test = _safe_split(estimator, X, y, test, train_subset)
if y_partial_train is None:
estimator.partial_fit(X_partial_train, classes=classes)
else:
estimator.partial_fit(X_partial_train, y_partial_train,
classes=classes)
train_scores.append(_score(estimator, X_train, y_train, scorer))
test_scores.append(_score(estimator, X_test, y_test, scorer))
return np.array((train_scores, test_scores)).T
def validation_curve(estimator, X, y, param_name, param_range, cv=None,
scoring=None, n_jobs=1, pre_dispatch="all", verbose=0):
"""Validation curve.
Determine training and test scores for varying parameter values.
Compute scores for an estimator with different values of a specified
parameter. This is similar to grid search with one parameter. However, this
will also compute training scores and is merely a utility for plotting the
results.
Read more in the :ref:`User Guide <validation_curve>`.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
param_name : string
Name of the parameter that will be varied.
param_range : array-like, shape (n_values,)
The values of the parameter that will be evaluated.
cv : integer, cross-validation generator, optional
If an integer is passed, it is the number of folds (defaults to 3).
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
pre_dispatch : integer or string, optional
Number of predispatched jobs for parallel execution (default is
all). The option can reduce the allocated memory. The string can
be an expression like '2*n_jobs'.
verbose : integer, optional
Controls the verbosity: the higher, the more messages.
Returns
-------
train_scores : array, shape (n_ticks, n_cv_folds)
Scores on training sets.
test_scores : array, shape (n_ticks, n_cv_folds)
Scores on test set.
Notes
-----
See
:ref:`examples/model_selection/plot_validation_curve.py
<example_model_selection_plot_validation_curve.py>`
"""
X, y = indexable(X, y)
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,
verbose=verbose)
out = parallel(delayed(_fit_and_score)(
estimator, X, y, scorer, train, test, verbose,
parameters={param_name: v}, fit_params=None, return_train_score=True)
for train, test in cv for v in param_range)
out = np.asarray(out)[:, :2]
n_params = len(param_range)
n_cv_folds = out.shape[0] // n_params
out = out.reshape(n_cv_folds, n_params, 2).transpose((2, 1, 0))
return out[0], out[1]
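# Illustrative usage sketch (not executed; the module path and parameter
# values are assumptions):
#
#     from sklearn.learning_curve import validation_curve
#     from sklearn.datasets import load_iris
#     from sklearn.svm import SVC
#
#     iris = load_iris()
#     train_scores, test_scores = validation_curve(
#         SVC(), iris.data, iris.target, param_name="gamma",
#         param_range=np.logspace(-6, -1, 5), cv=5)
#     # Both arrays have shape (n_values, n_cv_folds); averaging over axis 1
#     # gives one mean score per value of the varied parameter.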
| bsd-3-clause |
janhahne/nest-simulator | pynest/nest/lib/hl_api_types.py | 1 | 29228 | # -*- coding: utf-8 -*-
#
# hl_api_types.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Classes defining the different PyNEST types
"""
from ..ll_api import *
from .. import pynestkernel as kernel
from .hl_api_helper import *
from .hl_api_simulation import GetKernelStatus
import numpy
try:
import pandas
HAVE_PANDAS = True
except ImportError:
HAVE_PANDAS = False
__all__ = [
'SynapseCollection',
'CreateParameter',
'NodeCollection',
'Mask',
'Parameter',
]
def CreateParameter(parametertype, specs):
"""
Create a parameter.
Parameters
----------
parametertype : string
Parameter type with or without distance dependency.
Can be one of the following: 'constant', 'linear', 'exponential', 'gaussian', 'gaussian2D',
'uniform', 'normal', 'lognormal', 'distance', 'position'
specs : dict
Dictionary specifying the parameters of the provided
`parametertype`, see **Parameter types**.
Returns
-------
``Parameter``:
Object representing the parameter
Notes
-----
- Instead of using `CreateParameter` you can also use the various parametrizations embedded in NEST. See for
instance :py:func:`.uniform`.
**Parameter types**
Some available parameter types (`parametertype` parameter), their function and
acceptable keys for their corresponding specification dictionaries
* Constant
::
'constant' :
{'value' : float} # constant value
* Randomization
::
# random parameter with uniform distribution in [min,max)
'uniform' :
{'min' : float, # minimum value, default: 0.0
'max' : float} # maximum value, default: 1.0
# random parameter with normal distribution, optionally truncated
# to [min,max)
'normal':
{'mean' : float, # mean value, default: 0.0
'sigma': float, # standard deviation, default: 1.0
'min' : float, # minimum value, default: -inf
'max' : float} # maximum value, default: +inf
# random parameter with lognormal distribution,
# optionally truncated to [min,max)
'lognormal' :
{'mu' : float, # mean value of logarithm, default: 0.0
'sigma': float, # standard deviation of log, default: 1.0
'min' : float, # minimum value, default: -inf
'max' : float} # maximum value, default: +inf
"""
return sli_func('CreateParameter', {parametertype: specs})
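# Illustrative sketches of CreateParameter (not executed; the numeric values
# are arbitrary and only mirror the parameter types documented above):
#
#   p_const = nest.CreateParameter('constant', {'value': 2.5})
#   p_uniform = nest.CreateParameter('uniform', {'min': 0.0, 'max': 1.0})
#   p_normal = nest.CreateParameter('normal', {'mean': 0.0, 'sigma': 1.0})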
class NodeCollectionIterator(object):
"""
Iterator class for `NodeCollection`.
Returns
-------
`NodeCollection`:
Single node ID `NodeCollection` of respective iteration.
"""
def __init__(self, nc):
self._nc = nc
self._increment = 0
def __iter__(self):
return self
def __next__(self):
if self._increment > len(self._nc) - 1:
raise StopIteration
val = sli_func('Take', self._nc._datum, [self._increment + (self._increment >= 0)])
self._increment += 1
return val
next = __next__ # Python2.x
class NodeCollection(object):
"""
Class for `NodeCollection`.
`NodeCollection` represents the nodes of a network. The class supports
iteration, concatination, indexing, slicing, membership, length, convertion to and
from lists, test for membership, and test for equality. By using the
membership functions :py:func:`get()` and :py:func:`set()`, you can get and set desired
parameters.
A `NodeCollection` is created by the :py:func:`.Create` function, or by converting a
list of nodes to a `NodeCollection` with ``nest.NodeCollection(list)``.
If your nodes have spatial extent, use the member parameter ``spatial`` to get the spatial information.
Example
-------
::
import nest
nest.ResetKernel()
# Create NodeCollection representing nodes
nc = nest.Create('iaf_psc_alpha', 10)
# Convert from list
node_ids_in = [2, 4, 6, 8]
new_nc = nest.NodeCollection(node_ids_in)
# Convert to list
nc_list = nc.tolist()
# Concatenation
Enrns = nest.Create('aeif_cond_alpha', 600)
Inrns = nest.Create('iaf_psc_alpha', 400)
nrns = Enrns + Inrns
# Slicing and membership
print(new_nc[2])
print(new_nc[1:2])
6 in new_nc
"""
_datum = None
def __init__(self, data):
if isinstance(data, kernel.SLIDatum):
if data.dtype != "nodecollectiontype":
raise TypeError("Need NodeCollection Datum.")
self._datum = data
else:
# Data from user, must be converted to datum
# Data can be anything that can be converted to a NodeCollection,
# such as list, tuple, etc.
nc = sli_func('cvnodecollection', data)
self._datum = nc._datum
def __iter__(self):
return NodeCollectionIterator(self)
def __add__(self, other):
if not isinstance(other, NodeCollection):
raise NotImplementedError()
return sli_func('join', self._datum, other._datum)
def __getitem__(self, key):
if isinstance(key, slice):
if key.start is None:
start = 1
else:
start = key.start + 1 if key.start >= 0 else key.start
if key.stop is None:
stop = self.__len__()
else:
stop = key.stop if key.stop >= 0 else key.stop
step = 1 if key.step is None else key.step
return sli_func('Take', self._datum, [start, stop, step])
elif isinstance(key, (int, numpy.integer)):
return sli_func('Take', self._datum, [key + (key >= 0)])
else:
raise IndexError('only integers and slices are valid indices')
def __contains__(self, node_id):
return sli_func('MemberQ', self._datum, node_id)
def __eq__(self, other):
if not isinstance(other, NodeCollection):
raise NotImplementedError('Cannot compare NodeCollection to {}'.format(type(other).__name__))
if self.__len__() != other.__len__():
return False
return sli_func('eq', self, other)
def __neq__(self, other):
if not isinstance(other, NodeCollection):
raise NotImplementedError()
return not self == other
def __len__(self):
return sli_func('size', self._datum)
def __str__(self):
return sli_func('pcvs', self._datum)
def __repr__(self):
return sli_func('pcvs', self._datum)
def get(self, *params, **kwargs):
"""
Get parameters from nodes.
Parameters
----------
params : str or list, optional
Parameters to get from the nodes. It must be one of the following:
- A single string.
- A list of strings.
- One or more strings, followed by a string or list of strings.
This is for hierarchical addressing.
output : str, ['pandas','json'], optional
If the returned data should be in a Pandas DataFrame or in a
JSON serializable format.
Returns
-------
int or float:
If there is a single node in the `NodeCollection`, and a single
parameter in params.
array_like:
If there are multiple nodes in the `NodeCollection`, and a single
parameter in params.
dict:
If there are multiple parameters in params. Or, if no parameters
are specified, a dictionary containing aggregated parameter-values
for all nodes is returned.
DataFrame:
Pandas Data frame if output should be in pandas format.
Raises
------
TypeError
If the input params are of the wrong form.
KeyError
If the specified parameter does not exist for the nodes.
See Also
--------
set
"""
# ------------------------- #
# Checks of input #
# ------------------------- #
if not kwargs:
output = ''
elif 'output' in kwargs:
output = kwargs['output']
if output == 'pandas' and not HAVE_PANDAS:
raise ImportError('Pandas could not be imported')
else:
raise TypeError('Got unexpected keyword argument')
pandas_output = output == 'pandas'
if len(params) == 0:
# get() is called without arguments
result = sli_func('get', self._datum)
elif len(params) == 1:
# params is a tuple with a string or list of strings
result = get_parameters(self, params[0])
else:
# Hierarchical addressing
result = get_parameters_hierarchical_addressing(self, params)
if pandas_output:
index = self.get('global_id')
if len(params) == 1 and is_literal(params[0]):
# params is a string
result = {params[0]: result}
elif len(params) > 1 and is_literal(params[1]):
# hierarchical, single string
result = {params[1]: result}
if len(self) == 1:
index = [index]
result = {key: [val] for key, val in result.items()}
result = pandas.DataFrame(result, index=index)
elif output == 'json':
result = to_json(result)
return result
def set(self, params=None, **kwargs):
"""
Set the parameters of nodes to params.
NB! This is almost the same implementation as `SetStatus`.
If `kwargs` is given, it has to be names and values of an attribute as keyword argument pairs. The values
can be single values or list of the same size as the `NodeCollection`.
Parameters
----------
params : str or dict or list
Dictionary of parameters or list of dictionaries of parameters of
same length as the `NodeCollection`.
kwargs : keyword argument pairs
Named arguments of parameters of the elements in the `NodeCollection`.
Raises
------
TypeError
If the input params are of the wrong form.
KeyError
If the specified parameter does not exist for the nodes.
"""
if kwargs and params is None:
params = kwargs
elif kwargs and params:
raise TypeError("must either provide params or kwargs, but not both.")
if isinstance(params, dict) and self[0].get('local'):
contains_list = [is_iterable(vals) and not is_iterable(self[0].get(key)) for key, vals in params.items()]
if any(contains_list):
temp_param = [{} for _ in range(self.__len__())]
for key, vals in params.items():
if not is_iterable(vals):
for temp_dict in temp_param:
temp_dict[key] = vals
else:
for i, temp_dict in enumerate(temp_param):
temp_dict[key] = vals[i]
params = temp_param
if (isinstance(params, (list, tuple)) and self.__len__() != len(params)):
raise TypeError(
"status dict must be a dict, or a list of dicts of length len(nodes)")
sli_func('SetStatus', self._datum, params)
def tolist(self):
"""
Convert `NodeCollection` to list.
"""
if self.__len__() == 0:
return []
return list(self.get('global_id')) if self.__len__() > 1 else [self.get('global_id')]
def index(self, node_id):
"""
Find the index of a node ID in the `NodeCollection`.
Parameters
----------
node_id : int
Global ID to be found.
Raises
------
ValueError
If the node ID is not in the `NodeCollection`.
"""
index = sli_func('Find', self._datum, node_id)
if index == -1:
raise ValueError('{} is not in NodeCollection'.format(node_id))
return index
def __getattr__(self, attr):
if attr == 'spatial':
metadata = sli_func('GetMetadata', self._datum)
val = metadata if metadata else None
super().__setattr__(attr, val)
return self.spatial
return self.get(attr)
def __setattr__(self, attr, value):
# `_datum` is the only property of NodeCollection that should not be
# interpreted as a property of the model
if attr == '_datum':
super().__setattr__(attr, value)
else:
self.set({attr: value})
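# Illustrative NodeCollection get/set sketch (not executed; assumes the chosen
# model exposes a parameter named 'V_m'):
#
#   nc = nest.Create('iaf_psc_alpha', 10)
#   nc.get('V_m')                        # membrane potentials of all nodes
#   nc.set({'V_m': -70.0})               # same value for every node
#   nc.V_m = -65.0                       # attribute access is forwarded to set()
#   nc.get('V_m', output='pandas')       # one-column DataFrame indexed by global_id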
class SynapseCollectionIterator(object):
"""
Iterator class for SynapseCollection.
"""
def __init__(self, synapse_collection):
self._iter = iter(synapse_collection._datum)
def __iter__(self):
return self
def __next__(self):
return SynapseCollection(next(self._iter))
next = __next__ # Python2.x
class SynapseCollection(object):
"""
Class for Connections.
`SynapseCollection` represents the connections of a network. The class supports indexing, iteration, length and
equality. You can get and set connection parameters by using the membership functions :py:func:`get()` and
:py:func:`set()`. By using the membership function :py:func:`sources()` you get an iterator over
    source nodes, while :py:func:`targets()` returns an iterator over the target nodes of the connections.
A SynapseCollection is created by the :py:func:`.GetConnections` function.
"""
_datum = None
def __init__(self, data):
if isinstance(data, list):
for datum in data:
if (not isinstance(datum, kernel.SLIDatum) or
datum.dtype != "connectiontype"):
raise TypeError("Expected Connection Datum.")
self._datum = data
elif data is None:
# We can have an empty SynapseCollection if there are no connections.
self._datum = data
else:
if (not isinstance(data, kernel.SLIDatum) or
data.dtype != "connectiontype"):
raise TypeError("Expected Connection Datum.")
# self._datum needs to be a list of Connection datums.
self._datum = [data]
def __iter__(self):
return SynapseCollectionIterator(self)
def __len__(self):
if self._datum is None:
return 0
return len(self._datum)
def __eq__(self, other):
if not isinstance(other, SynapseCollection):
raise NotImplementedError()
if self.__len__() != other.__len__():
return False
self_get = self.get(['source', 'target', 'target_thread',
'synapse_id', 'port'])
other_get = other.get(['source', 'target', 'target_thread',
'synapse_id', 'port'])
if self_get != other_get:
return False
return True
def __neq__(self, other):
if not isinstance(other, SynapseCollection):
raise NotImplementedError()
return not self == other
def __getitem__(self, key):
if isinstance(key, slice):
return SynapseCollection(self._datum[key])
else:
return SynapseCollection([self._datum[key]])
def __str__(self):
"""
Printing a `SynapseCollection` returns something of the form:
*--------*-------------*
| source | 1, 1, 2, 2, |
*--------*-------------*
| target | 1, 2, 1, 2, |
*--------*-------------*
"""
srcs = self.get('source')
trgt = self.get('target')
if isinstance(srcs, int):
srcs = [srcs]
if isinstance(trgt, int):
trgt = [trgt]
# 35 is arbitrarily chosen.
if len(srcs) < 35:
source = '| source | ' + ''.join(str(e)+', ' for e in srcs) + '|'
target = '| target | ' + ''.join(str(e)+', ' for e in trgt) + '|'
else:
source = ('| source | ' + ''.join(str(e)+', ' for e in srcs[:15]) +
'... ' + ''.join(str(e)+', ' for e in srcs[-15:]) + '|')
target = ('| target | ' + ''.join(str(e)+', ' for e in trgt[:15]) +
'... ' + ''.join(str(e)+', ' for e in trgt[-15:]) + '|')
borderline_s = '*--------*' + '-'*(len(source) - 12) + '-*'
borderline_t = '*--------*' + '-'*(len(target) - 12) + '-*'
borderline_m = max(borderline_s, borderline_t)
result = (borderline_s + '\n' + source + '\n' + borderline_m + '\n' +
target + '\n' + borderline_t)
return result
def __getattr__(self, attr):
return self.get(attr)
def __setattr__(self, attr, value):
# `_datum` is the only property of SynapseCollection that should not be
# interpreted as a property of the model
if attr == '_datum':
super().__setattr__(attr, value)
else:
self.set({attr: value})
def sources(self):
"""Returns iterator containing the source node IDs of the `SynapseCollection`."""
sources = self.get('source')
if not isinstance(sources, (list, tuple)):
sources = (sources,)
return iter(sources)
def targets(self):
"""Returns iterator containing the target node IDs of the `SynapseCollection`."""
targets = self.get('target')
if not isinstance(targets, (list, tuple)):
targets = (targets,)
return iter(targets)
def get(self, keys=None, output=''):
"""
Return a parameter dictionary of the connections.
If `keys` is a string, a list of values is returned, unless we have a
single connection, in which case the single value is returned.
`keys` may also be a list, in which case a dictionary with a list of
values is returned.
Parameters
----------
keys : str or list, optional
String or a list of strings naming model properties. get
then returns a single value or a dictionary with lists of values
belonging to the given `keys`.
output : str, ['pandas','json'], optional
If the returned data should be in a Pandas DataFrame or in a
JSON serializable format.
Returns
-------
dict:
All parameters, or, if keys is a list of strings, a dictionary with
lists of corresponding parameters
type:
            If keys is a string, the corresponding parameter(s) is returned
Raises
------
TypeError
If input params are of the wrong form.
KeyError
If the specified parameter does not exist for the connections.
"""
pandas_output = output == 'pandas'
if pandas_output and not HAVE_PANDAS:
raise ImportError('Pandas could not be imported')
# Return empty tuple if we have no connections or if we have done a
# nest.ResetKernel()
num_conn = GetKernelStatus('num_connections')
if self.__len__() == 0 or num_conn == 0:
return ()
if keys is None:
cmd = 'GetStatus'
elif is_literal(keys):
cmd = 'GetStatus {{ /{0} get }} Map'.format(keys)
elif is_iterable(keys):
keys_str = " ".join("/{0}".format(x) for x in keys)
cmd = 'GetStatus {{ [ [ {0} ] ] get }} Map'.format(keys_str)
else:
raise TypeError("keys should be either a string or an iterable")
sps(self._datum)
sr(cmd)
result = spp()
# Need to restructure the data.
final_result = restructure_data(result, keys)
if pandas_output:
index = (self.get('source') if self.__len__() > 1 else
(self.get('source'),))
if is_literal(keys):
final_result = {keys: final_result}
final_result = pandas.DataFrame(final_result, index=index)
elif output == 'json':
final_result = to_json(final_result)
return final_result
def set(self, params=None, **kwargs):
"""
Set the parameters of the connections to `params`.
NB! This is almost the same implementation as SetStatus
If `kwargs` is given, it has to be names and values of an attribute as keyword argument pairs. The values
can be single values or list of the same size as the `SynapseCollection`.
Parameters
----------
params : str or dict or list
Dictionary of parameters or list of dictionaries of parameters of
same length as the `SynapseCollection`.
kwargs : keyword argument pairs
Named arguments of parameters of the elements in the `SynapseCollection`.
Raises
------
TypeError
If input params are of the wrong form.
KeyError
If the specified parameter does not exist for the connections.
"""
# This was added to ensure that the function is a nop (instead of,
# for instance, raising an exception) when applied to an empty
# SynapseCollection, or after having done a nest.ResetKernel().
if self.__len__() == 0 or GetKernelStatus()['network_size'] == 0:
return
if (isinstance(params, (list, tuple)) and
self.__len__() != len(params)):
raise TypeError(
"status dict must be a dict, or a list of dicts of length "
"len(nodes)")
if kwargs and params is None:
params = kwargs
elif kwargs and params:
raise TypeError("must either provide params or kwargs, but not both.")
if isinstance(params, dict):
contains_list = [is_iterable(vals) and not is_iterable(self[0].get(key)) for key, vals in params.items()]
if any(contains_list):
temp_param = [{} for _ in range(self.__len__())]
for key, vals in params.items():
if not is_iterable(vals):
for temp_dict in temp_param:
temp_dict[key] = vals
else:
for i, temp_dict in enumerate(temp_param):
temp_dict[key] = vals[i]
params = temp_param
params = broadcast(params, self.__len__(), (dict,), "params")
sps(self._datum)
sps(params)
sr('2 arraystore')
sr('Transpose { arrayload pop SetStatus } forall')
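# Illustrative SynapseCollection sketch (not executed; assumes connections have
# already been created and that the synapse model has a 'weight' parameter):
#
#   conns = nest.GetConnections()        # SynapseCollection of all connections
#   conns.get('weight')                  # list of weights
#   conns.set({'weight': 2.0})           # same weight for every connection
#   list(conns.sources())                # source node IDs of the connections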
class Mask(object):
"""
Class for spatial masks.
    Masks are used when creating connections between nodes with spatial extent. A mask
describes the area of the pool population that shall be searched to find nodes to
connect to for any given node in the driver population. Masks are created using
the :py:func:`.CreateMask` command.
"""
_datum = None
# The constructor should not be called by the user
def __init__(self, datum):
"""Masks must be created using the CreateMask command."""
if not isinstance(datum, kernel.SLIDatum) or datum.dtype != "masktype":
raise TypeError("expected mask Datum")
self._datum = datum
# Generic binary operation
def _binop(self, op, other):
if not isinstance(other, Mask):
raise NotImplementedError()
return sli_func(op, self._datum, other._datum)
def __or__(self, other):
return self._binop("or", other)
def __and__(self, other):
return self._binop("and", other)
def __sub__(self, other):
return self._binop("sub", other)
def Inside(self, point):
"""
Test if a point is inside a mask.
Parameters
----------
point : tuple/list of float values
Coordinate of point
Returns
-------
out : bool
True if the point is inside the mask, False otherwise
"""
return sli_func("Inside", point, self._datum)
class Parameter(object):
"""
Class for parameters
A parameter may be used as a probability kernel when creating
connections and nodes or as synaptic parameters (such as weight and delay).
Parameters are created using the :py:func:`.CreateParameter` command.
"""
_datum = None
# The constructor should not be called by the user
def __init__(self, datum):
"""Parameters must be created using the CreateParameter command."""
if not isinstance(datum,
kernel.SLIDatum) or datum.dtype != "parametertype":
raise TypeError("expected parameter datum")
self._datum = datum
# Generic binary operation
def _binop(self, op, other, params=None):
if isinstance(other, (int, float)):
other = CreateParameter('constant', {'value': float(other)})
if not isinstance(other, Parameter):
raise NotImplementedError()
if params is None:
return sli_func(op, self._datum, other._datum)
else:
return sli_func(op, self._datum, other._datum, params)
def __add__(self, other):
return self._binop("add", other)
def __radd__(self, other):
return self + other
def __sub__(self, other):
return self._binop("sub", other)
def __rsub__(self, other):
return self * (-1) + other
def __neg__(self):
return self * (-1)
def __mul__(self, other):
return self._binop("mul", other)
def __rmul__(self, other):
return self * other
def __div__(self, other):
return self._binop("div", other)
def __truediv__(self, other):
return self._binop("div", other)
def __pow__(self, exponent):
return sli_func("pow", self._datum, float(exponent))
def __lt__(self, other):
return self._binop("compare", other, {'comparator': 0})
def __le__(self, other):
return self._binop("compare", other, {'comparator': 1})
def __eq__(self, other):
return self._binop("compare", other, {'comparator': 2})
def __ne__(self, other):
return self._binop("compare", other, {'comparator': 3})
def __ge__(self, other):
return self._binop("compare", other, {'comparator': 4})
def __gt__(self, other):
return self._binop("compare", other, {'comparator': 5})
def GetValue(self):
"""
Compute value of parameter.
Returns
-------
out : value
The value of the parameter
See also
--------
CreateParameter
Example
-------
::
import nest
# normal distribution parameter
P = nest.CreateParameter('normal', {'mean': 0.0, 'sigma': 1.0})
# get out value
P.GetValue()
"""
return sli_func("GetValue", self._datum)
def is_spatial(self):
return sli_func('ParameterIsSpatial', self._datum)
def apply(self, spatial_nc, positions=None):
if positions is None:
return sli_func('Apply', self._datum, spatial_nc)
else:
if len(spatial_nc) != 1:
raise ValueError('The NodeCollection must contain a single node ID only')
if not isinstance(positions, (list, tuple)):
raise TypeError('Positions must be a list or tuple of positions')
for pos in positions:
if not isinstance(pos, (list, tuple, numpy.ndarray)):
raise TypeError('Each position must be a list or tuple')
if len(pos) != len(positions[0]):
raise ValueError('All positions must have the same number of dimensions')
return sli_func('Apply', self._datum, {'source': spatial_nc, 'targets': positions})
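# Illustrative Parameter arithmetic sketch (not executed; values are
# arbitrary). The overloaded operators above return new Parameter objects,
# so parameters can be scaled and shifted before drawing values:
#
#   p = nest.CreateParameter('uniform', {'min': 0.0, 'max': 1.0})
#   q = 2.0 * p + 0.5             # uniform on [0.5, 2.5)
#   q.GetValue()                  # draw a single value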
| gpl-2.0 |
cl4rke/scikit-learn | sklearn/ensemble/tests/test_base.py | 284 | 1328 | """
Testing for the base module (sklearn.ensemble.base).
"""
# Authors: Gilles Louppe
# License: BSD 3 clause
from numpy.testing import assert_equal
from nose.tools import assert_true
from sklearn.utils.testing import assert_raise_message
from sklearn.datasets import load_iris
from sklearn.ensemble import BaggingClassifier
from sklearn.linear_model import Perceptron
def test_base():
# Check BaseEnsemble methods.
ensemble = BaggingClassifier(base_estimator=Perceptron(), n_estimators=3)
iris = load_iris()
ensemble.fit(iris.data, iris.target)
ensemble.estimators_ = [] # empty the list and create estimators manually
ensemble._make_estimator()
ensemble._make_estimator()
ensemble._make_estimator()
ensemble._make_estimator(append=False)
assert_equal(3, len(ensemble))
assert_equal(3, len(ensemble.estimators_))
assert_true(isinstance(ensemble[0], Perceptron))
def test_base_zero_n_estimators():
# Check that instantiating a BaseEnsemble with n_estimators<=0 raises
# a ValueError.
ensemble = BaggingClassifier(base_estimator=Perceptron(), n_estimators=0)
iris = load_iris()
assert_raise_message(ValueError,
"n_estimators must be greater than zero, got 0.",
ensemble.fit, iris.data, iris.target)
| bsd-3-clause |
alshedivat/tensorflow | tensorflow/contrib/learn/python/learn/preprocessing/tests/categorical_test.py | 137 | 2219 | # encoding: utf-8
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Categorical tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.learn.python.learn.learn_io import HAS_PANDAS
from tensorflow.contrib.learn.python.learn.preprocessing import categorical
from tensorflow.python.platform import test
class CategoricalTest(test.TestCase):
"""Categorical tests."""
def testSingleCategoricalProcessor(self):
cat_processor = categorical.CategoricalProcessor(min_frequency=1)
x = cat_processor.fit_transform([["0"], [1], [float("nan")], ["C"], ["C"],
[1], ["0"], [np.nan], [3]])
self.assertAllEqual(list(x), [[2], [1], [0], [3], [3], [1], [2], [0], [0]])
def testSingleCategoricalProcessorPandasSingleDF(self):
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
cat_processor = categorical.CategoricalProcessor()
data = pd.DataFrame({"Gender": ["Male", "Female", "Male"]})
x = list(cat_processor.fit_transform(data))
self.assertAllEqual(list(x), [[1], [2], [1]])
def testMultiCategoricalProcessor(self):
cat_processor = categorical.CategoricalProcessor(
min_frequency=0, share=False)
x = cat_processor.fit_transform([["0", "Male"], [1, "Female"],
["3", "Male"]])
self.assertAllEqual(list(x), [[1, 1], [2, 2], [3, 1]])
if __name__ == "__main__":
test.main()
| apache-2.0 |
manashmndl/scikit-learn | examples/svm/plot_weighted_samples.py | 188 | 1943 | """
=====================
SVM: Weighted samples
=====================
Plot decision function of a weighted dataset, where the size of points
is proportional to its weight.
The sample weighting rescales the C parameter, which means that the classifier
puts more emphasis on getting these points right. The effect might often be
subtle.
To emphasize the effect here, we particularly weight outliers, making the
deformation of the decision boundary very visible.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
def plot_decision_function(classifier, sample_weight, axis, title):
# plot the decision function
xx, yy = np.meshgrid(np.linspace(-4, 5, 500), np.linspace(-4, 5, 500))
Z = classifier.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# plot the line, the points, and the nearest vectors to the plane
axis.contourf(xx, yy, Z, alpha=0.75, cmap=plt.cm.bone)
axis.scatter(X[:, 0], X[:, 1], c=Y, s=100 * sample_weight, alpha=0.9,
cmap=plt.cm.bone)
axis.axis('off')
axis.set_title(title)
# we create 20 points
np.random.seed(0)
X = np.r_[np.random.randn(10, 2) + [1, 1], np.random.randn(10, 2)]
Y = [1] * 10 + [-1] * 10
sample_weight_last_ten = abs(np.random.randn(len(X)))
sample_weight_constant = np.ones(len(X))
# and bigger weights to some outliers
sample_weight_last_ten[15:] *= 5
sample_weight_last_ten[9] *= 15
# for reference, first fit without class weights
# fit the model
clf_weights = svm.SVC()
clf_weights.fit(X, Y, sample_weight=sample_weight_last_ten)
clf_no_weights = svm.SVC()
clf_no_weights.fit(X, Y)
fig, axes = plt.subplots(1, 2, figsize=(14, 6))
plot_decision_function(clf_no_weights, sample_weight_constant, axes[0],
"Constant weights")
plot_decision_function(clf_weights, sample_weight_last_ten, axes[1],
"Modified weights")
plt.show()
| bsd-3-clause |
scenarios/tensorflow | tensorflow/contrib/learn/python/learn/learn_io/io_test.py | 18 | 5287 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""tf.learn IO operation tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import sys
# TODO: #6568 Remove this hack that makes dlopen() not crash.
if hasattr(sys, "getdlopenflags") and hasattr(sys, "setdlopenflags"):
import ctypes
sys.setdlopenflags(sys.getdlopenflags() | ctypes.RTLD_GLOBAL)
# pylint: disable=wildcard-import
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn import datasets
from tensorflow.contrib.learn.python.learn.estimators._sklearn import accuracy_score
from tensorflow.contrib.learn.python.learn.learn_io import *
from tensorflow.python.platform import test
# pylint: enable=wildcard-import
class IOTest(test.TestCase):
# pylint: disable=undefined-variable
"""tf.learn IO operation tests."""
def test_pandas_dataframe(self):
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
random.seed(42)
iris = datasets.load_iris()
data = pd.DataFrame(iris.data)
labels = pd.DataFrame(iris.target)
classifier = learn.LinearClassifier(
feature_columns=learn.infer_real_valued_columns_from_input(data),
n_classes=3)
classifier.fit(data, labels, steps=100)
score = accuracy_score(labels[0], list(classifier.predict_classes(data)))
self.assertGreater(score, 0.5, "Failed with score = {0}".format(score))
else:
print("No pandas installed. pandas-related tests are skipped.")
def test_pandas_series(self):
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
random.seed(42)
iris = datasets.load_iris()
data = pd.DataFrame(iris.data)
labels = pd.Series(iris.target)
classifier = learn.LinearClassifier(
feature_columns=learn.infer_real_valued_columns_from_input(data),
n_classes=3)
classifier.fit(data, labels, steps=100)
score = accuracy_score(labels, list(classifier.predict_classes(data)))
self.assertGreater(score, 0.5, "Failed with score = {0}".format(score))
def test_string_data_formats(self):
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
with self.assertRaises(ValueError):
learn.io.extract_pandas_data(pd.DataFrame({"Test": ["A", "B"]}))
with self.assertRaises(ValueError):
learn.io.extract_pandas_labels(pd.DataFrame({"Test": ["A", "B"]}))
def test_dask_io(self):
if HAS_DASK and HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
import dask.dataframe as dd # pylint: disable=g-import-not-at-top
# test dask.dataframe
df = pd.DataFrame(
dict(
a=list("aabbcc"), b=list(range(6))),
index=pd.date_range(
start="20100101", periods=6))
ddf = dd.from_pandas(df, npartitions=3)
extracted_ddf = extract_dask_data(ddf)
self.assertEqual(
extracted_ddf.divisions, (0, 2, 4, 6),
"Failed with divisions = {0}".format(extracted_ddf.divisions))
self.assertEqual(
extracted_ddf.columns.tolist(), ["a", "b"],
"Failed with columns = {0}".format(extracted_ddf.columns))
# test dask.series
labels = ddf["a"]
extracted_labels = extract_dask_labels(labels)
self.assertEqual(
extracted_labels.divisions, (0, 2, 4, 6),
"Failed with divisions = {0}".format(extracted_labels.divisions))
# labels should only have one column
with self.assertRaises(ValueError):
extract_dask_labels(ddf)
else:
print("No dask installed. dask-related tests are skipped.")
def test_dask_iris_classification(self):
if HAS_DASK and HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
import dask.dataframe as dd # pylint: disable=g-import-not-at-top
random.seed(42)
iris = datasets.load_iris()
data = pd.DataFrame(iris.data)
data = dd.from_pandas(data, npartitions=2)
labels = pd.DataFrame(iris.target)
labels = dd.from_pandas(labels, npartitions=2)
classifier = learn.LinearClassifier(
feature_columns=learn.infer_real_valued_columns_from_input(data),
n_classes=3)
classifier.fit(data, labels, steps=100)
predictions = data.map_partitions(classifier.predict).compute()
score = accuracy_score(labels.compute(), predictions)
self.assertGreater(score, 0.5, "Failed with score = {0}".format(score))
if __name__ == "__main__":
test.main()
| apache-2.0 |
laranea/trading-with-python | sandbox/spreadCalculations.py | 78 | 1496 | '''
Created on 28 okt 2011
@author: jev
'''
from tradingWithPython import estimateBeta, Spread, returns, Portfolio, readBiggerScreener
from tradingWithPython.lib import yahooFinance
from pandas import DataFrame, Series
import numpy as np
import matplotlib.pyplot as plt
import os
symbols = ['SPY','IWM']
y = yahooFinance.HistData('temp.csv')
y.startDate = (2007,1,1)
df = y.loadSymbols(symbols,forceDownload=False)
#df = y.downloadData(symbols)
res = readBiggerScreener('CointPairs.csv')
#---check with spread scanner
#sp = DataFrame(index=symbols)
#
#sp['last'] = df.ix[-1,:]
#sp['targetCapital'] = Series({'SPY':100,'IWM':-100})
#sp['targetShares'] = sp['targetCapital']/sp['last']
#print sp
#The dollar-neutral ratio is about 1 * SPY - 1.7 * IWM. You will get the spread = zero (or probably very near zero)
#s = Spread(symbols, histClose = df)
#print s
#s.value.plot()
#print 'beta (returns)', estimateBeta(df[symbols[0]],df[symbols[1]],algo='returns')
#print 'beta (log)', estimateBeta(df[symbols[0]],df[symbols[1]],algo='log')
#print 'beta (standard)', estimateBeta(df[symbols[0]],df[symbols[1]],algo='standard')
#p = Portfolio(df)
#p.setShares([1, -1.7])
#p.value.plot()
quote = yahooFinance.getQuote(symbols)
print quote
s = Spread(symbols,histClose=df, estimateBeta = False)
s.setLast(quote['last'])
s.setShares(Series({'SPY':1,'IWM':-1.7}))
print s
#s.value.plot()
#s.plot()
fig = plt.figure(2)
s.plot()
| bsd-3-clause |
bentzinir/Buffe | Applications/mgail/environments/linemove_2D/environment.py | 1 | 8505 | import tensorflow as tf
import time
from ER import ER
import matplotlib.pyplot as plt
import numpy as np
import common
import sys
import subprocess
class ENVIRONMENT(object):
def __init__(self, run_dir):
self.name = 'linemove_2D'
game_params = {
'L': 2,
'dt': 0.15,
'v_0': 0.,
'v_max': 0.5,
}
self._connect(game_params)
self._train_params()
self.run_dir = run_dir
if self.collect_data:
self.record_expert()
sys.exit(0)
self._init_display()
# subprocess.Popen(self.run_dir + "./simulator")
# self.pipe_module = tf.load_op_library(self.run_dir + 'pipe.so')
plt.ion()
plt.show()
def record_expert(self):
er = ER(memory_size=self.er_agent_size,
state_dim=self.state_size,
action_dim=self.action_size,
reward_dim=1, # stub connection
batch_size=self.batch_size,
history_length=1)
all_states = np.zeros((1, self.state_size))
all_actions = np.zeros((1, self.action_size))
for i in xrange(self.n_episodes_expert):
# DEBUG !!! remove noise
states, actions = self.play_expert(noise=1)
rewards = np.zeros_like(actions[:, 0])
terminals = np.zeros_like(actions[:, 0])
terminals[-1] = 1
er.add(actions=actions, rewards=rewards, next_states=states, terminals=terminals)
all_states = np.append(all_states, states, axis=0)
all_actions = np.append(all_actions, actions, axis=0)
# visualization
if 0:
self.ax.clear()
# loop over trajectory points and plot marks
for s in states:
self.ax.scatter(s[2], s[3], s=40, marker='.', color='k')
self.ax.set_xlim(xmin=-self.L, xmax=2 * self.L)
self.ax.set_ylim(ymin=-self.L, ymax=2 * self.L)
self.fig.canvas.draw()
print("Processed trajectory %d/%d") % (i, self.n_episodes_expert)
er.states_mean = np.mean(all_states, axis=0)
er.actions_mean = np.mean(all_actions, axis=0)
er.states_std = np.std(all_states, axis=0)
er.actions_std = np.std(all_actions, axis=0)
common.save_er(directory=self.run_dir, module=er)
def play_expert(self, noise=1):
x_goals = np.asarray([[self.L, 0],
[self.L, self.L],
[0, self.L],
[0, 0]]).astype(np.float32)
goal = 0
x_ = np.asarray([[0, 0]]).astype(np.float32)
v_ = np.asarray([[0, 0]]).astype(np.float32)
actions = np.asarray([[0, 0]]).astype(np.float32)
a_min = float(self.L)/20
a_max = float(self.L)/10
for i in xrange(self.n_steps_test):
d = x_goals[goal] - x_[-1]
d_abs_clip = np.clip(abs(d), 0, a_max)
a = np.sign(d) * d_abs_clip
# add noise
if noise == 1:
a += np.random.normal(scale=a_min/20)
# transition
v = v_[-1] + self.dt * a
v = np.clip(v, -self.v_max, self.v_max)
x = x_[-1] + self.dt * v
x_ = np.append(x_, np.expand_dims(x, 0), axis=0)
v_ = np.append(v_, np.expand_dims(v, 0), axis=0)
actions = np.append(actions, np.expand_dims(a, 0), axis=0)
if np.linalg.norm(d) < float(self.L)/10:
goal += 1
goal = goal % 4
states = np.concatenate([v_, x_], axis=1)
return states, actions
def _step(self, a):
a = np.squeeze(a)
self.t += 1
v = self.state[:2] + self.dt * a
v = np.clip(v, -self.v_max, self.v_max)
x = self.state[2:] + self.dt * v
self.state = np.concatenate([v, x])
if self.t >= 100:
self.done = True
else:
self.done = False
reward = np.float32(0.1)
return self.state.astype(np.float32), reward, self.done
def step(self, a, mode):
if mode == 'tensorflow':
state, reward, done = tf.py_func(self._step, inp=[a], Tout=[tf.float32, tf.float32, tf.bool], name='env_step_func')
            # DEBUG: flatten the state; not sure if this is done correctly
state = tf.reshape(state, shape=(self.state_size,))
done.set_shape(())
else:
state, reward, done = self._step(a)
info = 0
return state, reward, done, info
def reset(self):
self.t = 0
x_ = [0, 0]
v_ = [0, 0]
self.state = np.concatenate([v_, x_]).astype(np.float32)
return self.state
def get_status(self):
return self.done
def get_state(self):
return self.state
def reference_contour(self):
states, actions = self.play_expert(noise=0)
self.ax.clear()
# loop over trajectory points and plot marks
for s in states:
self.ax.scatter(s[2], s[3], s=40, marker='.', color='k')
self.ax.set_xlim(xmin=-self.L, xmax=2 * self.L)
self.ax.set_ylim(ymin=-self.L, ymax=2 * self.L)
self.fig.canvas.draw()
return states
def render(self):
self.ax.set_title(('Sample Trajectory\n time: %d' % self.t))
x_test = self.state[2:]
v_test = self.state[:2]
self.scat_agent.set_offsets(x_test)
self.scat_expert.set_offsets(self.x_expert[self.t])
self.ax.set_xlim(xmin=-self.L, xmax=2 * self.L)
self.ax.set_ylim(ymin=-self.L, ymax=2 * self.L)
self.fig.canvas.draw()
time.sleep(0.01)
def _connect(self, game_params):
self.dt = game_params['dt']
self.alpha_accel = 1.
self.alphas = np.asarray([self.alpha_accel])
self.v_0 = game_params['v_0']
self.v_max = game_params['v_max']
self.L = game_params['L']
self.state_size = 4
self.action_size = 2
self.action_space = np.asarray([None]*self.action_size)
def _init_display(self):
self.reset()
self.t = 0
if self.vis_flag:
self.fig = plt.figure()
self.ax = plt.subplot2grid((2, 2), (0, 0), colspan=2, rowspan=2)
self.x_expert = self.reference_contour()[:, 2:]
self.scat_agent = self.ax.scatter(self.state[2], self.state[3], s=180, marker='o', color='r')
self.scat_expert = self.ax.scatter(self.x_expert[0][0], self.x_expert[0][1], s=180, marker='o', color='b')
def _train_params(self):
self.collect_data = False
self.trained_model = None
self.train_mode = True
self.train_flags = [0, 1, 1] # [autoencoder, transition, discriminator/policy]
self.expert_data = 'expert_data/expert-2016-10-26-11-43.bin'
self.n_train_iters = 1000000
self.n_episodes_test = 1
self.kill_itr = self.n_train_iters
self.reward_kill_th = -1
self.test_interval = 2000
self.n_steps_test = 100
self.vis_flag = True
self.save_models = True
self.config_dir = None
self.tbptt = False
self.success_th = 3500
self.n_episodes_expert = 500
# Main parameters to play with:
self.reset_itrvl = 10000
self.n_reset_iters = 5000
self.model_identification_time = 2000
self.prep_time = 4000
self.collect_experience_interval = 15
self.n_steps_train = 25
self.discr_policy_itrvl = 500
self.K_T = 1
self.K_D = 1
self.K_P = 1
self.gamma = 0.99
self.batch_size = 70
self.policy_al_w = 1e-3
self.policy_tr_w = 1e-3
self.policy_accum_steps = 5
self.er_agent_size = 20000
self.total_trans_err_allowed = 1000# 1e-0
self.trans_loss_th = 2e-2
self.max_trans_iters = 2
self.t_size = [400, 200]
self.d_size = [100, 50]
self.p_size = [75, 25]
self.t_lr = 0.0005
self.d_lr = 0.001
self.p_lr = 0.0005
self.w_std = 0.25
self.noise_intensity = 3.
self.do_keep_prob = 0.8
# Parameters i don't want to play with
self.disc_as_classifier = True
self.sae_hidden_size = 80
self.sae_beta = 3.
self.sae_rho = 0.01
self.sae_batch = 25e3
self.use_sae = False
| mit |
harisbal/pandas | pandas/tests/arithmetic/test_object.py | 3 | 7634 | # -*- coding: utf-8 -*-
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
# Specifically for object dtype
import operator
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas.core import ops
from pandas import Series, Timestamp
# ------------------------------------------------------------------
# Comparisons
class TestObjectComparisons(object):
def test_comparison_object_numeric_nas(self):
ser = Series(np.random.randn(10), dtype=object)
shifted = ser.shift(2)
ops = ['lt', 'le', 'gt', 'ge', 'eq', 'ne']
for op in ops:
func = getattr(operator, op)
result = func(ser, shifted)
expected = func(ser.astype(float), shifted.astype(float))
tm.assert_series_equal(result, expected)
def test_object_comparisons(self):
ser = Series(['a', 'b', np.nan, 'c', 'a'])
result = ser == 'a'
expected = Series([True, False, False, False, True])
tm.assert_series_equal(result, expected)
result = ser < 'a'
expected = Series([False, False, False, False, False])
tm.assert_series_equal(result, expected)
result = ser != 'a'
expected = -(ser == 'a')
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('dtype', [None, object])
def test_more_na_comparisons(self, dtype):
left = Series(['a', np.nan, 'c'], dtype=dtype)
right = Series(['a', np.nan, 'd'], dtype=dtype)
result = left == right
expected = Series([True, False, False])
tm.assert_series_equal(result, expected)
result = left != right
expected = Series([False, True, True])
tm.assert_series_equal(result, expected)
result = left == np.nan
expected = Series([False, False, False])
tm.assert_series_equal(result, expected)
result = left != np.nan
expected = Series([True, True, True])
tm.assert_series_equal(result, expected)
# ------------------------------------------------------------------
# Arithmetic
class TestArithmetic(object):
@pytest.mark.parametrize("op", [operator.add, ops.radd])
@pytest.mark.parametrize("other", ["category", "Int64"])
def test_add_extension_scalar(self, other, box, op):
# GH#22378
# Check that scalars satisfying is_extension_array_dtype(obj)
# do not incorrectly try to dispatch to an ExtensionArray operation
arr = pd.Series(['a', 'b', 'c'])
expected = pd.Series([op(x, other) for x in arr])
arr = tm.box_expected(arr, box)
expected = tm.box_expected(expected, box)
result = op(arr, other)
tm.assert_equal(result, expected)
@pytest.mark.parametrize('box', [
pytest.param(pd.Index,
marks=pytest.mark.xfail(reason="Does not mask nulls",
strict=True, raises=TypeError)),
pd.Series,
pd.DataFrame
], ids=lambda x: x.__name__)
def test_objarr_add_str(self, box):
ser = pd.Series(['x', np.nan, 'x'])
expected = pd.Series(['xa', np.nan, 'xa'])
ser = tm.box_expected(ser, box)
expected = tm.box_expected(expected, box)
result = ser + 'a'
tm.assert_equal(result, expected)
@pytest.mark.parametrize('box', [
pytest.param(pd.Index,
marks=pytest.mark.xfail(reason="Does not mask nulls",
strict=True, raises=TypeError)),
pd.Series,
pd.DataFrame
], ids=lambda x: x.__name__)
def test_objarr_radd_str(self, box):
ser = pd.Series(['x', np.nan, 'x'])
expected = pd.Series(['ax', np.nan, 'ax'])
ser = tm.box_expected(ser, box)
expected = tm.box_expected(expected, box)
result = 'a' + ser
tm.assert_equal(result, expected)
@pytest.mark.parametrize('data', [
[1, 2, 3],
[1.1, 2.2, 3.3],
[Timestamp('2011-01-01'), Timestamp('2011-01-02'), pd.NaT],
['x', 'y', 1]])
@pytest.mark.parametrize('dtype', [None, object])
def test_objarr_radd_str_invalid(self, dtype, data, box):
ser = Series(data, dtype=dtype)
ser = tm.box_expected(ser, box)
with pytest.raises(TypeError):
'foo_' + ser
@pytest.mark.parametrize('op', [operator.add, ops.radd,
operator.sub, ops.rsub])
def test_objarr_add_invalid(self, op, box):
# invalid ops
obj_ser = tm.makeObjectSeries()
obj_ser.name = 'objects'
obj_ser = tm.box_expected(obj_ser, box)
with pytest.raises(Exception):
op(obj_ser, 1)
with pytest.raises(Exception):
op(obj_ser, np.array(1, dtype=np.int64))
# TODO: Moved from tests.series.test_operators; needs cleanup
def test_operators_na_handling(self):
ser = Series(['foo', 'bar', 'baz', np.nan])
result = 'prefix_' + ser
expected = pd.Series(['prefix_foo', 'prefix_bar',
'prefix_baz', np.nan])
tm.assert_series_equal(result, expected)
result = ser + '_suffix'
expected = pd.Series(['foo_suffix', 'bar_suffix',
'baz_suffix', np.nan])
tm.assert_series_equal(result, expected)
# TODO: parametrize over box
@pytest.mark.parametrize('dtype', [None, object])
def test_series_with_dtype_radd_timedelta(self, dtype):
# note this test is _not_ aimed at timedelta64-dtyped Series
ser = pd.Series([pd.Timedelta('1 days'), pd.Timedelta('2 days'),
pd.Timedelta('3 days')], dtype=dtype)
expected = pd.Series([pd.Timedelta('4 days'), pd.Timedelta('5 days'),
pd.Timedelta('6 days')])
result = pd.Timedelta('3 days') + ser
tm.assert_series_equal(result, expected)
result = ser + pd.Timedelta('3 days')
tm.assert_series_equal(result, expected)
# TODO: cleanup & parametrize over box
def test_mixed_timezone_series_ops_object(self):
# GH#13043
ser = pd.Series([pd.Timestamp('2015-01-01', tz='US/Eastern'),
pd.Timestamp('2015-01-01', tz='Asia/Tokyo')],
name='xxx')
assert ser.dtype == object
exp = pd.Series([pd.Timestamp('2015-01-02', tz='US/Eastern'),
pd.Timestamp('2015-01-02', tz='Asia/Tokyo')],
name='xxx')
tm.assert_series_equal(ser + pd.Timedelta('1 days'), exp)
tm.assert_series_equal(pd.Timedelta('1 days') + ser, exp)
# object series & object series
ser2 = pd.Series([pd.Timestamp('2015-01-03', tz='US/Eastern'),
pd.Timestamp('2015-01-05', tz='Asia/Tokyo')],
name='xxx')
assert ser2.dtype == object
exp = pd.Series([pd.Timedelta('2 days'), pd.Timedelta('4 days')],
name='xxx')
tm.assert_series_equal(ser2 - ser, exp)
tm.assert_series_equal(ser - ser2, -exp)
ser = pd.Series([pd.Timedelta('01:00:00'), pd.Timedelta('02:00:00')],
name='xxx', dtype=object)
assert ser.dtype == object
exp = pd.Series([pd.Timedelta('01:30:00'), pd.Timedelta('02:30:00')],
name='xxx')
tm.assert_series_equal(ser + pd.Timedelta('00:30:00'), exp)
tm.assert_series_equal(pd.Timedelta('00:30:00') + ser, exp)
| bsd-3-clause |
ESeiler/daisy | hgt_eval.py | 1 | 18887 | #!/usr/bin/env python
import matplotlib
matplotlib.use('Agg')
import pysam
import numpy as np
import scipy.stats as st
import matplotlib.pyplot as plt
import sys
import csv
import argparse
import operator
import random
import time
import string
#from multiprocessing.dummy import Pool as TP
__author__ = "Kathrin Trappe"
# parse candidate file and keep entries with acc-don or don-acc translocations
def get_candidates(candfile, acceptor, donor):
with open(candfile) as input:
for line in input:
line = line.replace('\n', '')
if (line.split('\t')[0] == acceptor and line.split('\t')[3] == donor):
a_pos = int(line.split('\t')[1])
a_base = line.split('\t')[2]
d_pos = int(line.split('\t')[4])
d_base = line.split('\t')[5]
split_support = int(line.split('\t')[6])
yield(CAND(a_pos, a_base, d_pos, d_base, split_support))
if (line.split('\t')[3] == acceptor and line.split('\t')[0] == donor):
a_pos = int(line.split('\t')[4])
a_base = line.split('\t')[5]
d_pos = int(line.split('\t')[1])
d_base = line.split('\t')[2]
split_support = int(line.split('\t')[6])
yield(CAND(a_pos, a_base, d_pos, d_base, split_support))
def read_primary_pairs(inputfile):
first = None
second = None
name = None
for aln in inputfile:
if (aln.qname != name) and (name != None):
if (first != None) and (second != None):
yield (first, second)
first = None
second = None
name = aln.qname
if aln.is_secondary:
continue
if aln.is_read1:
first = aln
if aln.is_read2:
second = aln
if (first != None) and (second != None):
yield (first, second)
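# Illustrative usage sketch (not executed; the BAM file name is a placeholder
# and pysam.Samfile is assumed to match the pysam version used by this script):
#   sam = pysam.Samfile('reads.bam', 'rb')
#   for mate1, mate2 in read_primary_pairs(sam):
#       pass  # mate1/mate2 are the primary alignments of one read pair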
def read_pairs(inputfile):
first = None
second = None
name = None
for aln in inputfile:
if (first != None) and (second != None):
yield (first, second)
if (aln.qname != name) and (name != None):
first = None
second = None
name = aln.qname
#if aln.is_secondary:
#continue
if aln.is_read1:
first = aln
if aln.is_read2:
second = aln
if (first != None) and (second != None):
yield (first, second)
# Get all read names from phage sam file
def read_phage_pairs(inputfile):
name = None
for aln in inputfile:
if (aln.qname != name):
name = aln.qname
yield (name)
def get_random_regions(hgt_size, tabu_start, tabu_end, gen_size, num_regions):
randrange = random.randrange
return (get_random_region(hgt_size, tabu_start, tabu_end, gen_size, randrange) for x in xrange(num_regions))
# Generate random regions outside of candidate HGT region ("tabu region")
def get_random_region(hgt_size, tabu_start, tabu_end, gen_size, randrange):
start = 0
end = gen_size + 1
while (end > gen_size):
start = randrange(0, gen_size - hgt_size)
end = start + hgt_size
# random region is overlapping with tabu region
if (start == tabu_start) or (end == tabu_end) or (start < tabu_start and end > tabu_start) or (start < tabu_end and end > tabu_end):
end = gen_size + 1
return (start, end)
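# Illustrative example (assumed numbers, not from the original): for hgt_size=100,
# a tabu region [500, 600) and gen_size=10000, start positions are redrawn until the
# sampled window [start, start+100) no longer overlaps the tabu region, e.g. (3120, 3220).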
# Add donor pair if matching, i.e. both mates lie within donor HGT region
def adpim(don_start, don_end, aln1, aln2):
if (aln1.aend < don_start) or (aln1.pos > don_end): return 0
if (aln2.aend < don_start) or (aln2.pos > don_end): return 0
return 1
# Add pair if matching, i.e. first mate is on acceptor close to HGT boundary, second within donor HGT region
def apim(acc_start, acc_end, don_start, don_end, don_size, aln1, aln2):
if (aln1.aend < acc_end) and (aln1.pos > acc_start): return 0
if (aln1.aend < (acc_start - don_size/2)) or (aln1.pos > (acc_end + don_size/2)): return 0
if (aln2.aend < don_start) or (aln2.pos > don_end): return 0
return 1
# Add phage hit if read pair is in phage_list
def phage_hit(qname, phage_list):
if qname in phage_list:
return 1
return 0
# CAND class holding information of single boundaries
class CAND:
def __init__(self, acc_pos, acc_base, don_pos, don_base, split_support):
self.acc_pos = acc_pos
self.acc_base = acc_base
self.don_pos = don_pos
self.don_base = don_base
self.split_support = split_support
# HGT class holding all HGT related infos including sampling values
# acc_length/don_length: length of acceptor/donor genome (required for sampling)
# add_pair_if_matching functions evaluate two given alignments (reads) if they fall into the HGT boundaries
class HGT:
def __init__(self, acc_start, acc_end, acc_start_base, acc_end_base, don_start, don_end, split_support, options, acc_covs, don_covs):
self.acc_start = acc_start
self.acc_end = acc_end
self.acc_start_base = acc_start_base
self.acc_end_base = acc_end_base
self.don_start = don_start
self.don_end = don_end
self.don_size = don_end - don_start
self.acc_mean = np.mean(acc_covs[acc_start:acc_end],)
self.don_mean = np.mean(don_covs[don_start:don_end],)
self.don_pair_support = 0
self.pair_support = 0
self.phage_hits = 0
self.split_support = split_support
# Create sampling regions
self.settup_bootstrap(options, acc_covs, don_covs)
def add_pair_if_matching(self, aln1, aln2, phage_list):
self.add_pair_if_matching_bootstrap(aln1, aln2)
if (apim(self.acc_start, self.acc_end, self.don_start, self.don_end, self.don_size, aln1, aln2) != 0):
self.pair_support += 1
self.phage_hits += phage_hit(aln2.qname, phage_list)
def add_don_pair_if_matching(self, aln1, aln2, phage_list):
self.add_don_pair_if_matching_bootstrap(aln1, aln2)
if (aln1.aend < self.don_start) or (aln1.pos > self.don_end): return
if (aln2.aend < self.don_start) or (aln2.pos > self.don_end): return
self.don_pair_support += 1
self.phage_hits += phage_hit(aln1.qname, phage_list)
def settup_bootstrap(self, options, acc_covs, don_covs):
# Bootstrapping regions
it = list(get_random_regions((self.acc_end - self.acc_start), self.acc_start, self.acc_end, options.acc_length, options.boot_num))
l1, l2 = zip(*it)
self.boot_acc_start_list = list(l1)
self.boot_acc_end_list = list(l2)
self.boot_acc_coverage_list = map(lambda x: np.mean(acc_covs[x[0]:x[1]]), it)
it = list(get_random_regions((self.don_end - self.don_start), self.don_start, self.don_end, options.don_length, options.boot_num))
l1, l2 = zip(*it)
self.boot_don_start_list = list(l1)
self.boot_don_end_list = list(l2)
self.boot_don_coverage_list = map(lambda x: np.mean(don_covs[x[0]:x[1]]), it)
if (options.paired_reads):
self.boot_crossing_pairs_list = [0] * options.boot_num
self.boot_don_pairs_list = [0] * options.boot_num
def add_pair_if_matching_bootstrap(self, aln1, aln2):
hits = map(lambda x: apim(x[0], x[1], self.don_start, self.don_end, self.don_size, aln1, aln2), zip(self.boot_acc_start_list, self.boot_acc_end_list))
self.boot_crossing_pairs_list = [sum(x) for x in zip(self.boot_crossing_pairs_list, hits)]
def add_don_pair_if_matching_bootstrap(self, aln1, aln2):
hits = map(lambda x: adpim(x[0], x[1], aln1, aln2), zip(self.boot_don_start_list, self.boot_don_end_list))
self.boot_don_pairs_list = [sum(x) for x in zip(self.boot_don_pairs_list, hits)]
# Check for duplicate HGT entry, i.e. a HGT with all 4 boundaries within tolerance, resp.
def duplicate_entry(hgt_list, tolerance, acc_start, acc_end, don_start, don_end, split_support):
for i in xrange(0, len(hgt_list)):
last_hgt = hgt_list[i]
if (abs(last_hgt.acc_start - acc_start) < tolerance and
abs(last_hgt.acc_end - acc_end) < tolerance and
abs(last_hgt.don_start - don_start) < tolerance and
abs(last_hgt.don_end - don_end) < tolerance):
last_hgt.split_support += split_support
return 1
return 0
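# Illustrative example (assumed numbers): with tolerance=20, a stored HGT spanning
# acceptor 1000-2000 / donor 500-1500 absorbs a new candidate at 1010-1995 / 505-1498
# (its split support is summed and 1 is returned), whereas an acceptor start of 1030
# differs by >= 20 bp and the caller appends a new entry instead.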
def main():
# Parsing options
parser = argparse.ArgumentParser(description='Hgt candidate evaluation.')
parser.add_argument('bamfile', help="BAM alignments file for both acceptor and donor, sorted via qname")
parser.add_argument('candfile', help="File with inter-chr translocation breakpoints")
parser.add_argument('acceptor', help="Name of acceptor reference (gi as in fasta)")
parser.add_argument('donor', help="Name of donor reference (gi as in fasta)")
# optional arguments (has default values)
parser.add_argument('--phagefile', default=None, help="SAM alignments file for phage database")
parser.add_argument('-o','--outfile', default="hgt_eval.vcf", help="Output VCF file with filtered, evaluated candidates")
parser.add_argument('--min-size', dest='min_size', default=100, type=int, help="minimal HGT size, default 100")
parser.add_argument('--max-size', dest='max_size', default=50000, type=int, help="maximal HGT size, default 50000")
parser.add_argument('--tolerance', default=20, type=int, help="Position range to remove duplicates, default 20")
parser.add_argument('--pair-support', dest='paired_reads', default=True, action='store_false', help="Turn on/off paired reads support, default TRUE")
parser.add_argument('--num-boot-regions', dest='boot_num', default=100, type=int, help="Number of sampled regions in acceptor for bootstrapping expected number of pairs and coverage for filtering, default 100")
parser.add_argument('--boot-sens', dest='boot_sens', default=95, type=int, choices=xrange(0, 100), help="Percent of cases for the candidate region to exceed values of sampled regions, default 95 percent")
options = parser.parse_args()
# Extracting parameters
hgt_minLength = options.min_size
hgt_maxLength = options.max_size
bf = pysam.Samfile(options.bamfile,'rb')
if (options.phagefile is not None):
psf = pysam.Samfile(options.phagefile,'r')
acc_tid = bf.gettid(options.acceptor)
don_tid = bf.gettid(options.donor)
    options.out_all = options.outfile.rsplit('.', 1)[0] + '.tsv'  # note: str.strip('vcf') in the original could also eat leading v/c/f characters of the name
print ('acc_tid', acc_tid, options.acceptor)
print ('don_tid', don_tid, options.donor)
print ('hgt_minLength', hgt_minLength)
print ('hgt_maxLength', hgt_maxLength)
print ('paired_reads', options.paired_reads)
print ('num_boot_regions', options.boot_num)
print ('boot_sensitivity', options.boot_sens)
# Getting acceptor genome length
options.acc_length = bf.lengths[acc_tid]
options.don_length = bf.lengths[don_tid]
# Extracting coverages
covs = [np.zeros((l,)) for l in bf.lengths]
num_matches = 0 # number of read matches, unused by now
for read in bf:
if not read.is_unmapped:
r_start = read.pos # start position
r_end = read.pos + read.qlen # end
covs[read.tid][r_start:r_end] += 1
num_matches += 1
bf.reset()
acc_cov = covs[acc_tid]
don_cov = covs[don_tid]
# Extracting phage read IDs, if any
phage_list = []
if (options.phagefile != None):
phage_list = [qname for qname in read_phage_pairs(psf)]
    # Extracting candidate positions from candidate file
cand_list = [cand for cand in get_candidates(options.candfile, options.acceptor, options.donor)]
#indices = range(len(acc_pos))
#indices.sort(key=acc_pos.__getitem__)
cand_list.sort(key=operator.attrgetter('acc_pos'))
tstart = time.clock()
# Pair up single boundary candidates to HGT candidates conforming to size constraints
hgt_list = []
for ps in xrange(0, len(cand_list)):
cand_start = cand_list[ps]
acc_start = cand_start.acc_pos
don_start_temp = cand_start.don_pos
for pe in xrange(ps, len(cand_list)):
cand_end = cand_list[pe]
acc_end = cand_end.acc_pos + 1
don_end_temp = cand_end.don_pos + 1
# Exchange don_start/end so that don_start < don_end (we don't care about the orientation for now)
if (don_end_temp < don_start_temp):
don_start = don_end_temp
don_end = don_start_temp
else:
don_start = don_start_temp
don_end = don_end_temp
# Avoid similar hgt entries within tolerance
if (duplicate_entry(hgt_list, options.tolerance, acc_start, acc_end, don_start, don_end, cand_end.split_support)):
continue
# Continue if acceptor HGT region exceeds max length
if (abs(acc_end - acc_start) > hgt_maxLength):
break
if (abs(don_end - don_start) > hgt_minLength and abs(don_end - don_start) < hgt_maxLength):
split_support = (cand_start.split_support + cand_end.split_support)
hgt_list.append(HGT(acc_start, acc_end, cand_start.acc_base, cand_end.acc_base, don_start, don_end, split_support, options, acc_cov, don_cov))
print (time.clock() - tstart)
# Get (all/primary) read pairs which map to both donor and acceptor and add them to hgt attributes
if (options.paired_reads):
#for aln1, aln2 in read_pairs(bf):
for aln1, aln2 in read_primary_pairs(bf):
if aln1.is_unmapped:
continue
if aln2.is_unmapped:
continue
if (aln1.tid == don_tid) and (aln2.tid == don_tid):
for hgt in hgt_list:
hgt.add_don_pair_if_matching(aln1, aln2, phage_list)
if (aln1.tid != acc_tid) or (aln2.tid != don_tid):
aln1, aln2 = aln2, aln1 # Ensures that aln1 belongs to acceptor and aln2 to donor
if (aln1.tid != acc_tid) or (aln2.tid != don_tid):
continue
for hgt in hgt_list:
hgt.add_pair_if_matching(aln1, aln2, phage_list)
print ("total sample time ", time.clock() - tstart)
# Output results
with open(options.outfile, 'w') as vcf_out, open(options.out_all, 'w') as output:
# VCF header
writevcf = csv.writer(vcf_out, delimiter = '\t')
writevcf.writerow(['##fileformat=VCFv4.2'])
writevcf.writerow(['##source=DAISY'])
writevcf.writerow(['##INFO=<ID=EVENT,Number=1,Type=String,Description=\"Event identifier for breakends.\">'])
writevcf.writerow(['##contig=<ID='+options.acceptor+'>'])
writevcf.writerow(['##contig=<ID='+options.donor+'>'])
writevcf.writerow(['#CHROM', 'POS', 'ID', 'REF', 'ALT', 'QUAL', 'FILTER', 'INFO', 'FORMAT'])
# TSV header
writetsv = csv.writer(output, delimiter = '\t')
writetsv.writerow(['#AS: Acceptor start position'])
writetsv.writerow(['#AE: Acceptor end position'])
writetsv.writerow(['#DS: Donor start position'])
writetsv.writerow(['#DE: Donor end position'])
writetsv.writerow(['#MC: Mean coverage in region'])
writetsv.writerow(['#Split: Total number split-reads per region (including duplicates!)'])
writetsv.writerow(['#PS-S: Pairs spanning HGT boundaries'])
writetsv.writerow(['#PS-W: Pairs within HGT boundaries'])
writetsv.writerow(['#Phage: PS-S and PS-W reads mapping to phage database'])
writetsv.writerow(['#BS:MC/PS-S/PS-W: Percent of bootstrapped random regions with MC/PS-S/PS-W smaller than candidate'])
if (options.paired_reads):
writetsv.writerow(['AS', 'AE', 'MC', 'BS:MC', 'DS', 'DE', 'MC', 'Split', 'PS-S', 'PS-W', 'Phage', 'BS:MC', 'BS:PS-S', 'BS:PS-W'])
else:
writetsv.writerow(['AS', 'AE', 'MC', 'BS:MC', 'DS', 'DE', 'MC', 'Split', 'BS:MC'])
# write candidates
hgt_vcf_counter = 1
# Define sensitivity values for candidates to be reported in VCF
sens = float(options.boot_sens)/float(100)
sens_acc = 1.0 - sens
thresh = sens * float(options.boot_num)
for hgt in hgt_list:
# evaluate bootstrap
acc_cov_test = sum(1 for i in xrange(len(hgt.boot_acc_coverage_list)) if hgt.acc_mean > hgt.boot_acc_coverage_list[i])
don_cov_test = sum(1 for i in xrange(len(hgt.boot_don_coverage_list)) if hgt.don_mean > hgt.boot_don_coverage_list[i])
if (options.paired_reads):
# write all candidates to tsv file
cross_pair_test = sum(1 for i in xrange(len(hgt.boot_crossing_pairs_list)) if hgt.pair_support > hgt.boot_crossing_pairs_list[i])
don_pair_test = sum(1 for i in xrange(len(hgt.boot_don_pairs_list)) if hgt.don_pair_support > hgt.boot_don_pairs_list[i])
phage_support = 0
if ((hgt.pair_support + hgt.don_pair_support) > 0):
phage_support = float(hgt.phage_hits)/float((hgt.pair_support + hgt.don_pair_support))
writetsv.writerow([hgt.acc_start, hgt.acc_end, "%.2f" % hgt.acc_mean, acc_cov_test, hgt.don_start, hgt.don_end, "%.2f" % hgt.don_mean, hgt.split_support, hgt.pair_support, hgt.don_pair_support, "%.4f" % phage_support, don_cov_test, cross_pair_test, don_pair_test])
else:
writetsv.writerow([hgt.acc_start, hgt.acc_end, "%.2f" % hgt.acc_mean, acc_cov_test, hgt.don_start, hgt.don_end, "%.2f" % hgt.don_mean, hgt.split_support, don_cov_test])
            # Write only candidates to VCF file that passed the filter (boot_sens)
if (options.boot_sens > 0):
if (acc_cov_test < thresh) and (acc_cov_test > sens_acc * options.boot_num): continue
if (options.paired_reads):
if (cross_pair_test < thresh) or (don_pair_test < thresh): continue
if (don_cov_test < thresh): continue
# write filtered candidates to VCF file
writevcf.writerow([options.acceptor, hgt.acc_start, 'BND_'+str(hgt_vcf_counter)+'_1', hgt.acc_start_base, hgt.acc_start_base+'['+options.donor+':'+str(hgt.don_start)+'[', 'PASS', 'SVTYPE=BND;EVENT=HGT'+str(hgt_vcf_counter), '.', '1'])
writevcf.writerow([options.acceptor, hgt.acc_end, 'BND_'+str(hgt_vcf_counter)+'_2', hgt.acc_end_base, ']'+options.donor+':'+str(hgt.don_end)+']'+hgt.acc_end_base, 'PASS', 'SVTYPE=BND;EVENT=HGT'+str(hgt_vcf_counter), '.', '1'])
hgt_vcf_counter += 1
    if (hgt_vcf_counter == 1):
        print ('No candidates written to VCF, try to rerun with lower sampling sensitivity (--boot-sens)')
if __name__ == '__main__':
#profile.run('re.compile("main")')
sys.exit(main())
| gpl-3.0 |
GoogleCloudPlatform/tensorflow-without-a-phd | tensorflow-mnist-tutorial/mnist_1.0_softmax.py | 1 | 5496 | # encoding: UTF-8
# Copyright 2016 Google.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
import tensorflowvisu
import mnistdata
import math
print("Tensorflow version " + tf.__version__)
tf.set_random_seed(0)
# neural network with 1 layer of 10 softmax neurons
#
# · · · · · · · · · ·       (input data, flattened pixels)       X [batch, 784]   # 784 = 28 * 28
# \x/x\x/x\x/x\x/x\x/    -- fully connected layer (softmax)      W [784, 10]    b[10]
#   · · · · · · · ·                                              Y [batch, 10]
# The model is:
#
# Y = softmax( X * W + b)
# X: matrix for 100 grayscale images of 28x28 pixels, flattened (there are 100 images in a mini-batch)
# W: weight matrix with 784 lines and 10 columns
# b: bias vector with 10 dimensions
# +: add with broadcasting: adds the vector to each line of the matrix (numpy)
# softmax(matrix) applies softmax on each line
# softmax(line) applies an exp to each value then divides by the norm of the resulting line
# Y: output matrix with 100 lines and 10 columns
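# Added illustration (not part of the original tutorial): on a toy 2-pixel, 3-class
# problem the same shapes would be X:[batch, 2] x W:[2, 3] + b:[3] -> Y:[batch, 3],
# and every row of Y is a probability vector summing to 1 after the softmax.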
# Download images and labels into mnist.test (10K images+labels) and mnist.train (60K images+labels)
mnist = mnistdata.read_data_sets("data", one_hot=True, reshape=False)
# input X: 28x28 grayscale images, the first dimension (None) will index the images in the mini-batch
X = tf.placeholder(tf.float32, [None, 28, 28, 1])
# correct answers will go here
Y_ = tf.placeholder(tf.float32, [None, 10])
# weights W[784, 10] 784=28*28
W = tf.Variable(tf.zeros([784, 10]))
# biases b[10]
b = tf.Variable(tf.zeros([10]))
# flatten the images into a single line of pixels
# -1 in the shape definition means "the only possible dimension that will preserve the number of elements"
XX = tf.reshape(X, [-1, 784])
# The model
Y = tf.nn.softmax(tf.matmul(XX, W) + b)
# loss function: cross-entropy = - sum( Y_i * log(Yi) )
# Y: the computed output vector
# Y_: the desired output vector
# cross-entropy
# log takes the log of each element, * multiplies the tensors element by element
# reduce_mean will add all the components in the tensor
# so here we end up with the total cross-entropy for all images in the batch
cross_entropy = -tf.reduce_mean(Y_ * tf.log(Y)) * 1000.0 # normalized for batches of 100 images,
# *10 because "mean" included an unwanted division by 10
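# Added note (not from the original): tf.reduce_mean averages over all 100*10 = 1000
# entries of the batch tensor, so the *1000.0 factor recovers the cross-entropy
# summed over the 100 images of the mini-batch.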
# accuracy of the trained model, between 0 (worst) and 1 (best)
correct_prediction = tf.equal(tf.argmax(Y, 1), tf.argmax(Y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# training, learning rate = 0.005
train_step = tf.train.GradientDescentOptimizer(0.005).minimize(cross_entropy)
# matplotlib visualisation
allweights = tf.reshape(W, [-1])
allbiases = tf.reshape(b, [-1])
I = tensorflowvisu.tf_format_mnist_images(X, Y, Y_) # assembles 10x10 images by default
It = tensorflowvisu.tf_format_mnist_images(X, Y, Y_, 1000, lines=25) # 1000 images on 25 lines
datavis = tensorflowvisu.MnistDataVis()
# init
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
# You can call this function in a loop to train the model, 100 images at a time
def training_step(i, update_test_data, update_train_data):
# training on batches of 100 images with 100 labels
batch_X, batch_Y = mnist.train.next_batch(100)
# compute training values for visualisation
if update_train_data:
a, c, im, w, b = sess.run([accuracy, cross_entropy, I, allweights, allbiases], feed_dict={X: batch_X, Y_: batch_Y})
datavis.append_training_curves_data(i, a, c)
datavis.append_data_histograms(i, w, b)
datavis.update_image1(im)
print(str(i) + ": accuracy:" + str(a) + " loss: " + str(c))
# compute test values for visualisation
if update_test_data:
a, c, im = sess.run([accuracy, cross_entropy, It], feed_dict={X: mnist.test.images, Y_: mnist.test.labels})
datavis.append_test_curves_data(i, a, c)
datavis.update_image2(im)
print(str(i) + ": ********* epoch " + str(i*100//mnist.train.images.shape[0]+1) + " ********* test accuracy:" + str(a) + " test loss: " + str(c))
# the backpropagation training step
sess.run(train_step, feed_dict={X: batch_X, Y_: batch_Y})
datavis.animate(training_step, iterations=2000+1, train_data_update_freq=10, test_data_update_freq=50, more_tests_at_start=True)
# to save the animation as a movie, add save_movie=True as an argument to datavis.animate
# to disable the visualisation use the following line instead of the datavis.animate line
# for i in range(2000+1): training_step(i, i % 50 == 0, i % 10 == 0)
print("max test accuracy: " + str(datavis.get_max_test_accuracy()))
# final max test accuracy = 0.9268 (10K iterations). Accuracy should peak above 0.92 in the first 2000 iterations.
| apache-2.0 |
fzr72725/ThinkStats2 | code/regression.py | 62 | 9652 | """This file contains code used in "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2010 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function, division
import math
import pandas
import random
import numpy as np
import statsmodels.api as sm
import statsmodels.formula.api as smf
import re
import chap01soln
import first
import linear
import thinkplot
import thinkstats2
def QuickLeastSquares(xs, ys):
"""Estimates linear least squares fit and returns MSE.
xs: sequence of values
ys: sequence of values
returns: inter, slope, mse
"""
n = float(len(xs))
meanx = xs.mean()
dxs = xs - meanx
varx = np.dot(dxs, dxs) / n
meany = ys.mean()
dys = ys - meany
cov = np.dot(dxs, dys) / n
slope = cov / varx
inter = meany - slope * meanx
res = ys - (inter + slope * xs)
mse = np.dot(res, res) / n
return inter, slope, mse
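# Illustrative check (not from the book): for xs = np.array([1.0, 2.0, 3.0]) and
# ys = np.array([2.0, 4.0, 6.0]) this returns (inter, slope, mse) == (0.0, 2.0, 0.0),
# i.e. the exact line y = 2 * x with zero residual error.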
def ReadVariables():
"""Reads Stata dictionary files for NSFG data.
returns: DataFrame that maps variables names to descriptions
"""
vars1 = thinkstats2.ReadStataDct('2002FemPreg.dct').variables
vars2 = thinkstats2.ReadStataDct('2002FemResp.dct').variables
all_vars = vars1.append(vars2)
all_vars.index = all_vars.name
return all_vars
def JoinFemResp(df):
"""Reads the female respondent file and joins on caseid.
df: DataFrame
"""
resp = chap01soln.ReadFemResp()
resp.index = resp.caseid
join = df.join(resp, on='caseid', rsuffix='_r')
# convert from colon-separated time strings to datetimes
join.screentime = pandas.to_datetime(join.screentime)
return join
def GoMining(df):
"""Searches for variables that predict birth weight.
df: DataFrame of pregnancy records
returns: list of (rsquared, variable name) pairs
"""
variables = []
for name in df.columns:
try:
if df[name].var() < 1e-7:
continue
formula = 'totalwgt_lb ~ agepreg + ' + name
formula = formula.encode('ascii')
model = smf.ols(formula, data=df)
if model.nobs < len(df)/2:
continue
results = model.fit()
except (ValueError, TypeError):
continue
variables.append((results.rsquared, name))
return variables
def MiningReport(variables, n=30):
"""Prints variables with the highest R^2.
    variables: list of (R^2, variable name) pairs
n: number of pairs to print
"""
all_vars = ReadVariables()
variables.sort(reverse=True)
for mse, name in variables[:n]:
key = re.sub('_r$', '', name)
try:
desc = all_vars.loc[key].desc
if isinstance(desc, pandas.Series):
desc = desc[0]
print(name, mse, desc)
except KeyError:
print(name, mse)
def PredictBirthWeight(live):
"""Predicts birth weight of a baby at 30 weeks.
live: DataFrame of live births
"""
live = live[live.prglngth>30]
join = JoinFemResp(live)
t = GoMining(join)
MiningReport(t)
formula = ('totalwgt_lb ~ agepreg + C(race) + babysex==1 + '
'nbrnaliv>1 + paydu==1 + totincr')
results = smf.ols(formula, data=join).fit()
SummarizeResults(results)
def SummarizeResults(results):
"""Prints the most important parts of linear regression results:
results: RegressionResults object
"""
for name, param in results.params.iteritems():
pvalue = results.pvalues[name]
print('%s %0.3g (%.3g)' % (name, param, pvalue))
try:
print('R^2 %.4g' % results.rsquared)
ys = results.model.endog
print('Std(ys) %.4g' % ys.std())
print('Std(res) %.4g' % results.resid.std())
except AttributeError:
print('R^2 %.4g' % results.prsquared)
def RunSimpleRegression(live):
"""Runs a simple regression and compare results to thinkstats2 functions.
live: DataFrame of live births
"""
# run the regression with thinkstats2 functions
live_dropna = live.dropna(subset=['agepreg', 'totalwgt_lb'])
ages = live_dropna.agepreg
weights = live_dropna.totalwgt_lb
inter, slope = thinkstats2.LeastSquares(ages, weights)
res = thinkstats2.Residuals(ages, weights, inter, slope)
r2 = thinkstats2.CoefDetermination(weights, res)
# run the regression with statsmodels
formula = 'totalwgt_lb ~ agepreg'
model = smf.ols(formula, data=live)
results = model.fit()
SummarizeResults(results)
def AlmostEquals(x, y, tol=1e-6):
return abs(x-y) < tol
assert(AlmostEquals(results.params['Intercept'], inter))
assert(AlmostEquals(results.params['agepreg'], slope))
assert(AlmostEquals(results.rsquared, r2))
def PivotTables(live):
"""Prints a pivot table comparing first babies to others.
live: DataFrame of live births
"""
table = pandas.pivot_table(live, rows='isfirst',
values=['totalwgt_lb', 'agepreg'])
print(table)
def FormatRow(results, columns):
"""Converts regression results to a string.
results: RegressionResults object
returns: string
"""
t = []
for col in columns:
coef = results.params.get(col, np.nan)
pval = results.pvalues.get(col, np.nan)
if np.isnan(coef):
s = '--'
elif pval < 0.001:
s = '%0.3g (*)' % (coef)
else:
s = '%0.3g (%0.2g)' % (coef, pval)
t.append(s)
try:
t.append('%.2g' % results.rsquared)
except AttributeError:
t.append('%.2g' % results.prsquared)
return t
def RunModels(live):
"""Runs regressions that predict birth weight.
live: DataFrame of pregnancy records
"""
columns = ['isfirst[T.True]', 'agepreg', 'agepreg2']
header = ['isfirst', 'agepreg', 'agepreg2']
rows = []
formula = 'totalwgt_lb ~ isfirst'
results = smf.ols(formula, data=live).fit()
rows.append(FormatRow(results, columns))
print(formula)
SummarizeResults(results)
formula = 'totalwgt_lb ~ agepreg'
results = smf.ols(formula, data=live).fit()
rows.append(FormatRow(results, columns))
print(formula)
SummarizeResults(results)
formula = 'totalwgt_lb ~ isfirst + agepreg'
results = smf.ols(formula, data=live).fit()
rows.append(FormatRow(results, columns))
print(formula)
SummarizeResults(results)
live['agepreg2'] = live.agepreg**2
formula = 'totalwgt_lb ~ isfirst + agepreg + agepreg2'
results = smf.ols(formula, data=live).fit()
rows.append(FormatRow(results, columns))
print(formula)
SummarizeResults(results)
PrintTabular(rows, header)
def PrintTabular(rows, header):
"""Prints results in LaTeX tabular format.
rows: list of rows
header: list of strings
"""
s = r'\hline ' + ' & '.join(header) + r' \\ \hline'
print(s)
for row in rows:
s = ' & '.join(row) + r' \\'
print(s)
print(r'\hline')
def LogisticRegressionExample():
"""Runs a simple example of logistic regression and prints results.
"""
y = np.array([0, 1, 0, 1])
x1 = np.array([0, 0, 0, 1])
x2 = np.array([0, 1, 1, 1])
beta = [-1.5, 2.8, 1.1]
log_o = beta[0] + beta[1] * x1 + beta[2] * x2
print(log_o)
o = np.exp(log_o)
print(o)
p = o / (o+1)
print(p)
like = y * p + (1-y) * (1-p)
print(like)
print(np.prod(like))
df = pandas.DataFrame(dict(y=y, x1=x1, x2=x2))
results = smf.logit('y ~ x1 + x2', data=df).fit()
print(results.summary())
def RunLogisticModels(live):
"""Runs regressions that predict sex.
live: DataFrame of pregnancy records
"""
#live = linear.ResampleRowsWeighted(live)
df = live[live.prglngth>30]
df['boy'] = (df.babysex==1).astype(int)
df['isyoung'] = (df.agepreg<20).astype(int)
df['isold'] = (df.agepreg<35).astype(int)
df['season'] = (((df.datend+1) % 12) / 3).astype(int)
# run the simple model
model = smf.logit('boy ~ agepreg', data=df)
results = model.fit()
print('nobs', results.nobs)
print(type(results))
SummarizeResults(results)
# run the complex model
model = smf.logit('boy ~ agepreg + hpagelb + birthord + C(race)', data=df)
results = model.fit()
print('nobs', results.nobs)
print(type(results))
SummarizeResults(results)
# make the scatter plot
exog = pandas.DataFrame(model.exog, columns=model.exog_names)
endog = pandas.DataFrame(model.endog, columns=[model.endog_names])
xs = exog['agepreg']
lo = results.fittedvalues
o = np.exp(lo)
p = o / (o+1)
#thinkplot.Scatter(xs, p, alpha=0.1)
#thinkplot.Show()
# compute accuracy
actual = endog['boy']
baseline = actual.mean()
predict = (results.predict() >= 0.5)
true_pos = predict * actual
true_neg = (1 - predict) * (1 - actual)
acc = (sum(true_pos) + sum(true_neg)) / len(actual)
print(acc, baseline)
columns = ['agepreg', 'hpagelb', 'birthord', 'race']
new = pandas.DataFrame([[35, 39, 3, 1]], columns=columns)
y = results.predict(new)
print(y)
def main(name, data_dir='.'):
thinkstats2.RandomSeed(17)
LogisticRegressionExample()
live, firsts, others = first.MakeFrames()
live['isfirst'] = (live.birthord == 1)
RunLogisticModels(live)
RunSimpleRegression(live)
RunModels(live)
PredictBirthWeight(live)
if __name__ == '__main__':
import sys
main(*sys.argv)
| gpl-3.0 |
13anjou/Research-Internship | Analysis/analyseDiff.py | 1 | 1075 | #!/usr/bin/env python
#-*- coding: utf-8 -*-
import csv
from pylab import*
import numpy
import math
import matplotlib.pyplot as plt
import numbers
k = 0.2
s = 0.8
import dico as d
def main(k) :
dico = d.main()
plt.figure(figsize=(20,20))
plt.title('Mean distance to the power-law distribution versus stations', fontsize = 40)
plt.legend()
with open('C:\\Users\\valentin\Desktop\\boulot_mines\\S3Recherche\\Resultats\\log_log\\10_{}\\toutes\\ecartPowerLaw_{}.csv'.format(k,k), 'r') as csvfile:
reader = csv.reader(csvfile, delimiter=';',quotechar=',', quoting=csv.QUOTE_MINIMAL)
for row in reader :
scatter(enlever(row[0]),enlever(row[1]))
yticks(fontsize=40)
xticks(fontsize=40)
plt.xlabel("station", fontsize=40)
plt.ylabel("mean distance to the power-law distribution", fontsize=40)
savefig('C:\\Users\\valentin\Desktop\\boulot_mines\\S3Recherche\\Python\\correlation\\DistancePowerLaw.png')
clf()
def enlever(s) :
    res = str()
    for l in s :
        if l ==' ' :
            pass
        else :
            res=res + l
    return(res)
if __name__ == "__main__" :
    main(k)
miaecle/deepchem | contrib/one_shot_models/multitask_classifier.py | 5 | 10016 | """
Implements a multitask graph convolutional classifier.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
__author__ = "Han Altae-Tran and Bharath Ramsundar"
__copyright__ = "Copyright 2016, Stanford University"
__license__ = "MIT"
import warnings
import os
import sys
import numpy as np
import tensorflow as tf
import sklearn.metrics
import tempfile
from deepchem.data import pad_features
from deepchem.utils.save import log
from deepchem.models import Model
from deepchem.nn.copy import Input
from deepchem.nn.copy import Dense
from deepchem.nn import model_ops
# TODO(rbharath): Find a way to get rid of this import?
from deepchem.models.tf_new_models.graph_topology import merge_dicts
def get_loss_fn(final_loss):
# Obtain appropriate loss function
if final_loss == 'L2':
def loss_fn(x, t):
diff = tf.subtract(x, t)
return tf.reduce_sum(tf.square(diff), 0)
elif final_loss == 'weighted_L2':
def loss_fn(x, t, w):
diff = tf.subtract(x, t)
weighted_diff = tf.multiply(diff, w)
return tf.reduce_sum(tf.square(weighted_diff), 0)
elif final_loss == 'L1':
def loss_fn(x, t):
diff = tf.subtract(x, t)
return tf.reduce_sum(tf.abs(diff), 0)
  elif final_loss == 'huber':
    # huber_d (the Huber delta) is not defined anywhere in the original module;
    # a value of 1.0 is assumed here so that this branch is usable.
    huber_d = 1.0
def loss_fn(x, t):
diff = tf.subtract(x, t)
return tf.reduce_sum(
tf.minimum(0.5 * tf.square(diff),
huber_d * (tf.abs(diff) - 0.5 * huber_d)), 0)
elif final_loss == 'cross_entropy':
def loss_fn(x, t, w):
costs = tf.nn.sigmoid_cross_entropy_with_logits(logits=x, labels=t)
weighted_costs = tf.multiply(costs, w)
return tf.reduce_sum(weighted_costs)
elif final_loss == 'hinge':
def loss_fn(x, t, w):
t = tf.multiply(2.0, t) - 1
costs = tf.maximum(0.0, 1.0 - tf.multiply(t, x))
weighted_costs = tf.multiply(costs, w)
return tf.reduce_sum(weighted_costs)
return loss_fn
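# Illustrative usage (added note, not part of the original module): get_loss_fn
# returns a closure, e.g.
#   loss_fn = get_loss_fn('cross_entropy')
#   loss = loss_fn(logits, labels, weights)  # same-shaped tensors -> scalar loss
# The 'L2'/'L1'/'huber' variants take (x, t) only; the weighted ones take (x, t, w).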
class MultitaskGraphClassifier(Model):
def __init__(self,
model,
n_tasks,
n_feat,
logdir=None,
batch_size=50,
final_loss='cross_entropy',
learning_rate=.001,
optimizer_type="adam",
learning_rate_decay_time=1000,
beta1=.9,
beta2=.999,
pad_batches=True,
verbose=True):
warnings.warn(
"MultitaskGraphClassifier is deprecated. "
"Will be removed in DeepChem 1.4.", DeprecationWarning)
super(MultitaskGraphClassifier, self).__init__(
model_dir=logdir, verbose=verbose)
self.n_tasks = n_tasks
self.final_loss = final_loss
self.model = model
self.sess = tf.Session(graph=self.model.graph)
with self.model.graph.as_default():
# Extract model info
self.batch_size = batch_size
self.pad_batches = pad_batches
# Get graph topology for x
self.graph_topology = self.model.get_graph_topology()
self.feat_dim = n_feat
# Raw logit outputs
self.logits = self.build()
self.loss_op = self.add_training_loss(self.final_loss, self.logits)
self.outputs = self.add_softmax(self.logits)
self.learning_rate = learning_rate
self.T = learning_rate_decay_time
self.optimizer_type = optimizer_type
self.optimizer_beta1 = beta1
self.optimizer_beta2 = beta2
# Set epsilon
self.epsilon = 1e-7
self.add_optimizer()
# Initialize
self.init_fn = tf.global_variables_initializer()
self.sess.run(self.init_fn)
# Path to save checkpoint files, which matches the
# replicated supervisor's default path.
self._save_path = os.path.join(self.model_dir, 'model.ckpt')
def build(self):
# Create target inputs
self.label_placeholder = tf.placeholder(
dtype='bool', shape=(None, self.n_tasks), name="label_placeholder")
self.weight_placeholder = tf.placeholder(
        dtype='float32', shape=(None, self.n_tasks), name="weight_placeholder")
feat = self.model.return_outputs()
################################################################ DEBUG
#print("multitask classifier")
#print("feat")
#print(feat)
################################################################ DEBUG
output = model_ops.multitask_logits(feat, self.n_tasks)
return output
def add_optimizer(self):
if self.optimizer_type == "adam":
self.optimizer = tf.train.AdamOptimizer(
self.learning_rate,
beta1=self.optimizer_beta1,
beta2=self.optimizer_beta2,
epsilon=self.epsilon)
else:
raise ValueError("Optimizer type not recognized.")
# Get train function
self.train_op = self.optimizer.minimize(self.loss_op)
def construct_feed_dict(self, X_b, y_b=None, w_b=None, training=True):
"""Get initial information about task normalization"""
# TODO(rbharath): I believe this is total amount of data
n_samples = len(X_b)
if y_b is None:
y_b = np.zeros((n_samples, self.n_tasks))
if w_b is None:
w_b = np.zeros((n_samples, self.n_tasks))
targets_dict = {self.label_placeholder: y_b, self.weight_placeholder: w_b}
# Get graph information
atoms_dict = self.graph_topology.batch_to_feed_dict(X_b)
# TODO (hraut->rhbarath): num_datapoints should be a vector, with ith element being
# the number of labeled data points in target_i. This is to normalize each task
# num_dat_dict = {self.num_datapoints_placeholder : self.}
# Get other optimizer information
# TODO(rbharath): Figure out how to handle phase appropriately
feed_dict = merge_dicts([targets_dict, atoms_dict])
return feed_dict
def add_training_loss(self, final_loss, logits):
"""Computes loss using logits."""
loss_fn = get_loss_fn(final_loss) # Get loss function
task_losses = []
# label_placeholder of shape (batch_size, n_tasks). Split into n_tasks
# tensors of shape (batch_size,)
task_labels = tf.split(
axis=1, num_or_size_splits=self.n_tasks, value=self.label_placeholder)
task_weights = tf.split(
axis=1, num_or_size_splits=self.n_tasks, value=self.weight_placeholder)
for task in range(self.n_tasks):
task_label_vector = task_labels[task]
task_weight_vector = task_weights[task]
# Convert the labels into one-hot vector encodings.
one_hot_labels = tf.cast(
tf.one_hot(tf.cast(tf.squeeze(task_label_vector), tf.int32), 2),
tf.float32)
# Since we use tf.nn.softmax_cross_entropy_with_logits note that we pass in
# un-softmaxed logits rather than softmax outputs.
task_loss = loss_fn(logits[task], one_hot_labels, task_weight_vector)
task_losses.append(task_loss)
# It's ok to divide by just the batch_size rather than the number of nonzero
# examples (effect averages out)
total_loss = tf.add_n(task_losses)
total_loss = tf.math.divide(total_loss, self.batch_size)
return total_loss
def add_softmax(self, outputs):
"""Replace logits with softmax outputs."""
softmax = []
with tf.name_scope('inference'):
for i, logits in enumerate(outputs):
softmax.append(tf.nn.softmax(logits, name='softmax_%d' % i))
return softmax
def fit(self,
dataset,
nb_epoch=10,
max_checkpoints_to_keep=5,
log_every_N_batches=50,
checkpoint_interval=10,
**kwargs):
# Perform the optimization
log("Training for %d epochs" % nb_epoch, self.verbose)
# TODO(rbharath): Disabling saving for now to try to debug.
for epoch in range(nb_epoch):
log("Starting epoch %d" % epoch, self.verbose)
for batch_num, (X_b, y_b, w_b, ids_b) in enumerate(
dataset.iterbatches(self.batch_size, pad_batches=self.pad_batches)):
if batch_num % log_every_N_batches == 0:
log("On batch %d" % batch_num, self.verbose)
self.sess.run(
self.train_op, feed_dict=self.construct_feed_dict(X_b, y_b, w_b))
def save(self):
"""
No-op since this model doesn't currently support saving...
"""
pass
def predict(self, dataset, transformers=[], **kwargs):
"""Wraps predict to set batch_size/padding."""
return super(MultitaskGraphClassifier, self).predict(
dataset, transformers, batch_size=self.batch_size)
def predict_proba(self, dataset, transformers=[], n_classes=2, **kwargs):
"""Wraps predict_proba to set batch_size/padding."""
return super(MultitaskGraphClassifier, self).predict_proba(
dataset, transformers, n_classes=n_classes, batch_size=self.batch_size)
def predict_on_batch(self, X):
"""Return model output for the provided input.
"""
if self.pad_batches:
X = pad_features(self.batch_size, X)
# run eval data through the model
n_tasks = self.n_tasks
with self.sess.as_default():
feed_dict = self.construct_feed_dict(X)
# Shape (n_samples, n_tasks)
batch_outputs = self.sess.run(self.outputs, feed_dict=feed_dict)
n_samples = len(X)
outputs = np.zeros((n_samples, self.n_tasks))
for task, output in enumerate(batch_outputs):
outputs[:, task] = np.argmax(output, axis=1)
return outputs
def predict_proba_on_batch(self, X, n_classes=2):
"""Returns class probabilities on batch"""
# run eval data through the model
if self.pad_batches:
X = pad_features(self.batch_size, X)
n_tasks = self.n_tasks
with self.sess.as_default():
feed_dict = self.construct_feed_dict(X)
batch_outputs = self.sess.run(self.outputs, feed_dict=feed_dict)
n_samples = len(X)
outputs = np.zeros((n_samples, self.n_tasks, n_classes))
for task, output in enumerate(batch_outputs):
outputs[:, task, :] = output
return outputs
def get_num_tasks(self):
"""Needed to use Model.predict() from superclass."""
return self.n_tasks
| mit |
jswoboda/GeoDataPython | Test/plottingtest3d.py | 1 | 4148 | #!/usr/bin/env python
"""
@author: John Swoboda
"""
from __future__ import division,print_function
import logging
import pdb
import os
import matplotlib.pyplot as plt
import numpy as np
#
from GeoData.plotting import slice2DGD, plotbeamposGD, insertinfo, contourGD
from GeoData.plottingmayavi import plot3Dslice
#
from load_isropt import load_risromti
#
try:
from mayavi import mlab
except ImportError:
pass
omtislices = [[],[],[140]]
risrslices = [[100],[300],[]]
vbounds = [[200,800],[5e10,5e11]]
def make_data(risrName,omtiName):
risr,omti = load_risromti(risrName,omtiName)
#%% creating GeoData objects of the 2 files, given a specific parameter
reglistlist = omti.timeregister(risr)
keepomti = [i for i in range(len(reglistlist)) if len(reglistlist[i])>0]
reglist = list(set(np.concatenate(reglistlist)))
    #%% converting logarithmic electron density (nel) array into electron density (ne) array
risr_classred =risr.timeslice(reglist,'Array')
omti = omti.timeslice(keepomti,'Array')
reglistfinal = omti.timeregister(risr_classred)
xvec,yvec,zvec = [np.linspace(-100.0,500.0,25),np.linspace(0.0,600.0,25),np.linspace(100.0,500.0,25)]
x,y,z = np.meshgrid(xvec,yvec,zvec)
x2d,y2d = np.meshgrid(xvec,yvec)
new_coords =np.column_stack((x.ravel(),y.ravel(),z.ravel()))
new_coords2 = np.column_stack((x2d.ravel(),y2d.ravel(),140.0*np.ones(y2d.size)))
#%% interpolate risr data
risr_classred.interpolate(new_coords, newcoordname='Cartesian', method='linear', fill_value=np.nan)
#%% interpolate omti data
omti.interpolate(new_coords2, newcoordname='Cartesian', twodinterp = True,method='linear', fill_value=np.nan)
return (risr_classred,risr,omti,reglistfinal)
def plotting(risr_classred,risr_class,omti_class,reglistfinal,odir=None):
figcount = 0
for iomti,ilist in enumerate(reglistfinal):
for irisr in ilist:
omtitime = iomti
risrtime = irisr
try:
mfig = mlab.figure(fgcolor=(1, 1, 1), bgcolor=(1, 1, 1))
surflist1 = plot3Dslice(omti_class, omtislices, vbounds[0],
time = omtitime,cmap='gray',gkey = 'optical',fig=mfig)
arr = plot3Dslice(risr_classred, risrslices, vbounds[1],
time = risrtime,cmap='jet',gkey = 'ne',fig=mfig,units = 'm^{-3}',colorbar = True,view=[50,60],outimage=True)
titlestr1 = '$N_e$ and OMTI at $thm'
newtitle = insertinfo(titlestr1,'',risr_classred.times[risrtime,0],risr_classred.times[risrtime,1])
except Exception as e:
logging.error('trouble with 3D plot {}'.format(e))
(figmplf, [[ax1,ax2],[ax3,ax4]]) = plt.subplots(2, 2,figsize=(16, 12), facecolor='w')
try:
ax1.imshow(arr)
ax1.set_title(newtitle)
ax1.axis('off')
except:
pass
(slice2,cbar2) = slice2DGD(risr_classred,'z',400,vbounds[1],title='$N_e$ at $thm',
time = risrtime,cmap='jet',gkey = 'ne',fig=figmplf,ax=ax2)
cbar2.set_label('$N_e$ in $m^{-3}$')
(slice3,cbar3) = slice2DGD(omti_class,'z',omtislices[-1][0],vbounds[0],title='OMTI at $thm',
time = omtitime,cmap='Greys',gkey = 'optical',fig=figmplf,ax=ax3,cbar=False)
plt.hold(True)
(slice4,cbar4) = contourGD(risr_classred,'z',400,vbounds[1],title='$N_e$ at $thm',
time = risrtime,cmap='jet',gkey = 'ne',fig=figmplf,ax=ax3)
cbar4.set_label('$N_e$ in $m^{-3}$')
bmpos = plotbeamposGD(risr_class)
if odir is not None:
figname = os.path.join(odir,'figure{0:0>2}.png'.format(figcount))
figmplf.savefig(figname,format='png',dpi = 400)
plt.close(figmplf)
figcount=figcount+1
if __name__ == "__main__":
(risr_classred,risr_class,omti_class,reglistfinal) = make_data('data/ran120219.004.hdf5','data/OMTIdata.h5')
plotting(risr_classred,risr_class,omti_class,reglistfinal)
| mit |
la3lma/movement-analysis | src/dataset.py | 1 | 8661 | '''Usage: dataset.py [--model=x --data=feed] [--plot] [--pca] [--normalize]
Options:
--model=m Path to trained model, omit to rebuild the model
--data=feed Path to data file to monitor for live data.
If you pass in a folder, it'll pick the last touched file in the folder.
--plot Show confusion matrix in a separate window
--pca Apply PCA to the datasets before doing classifications
--normalize Normalize FFT buckets before running classifier.
'''
import time
from numpy import fft
from numpy import array
from sklearn.decomposition import PCA
from matplotlib import pyplot as plt
from pickle import BINSTRING
import math
import os, time, sys
from sklearn import svm
from sklearn import grid_search
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import confusion_matrix
from sklearn.externals import joblib
import docopt
class sample_file:
def __init__(self, filename):
self.filename = filename
self.timestamps = []
raw_data = []
with open(filename) as file:
lines = [line for line in file]
for line in lines[2:]:
parts = [float(measurement.strip()) for measurement in line.split(';')]
self.timestamps.append(parts[0])
raw_data.append(parts[1:])
self.data = []
self.gyros = []
self.accelerations = []
for i in range(len(raw_data) - 1):
current = raw_data[i]
next = raw_data[i + 1]
gyro = []
for j in range(3):
delta = next[j] - current[j]
if abs(delta) > 180:
delta -= 360 * delta / abs(delta)
gyro.append(delta)
gyro = [next[j] - current[j] for j in range(3)]
acceleration = current[3:6]
self.data.append([gyro[0], gyro[1], gyro[2], acceleration[0], acceleration[1], acceleration[2]])
self.gyros.append(gyro)
self.accelerations.append(acceleration)
def get_frequencies(self):
num_seconds = float(self.timestamps[-2] - self.timestamps[0]) / float(1000)
samples_per_second = len(self.data) / num_seconds
num_samples = len(self.data)
oscilations_per_sample = [float(oscilations) / num_samples for oscilations in range(0, num_samples)]
return [ops * samples_per_second for ops in oscilations_per_sample]
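    # Illustrative example (assumed numbers): 600 samples spanning 10 seconds give
    # samples_per_second = 60, so entry i of the returned list is i/600 * 60 = i * 0.1 Hz.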
def get_buckets(self, first, last, num_buckets, hertz_cutoff=float(5)):
if arguments['--pca']:
# Transform all of the original data to be a single component
# along the first principal component
pca = PCA(n_components=1, copy=True, whiten=True)
numpy_data = array(self.data)
transformed_dataset = PCA.fit_transform(pca, numpy_data)
print(pca.explained_variance_ratio_)
slice=transformed_dataset[first:last]
else:
# Otherwise just pick the beta component from the gyro data
slice = self.data[first:last]
slice = [column[1] for column in slice]
transformed = fft.fft(slice)
absolute = [abs(complex) for complex in transformed]
frequencies = self.get_frequencies()
buckets = [0 for i in range(num_buckets)]
width = hertz_cutoff / num_buckets
sum_of_buckets = 0.0000001
for i in range(1, len(absolute)):
index = int(frequencies[i] / width)
if index >= num_buckets:
break;
buckets[index] += absolute[i]
sum_of_buckets += absolute[i]
if arguments['--normalize']:
buckets = map(lambda x: x/sum_of_buckets, buckets)
return buckets
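    # Illustrative example (assumed numbers): with num_buckets=40 and hertz_cutoff=5.0
    # each bucket spans 0.125 Hz, so a spectral peak near 1.9 Hz (a typical walking
    # cadence) is accumulated into bucket int(1.9 / 0.125) == 15.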
def get_samples(self):
result = []
segmentsize=30
        # Reduce this to very little to get very large training sets
stride=5
noOfBuckets=40
for start in range(0, len(self.data) - segmentsize, stride):
if start + segmentsize <= len(self.data):
segments_buckets = self.get_buckets(start, start + segmentsize, noOfBuckets)
result.append(segments_buckets)
return result
def keep_last_lines(self, num_lines):
self.data = self.data[-num_lines:]
class dataset:
def __init__(self, foldername, filters = {'dancing': 0, 'walking': 1, 'sitting':2}):
self.data = []
self.target = []
self.activities = []
noOfSamples = 0
for activity, number in filters.iteritems():
samples = get_samples(foldername, filter=activity)
for sample in samples:
noOfSamples +=1
self.data.append(sample)
self.target.append(number)
self.activities.append(activity)
print "foldername= ", foldername, "noOfSamples= ", noOfSamples
def get_samples(foldername, filter=None):
samples = []
for file in os.listdir(foldername):
if filter and file.find(filter) == -1:
continue
for sample in sample_file(foldername + '/' + file).get_samples():
samples.append(sample)
return samples
if __name__ == '__main__':
arguments = docopt.docopt(__doc__)
filters = {'dancing': 0, 'walking': 1, 'sitting':2}
if arguments['--model']:
clf = joblib.load(arguments['--model'])
else:
training = dataset('../datasets/training', filters)
svr = svm.SVC()
exponential_range = [pow(10, i) for i in range(-4, 1)]
parameters = {'kernel':['linear', 'rbf'], 'C':exponential_range, 'gamma':exponential_range}
clf = grid_search.GridSearchCV(svr, parameters, n_jobs=8, verbose=True)
clf.fit(training.data, training.target)
joblib.dump(clf, '../models/1s_6sps.pkl')
print clf
print 'best_score:', clf.best_score_, 'best C:', clf.best_estimator_.C, 'best gamma:', clf.best_estimator_.gamma
validation = dataset('../datasets/validation')
predicted = clf.predict(validation.data)
truedata = map(lambda x: filters[x], validation.activities)
# http://scikit-learn.org/stable/auto_examples/calibration/plot_calibration_curve.html
precision=precision_score(truedata, predicted, average='macro')
recall=recall_score(truedata, predicted, average='macro')
print "predicted = ", predicted
print "truedata = ", truedata
print "macro precision = ", precision
print "macro recall = ", recall
# Write precision/recall to a file so that we can se how
# the precision of the project's output improves over time.
ts = time.time()
record = str(ts) + ", " + str(precision) + ", " + str(recall) + "\n";
with open("../logs/precision-recall-time-evolution.csv", "a") as myfile:
myfile.write(record)
# Compute confusion matrix
cm = confusion_matrix(truedata, predicted)
print "confusion:"
print(cm)
if arguments['--plot']:
# Show confusion matrix in a separate window
plt.matshow(cm)
plt.title('Confusion matrix')
plt.colorbar()
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.show()
data_feed = arguments['--data']
if data_feed:
last_touched = 0
print "Monitoring file " + data_feed
while True:
try:
if (os.path.isdir(data_feed)):
#max(os.listdir('.'), )
all_files_in_df = map(lambda f: os.path.join(data_feed, f), os.listdir(data_feed))
data_file = max(all_files_in_df, key = os.path.getmtime)
else:
data_file = data_feed
# get last modified time
stat_result = os.stat(data_file)
# file changed?
if stat_result.st_mtime != last_touched:
sample = sample_file(data_file)
sample.keep_last_lines(180)
samples = sample.get_samples()
sys.stdout.write("Classification: ")
pr = clf.predict(samples)
with open('../data-gathering/classification', 'w') as f:
f.truncate()
f.write(str(pr))
print pr
else:
print "File didn't change"
last_touched = stat_result.st_mtime
except:
print "Unexpected error", sys.exc_info()[0]
time.sleep(1)
| apache-2.0 |
rs2/pandas | pandas/core/arrays/base.py | 1 | 45568 | """
An interface for extending pandas with custom arrays.
.. warning::
This is an experimental API and subject to breaking changes
without warning.
"""
import operator
from typing import Any, Callable, Dict, Optional, Sequence, Tuple, Union, cast
import numpy as np
from pandas._libs import lib
from pandas._typing import ArrayLike
from pandas.compat import set_function_name
from pandas.compat.numpy import function as nv
from pandas.errors import AbstractMethodError
from pandas.util._decorators import Appender, Substitution
from pandas.util._validators import validate_fillna_kwargs
from pandas.core.dtypes.cast import maybe_cast_to_extension_array
from pandas.core.dtypes.common import (
is_array_like,
is_dtype_equal,
is_list_like,
pandas_dtype,
)
from pandas.core.dtypes.dtypes import ExtensionDtype
from pandas.core.dtypes.generic import ABCDataFrame, ABCIndexClass, ABCSeries
from pandas.core.dtypes.missing import isna
from pandas.core import ops
from pandas.core.algorithms import factorize_array, unique
from pandas.core.missing import backfill_1d, pad_1d
from pandas.core.sorting import nargminmax, nargsort
_extension_array_shared_docs: Dict[str, str] = dict()
class ExtensionArray:
"""
Abstract base class for custom 1-D array types.
pandas will recognize instances of this class as proper arrays
with a custom type and will not attempt to coerce them to objects. They
may be stored directly inside a :class:`DataFrame` or :class:`Series`.
Attributes
----------
dtype
nbytes
ndim
shape
Methods
-------
argsort
astype
copy
dropna
factorize
fillna
equals
isna
ravel
repeat
searchsorted
shift
take
unique
view
_concat_same_type
_formatter
_from_factorized
_from_sequence
_from_sequence_of_strings
_reduce
_values_for_argsort
_values_for_factorize
Notes
-----
The interface includes the following abstract methods that must be
implemented by subclasses:
* _from_sequence
* _from_factorized
* __getitem__
* __len__
* __eq__
* dtype
* nbytes
* isna
* take
* copy
* _concat_same_type
A default repr displaying the type, (truncated) data, length,
    and dtype is provided. It can be customized or replaced by
    overriding:
* __repr__ : A default repr for the ExtensionArray.
* _formatter : Print scalars inside a Series or DataFrame.
Some methods require casting the ExtensionArray to an ndarray of Python
objects with ``self.astype(object)``, which may be expensive. When
performance is a concern, we highly recommend overriding the following
methods:
* fillna
* dropna
* unique
* factorize / _values_for_factorize
* argsort / _values_for_argsort
* searchsorted
The remaining methods implemented on this class should be performant,
as they only compose abstract methods. Still, a more efficient
implementation may be available, and these methods can be overridden.
One can implement methods to handle array reductions.
* _reduce
One can implement methods to handle parsing from strings that will be used
in methods such as ``pandas.io.parsers.read_csv``.
* _from_sequence_of_strings
This class does not inherit from 'abc.ABCMeta' for performance reasons.
Methods and properties required by the interface raise
``pandas.errors.AbstractMethodError`` and no ``register`` method is
provided for registering virtual subclasses.
ExtensionArrays are limited to 1 dimension.
They may be backed by none, one, or many NumPy arrays. For example,
``pandas.Categorical`` is an extension array backed by two arrays,
one for codes and one for categories. An array of IPv6 address may
be backed by a NumPy structured array with two fields, one for the
lower 64 bits and one for the upper 64 bits. Or they may be backed
by some other storage type, like Python lists. Pandas makes no
assumptions on how the data are stored, just that it can be converted
to a NumPy array.
The ExtensionArray interface does not impose any rules on how this data
is stored. However, currently, the backing data cannot be stored in
attributes called ``.values`` or ``._values`` to ensure full compatibility
with pandas internals. But other names as ``.data``, ``._data``,
``._items``, ... can be freely used.
If implementing NumPy's ``__array_ufunc__`` interface, pandas expects
that
1. You defer by returning ``NotImplemented`` when any Series are present
in `inputs`. Pandas will extract the arrays and call the ufunc again.
2. You define a ``_HANDLED_TYPES`` tuple as an attribute on the class.
    Pandas inspects this to determine whether the ufunc is valid for the
types present.
See :ref:`extending.extension.ufunc` for more.
By default, ExtensionArrays are not hashable. Immutable subclasses may
override this behavior.
"""
# '_typ' is for pandas.core.dtypes.generic.ABCExtensionArray.
# Don't override this.
_typ = "extension"
# ------------------------------------------------------------------------
# Constructors
# ------------------------------------------------------------------------
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
"""
Construct a new ExtensionArray from a sequence of scalars.
Parameters
----------
scalars : Sequence
Each element will be an instance of the scalar type for this
array, ``cls.dtype.type`` or be converted into this type in this method.
dtype : dtype, optional
Construct for this particular dtype. This should be a Dtype
compatible with the ExtensionArray.
copy : bool, default False
If True, copy the underlying data.
Returns
-------
ExtensionArray
"""
raise AbstractMethodError(cls)
@classmethod
def _from_sequence_of_strings(cls, strings, dtype=None, copy=False):
"""
Construct a new ExtensionArray from a sequence of strings.
.. versionadded:: 0.24.0
Parameters
----------
strings : Sequence
Each element will be an instance of the scalar type for this
array, ``cls.dtype.type``.
dtype : dtype, optional
Construct for this particular dtype. This should be a Dtype
compatible with the ExtensionArray.
copy : bool, default False
If True, copy the underlying data.
Returns
-------
ExtensionArray
"""
raise AbstractMethodError(cls)
@classmethod
def _from_factorized(cls, values, original):
"""
Reconstruct an ExtensionArray after factorization.
Parameters
----------
values : ndarray
An integer ndarray with the factorized values.
original : ExtensionArray
The original ExtensionArray that factorize was called on.
See Also
--------
factorize
ExtensionArray.factorize
"""
raise AbstractMethodError(cls)
# ------------------------------------------------------------------------
# Must be a Sequence
# ------------------------------------------------------------------------
def __getitem__(self, item):
# type (Any) -> Any
"""
Select a subset of self.
Parameters
----------
item : int, slice, or ndarray
* int: The position in 'self' to get.
* slice: A slice object, where 'start', 'stop', and 'step' are
integers or None
* ndarray: A 1-d boolean NumPy ndarray the same length as 'self'
Returns
-------
item : scalar or ExtensionArray
Notes
-----
For scalar ``item``, return a scalar value suitable for the array's
type. This should be an instance of ``self.dtype.type``.
For slice ``key``, return an instance of ``ExtensionArray``, even
if the slice is length 0 or 1.
For a boolean mask, return an instance of ``ExtensionArray``, filtered
to the values where ``item`` is True.
"""
raise AbstractMethodError(self)
def __setitem__(self, key: Union[int, np.ndarray], value: Any) -> None:
"""
Set one or more values inplace.
This method is not required to satisfy the pandas extension array
interface.
Parameters
----------
key : int, ndarray, or slice
When called from, e.g. ``Series.__setitem__``, ``key`` will be
one of
* scalar int
* ndarray of integers.
* boolean ndarray
* slice object
value : ExtensionDtype.type, Sequence[ExtensionDtype.type], or object
            value or values to be set for ``key``.
Returns
-------
None
"""
# Some notes to the ExtensionArray implementor who may have ended up
# here. While this method is not required for the interface, if you
# *do* choose to implement __setitem__, then some semantics should be
# observed:
#
# * Setting multiple values : ExtensionArrays should support setting
# multiple values at once, 'key' will be a sequence of integers and
# 'value' will be a same-length sequence.
#
# * Broadcasting : For a sequence 'key' and a scalar 'value',
# each position in 'key' should be set to 'value'.
#
# * Coercion : Most users will expect basic coercion to work. For
# example, a string like '2018-01-01' is coerced to a datetime
# when setting on a datetime64ns array. In general, if the
# __init__ method coerces that value, then so should __setitem__
# Note, also, that Series/DataFrame.where internally use __setitem__
# on a copy of the data.
raise NotImplementedError(f"{type(self)} does not implement __setitem__.")
def __len__(self) -> int:
"""
Length of this array
Returns
-------
length : int
"""
raise AbstractMethodError(self)
def __iter__(self):
"""
Iterate over elements of the array.
"""
# This needs to be implemented so that pandas recognizes extension
# arrays as list-like. The default implementation makes successive
# calls to ``__getitem__``, which may be slower than necessary.
for i in range(len(self)):
yield self[i]
def __eq__(self, other: Any) -> ArrayLike:
"""
Return for `self == other` (element-wise equality).
"""
# Implementer note: this should return a boolean numpy ndarray or
# a boolean ExtensionArray.
# When `other` is one of Series, Index, or DataFrame, this method should
# return NotImplemented (to ensure that those objects are responsible for
# first unpacking the arrays, and then dispatch the operation to the
# underlying arrays)
raise AbstractMethodError(self)
def __ne__(self, other: Any) -> ArrayLike:
"""
Return for `self != other` (element-wise inequality).
"""
return ~(self == other)
def to_numpy(
self, dtype=None, copy: bool = False, na_value=lib.no_default
) -> np.ndarray:
"""
Convert to a NumPy ndarray.
.. versionadded:: 1.0.0
This is similar to :meth:`numpy.asarray`, but may provide additional control
over how the conversion is done.
Parameters
----------
dtype : str or numpy.dtype, optional
The dtype to pass to :meth:`numpy.asarray`.
copy : bool, default False
Whether to ensure that the returned value is not a view on
another array. Note that ``copy=False`` does not *ensure* that
``to_numpy()`` is no-copy. Rather, ``copy=True`` ensures that
a copy is made, even if not strictly necessary.
na_value : Any, optional
The value to use for missing values. The default value depends
on `dtype` and the type of the array.
Returns
-------
numpy.ndarray
"""
result = np.asarray(self, dtype=dtype)
if copy or na_value is not lib.no_default:
result = result.copy()
if na_value is not lib.no_default:
result[self.isna()] = na_value
return result
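    # Example (hedged): how a concrete extension array, such as the nullable
    # integer array, is typically converted; the output shown is indicative.
    #
    #   import pandas as pd
    #   arr = pd.array([1, 2, None], dtype="Int64")
    #   arr.to_numpy(dtype="float64", na_value=float("nan"))
    #   # -> array([ 1.,  2., nan])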
# ------------------------------------------------------------------------
# Required attributes
# ------------------------------------------------------------------------
@property
def dtype(self) -> ExtensionDtype:
"""
An instance of 'ExtensionDtype'.
"""
raise AbstractMethodError(self)
@property
def shape(self) -> Tuple[int, ...]:
"""
Return a tuple of the array dimensions.
"""
return (len(self),)
@property
def size(self) -> int:
"""
The number of elements in the array.
"""
return np.prod(self.shape)
@property
def ndim(self) -> int:
"""
Extension Arrays are only allowed to be 1-dimensional.
"""
return 1
@property
def nbytes(self) -> int:
"""
The number of bytes needed to store this object in memory.
"""
# If this is expensive to compute, return an approximate lower bound
# on the number of bytes needed.
raise AbstractMethodError(self)
# ------------------------------------------------------------------------
# Additional Methods
# ------------------------------------------------------------------------
def astype(self, dtype, copy=True):
"""
Cast to a NumPy array with 'dtype'.
Parameters
----------
dtype : str or dtype
Typecode or data-type to which the array is cast.
copy : bool, default True
Whether to copy the data, even if not necessary. If False,
a copy is made only if the old dtype does not match the
new dtype.
Returns
-------
array : ndarray
NumPy ndarray with 'dtype' for its dtype.
"""
from pandas.core.arrays.string_ import StringDtype
dtype = pandas_dtype(dtype)
if is_dtype_equal(dtype, self.dtype):
if not copy:
return self
elif copy:
return self.copy()
if isinstance(dtype, StringDtype): # allow conversion to StringArrays
return dtype.construct_array_type()._from_sequence(self, copy=False)
return np.array(self, dtype=dtype, copy=copy)
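    # Example (hedged): casting a concrete extension array. The "string" cast
    # goes through the StringDtype branch above on pandas versions that
    # provide StringDtype.
    #
    #   arr = pd.array([1, 2, 3], dtype="Int64")
    #   arr.astype("float64")   # plain NumPy ndarray of float64
    #   arr.astype("string")    # StringArray, built via construct_array_type()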
def isna(self) -> ArrayLike:
"""
A 1-D array indicating if each value is missing.
Returns
-------
na_values : Union[np.ndarray, ExtensionArray]
In most cases, this should return a NumPy ndarray. For
exceptional cases like ``SparseArray``, where returning
an ndarray would be expensive, an ExtensionArray may be
returned.
Notes
-----
If returning an ExtensionArray, then
* ``na_values._is_boolean`` should be True
* `na_values` should implement :func:`ExtensionArray._reduce`
* ``na_values.any`` and ``na_values.all`` should be implemented
"""
raise AbstractMethodError(self)
def _values_for_argsort(self) -> np.ndarray:
"""
Return values for sorting.
Returns
-------
ndarray
The transformed values should maintain the ordering between values
within the array.
See Also
--------
ExtensionArray.argsort
"""
# Note: this is used in `ExtensionArray.argsort`.
return np.array(self)
def argsort(
self, ascending: bool = True, kind: str = "quicksort", *args, **kwargs
) -> np.ndarray:
"""
Return the indices that would sort this array.
Parameters
----------
ascending : bool, default True
Whether the indices should result in an ascending
or descending sort.
kind : {'quicksort', 'mergesort', 'heapsort'}, optional
Sorting algorithm.
*args, **kwargs:
Passed through to :func:`numpy.argsort`.
Returns
-------
ndarray
Array of indices that sort ``self``. If NaN values are contained,
NaN values are placed at the end.
See Also
--------
numpy.argsort : Sorting implementation used internally.
"""
# Implementor note: You have two places to override the behavior of
# argsort.
# 1. _values_for_argsort : construct the values passed to np.argsort
# 2. argsort : total control over sorting.
ascending = nv.validate_argsort_with_ascending(ascending, args, kwargs)
result = nargsort(self, kind=kind, ascending=ascending, na_position="last")
return result
def argmin(self):
"""
Return the index of minimum value.
In case of multiple occurrences of the minimum value, the index
corresponding to the first occurrence is returned.
Returns
-------
int
See Also
--------
ExtensionArray.argmax
"""
return nargminmax(self, "argmin")
def argmax(self):
"""
Return the index of maximum value.
In case of multiple occurrences of the maximum value, the index
corresponding to the first occurrence is returned.
Returns
-------
int
See Also
--------
ExtensionArray.argmin
"""
return nargminmax(self, "argmax")
def fillna(self, value=None, method=None, limit=None):
"""
Fill NA/NaN values using the specified method.
Parameters
----------
value : scalar, array-like
If a scalar value is passed it is used to fill all missing values.
Alternatively, an array-like 'value' can be given. It's expected
that the array-like has the same length as 'self'.
method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
Method to use for filling holes in reindexed Series
pad / ffill: propagate last valid observation forward to next valid
backfill / bfill: use NEXT valid observation to fill gap.
limit : int, default None
If method is specified, this is the maximum number of consecutive
NaN values to forward/backward fill. In other words, if there is
a gap with more than this number of consecutive NaNs, it will only
be partially filled. If method is not specified, this is the
maximum number of entries along the entire axis where NaNs will be
filled.
Returns
-------
ExtensionArray
With NA/NaN filled.
"""
value, method = validate_fillna_kwargs(value, method)
mask = self.isna()
if is_array_like(value):
if len(value) != len(self):
raise ValueError(
f"Length of 'value' does not match. Got ({len(value)}) "
f"expected {len(self)}"
)
value = value[mask]
if mask.any():
if method is not None:
func = pad_1d if method == "pad" else backfill_1d
new_values = func(self.astype(object), limit=limit, mask=mask)
new_values = self._from_sequence(new_values, dtype=self.dtype)
else:
# fill with value
new_values = self.copy()
new_values[mask] = value
else:
new_values = self.copy()
return new_values
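    # Example (hedged): filling a nullable integer array with a scalar, or by
    # propagating the previous valid value.
    #
    #   arr = pd.array([1, None, 3], dtype="Int64")
    #   arr.fillna(0)               # -> [1, 0, 3]
    #   arr.fillna(method="ffill")  # -> [1, 1, 3]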
def dropna(self):
"""
Return ExtensionArray without NA values.
Returns
-------
valid : ExtensionArray
"""
return self[~self.isna()]
def shift(self, periods: int = 1, fill_value: object = None) -> "ExtensionArray":
"""
Shift values by desired number.
Newly introduced missing values are filled with
``self.dtype.na_value``.
.. versionadded:: 0.24.0
Parameters
----------
periods : int, default 1
The number of periods to shift. Negative values are allowed
for shifting backwards.
fill_value : object, optional
The scalar value to use for newly introduced missing values.
The default is ``self.dtype.na_value``.
.. versionadded:: 0.24.0
Returns
-------
ExtensionArray
Shifted.
Notes
-----
If ``self`` is empty or ``periods`` is 0, a copy of ``self`` is
returned.
If ``periods > len(self)``, then an array of size
len(self) is returned, with all values filled with
``self.dtype.na_value``.
"""
# Note: this implementation assumes that `self.dtype.na_value` can be
# stored in an instance of your ExtensionArray with `self.dtype`.
if not len(self) or periods == 0:
return self.copy()
if isna(fill_value):
fill_value = self.dtype.na_value
empty = self._from_sequence(
[fill_value] * min(abs(periods), len(self)), dtype=self.dtype
)
if periods > 0:
a = empty
b = self[:-periods]
else:
a = self[abs(periods) :]
b = empty
return self._concat_same_type([a, b])
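    # Example (hedged): shifting introduces dtype.na_value unless fill_value
    # is given.
    #
    #   arr = pd.array([1, 2, 3], dtype="Int64")
    #   arr.shift(1)                 # -> [<NA>, 1, 2]
    #   arr.shift(-1, fill_value=0)  # -> [2, 3, 0]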
def unique(self):
"""
Compute the ExtensionArray of unique values.
Returns
-------
uniques : ExtensionArray
"""
uniques = unique(self.astype(object))
return self._from_sequence(uniques, dtype=self.dtype)
def searchsorted(self, value, side="left", sorter=None):
"""
Find indices where elements should be inserted to maintain order.
.. versionadded:: 0.24.0
Find the indices into a sorted array `self` (a) such that, if the
corresponding elements in `value` were inserted before the indices,
the order of `self` would be preserved.
Assuming that `self` is sorted:
====== ================================
`side` returned index `i` satisfies
====== ================================
left ``self[i-1] < value <= self[i]``
right ``self[i-1] <= value < self[i]``
====== ================================
Parameters
----------
value : array_like
Values to insert into `self`.
side : {'left', 'right'}, optional
If 'left', the index of the first suitable location found is given.
If 'right', return the last such index. If there is no suitable
index, return either 0 or N (where N is the length of `self`).
sorter : 1-D array_like, optional
Optional array of integer indices that sort array a into ascending
order. They are typically the result of argsort.
Returns
-------
array of ints
Array of insertion points with the same shape as `value`.
See Also
--------
numpy.searchsorted : Similar method from NumPy.
"""
# Note: the base tests provided by pandas only test the basics.
# We do not test
# 1. Values outside the range of the `data_for_sorting` fixture
# 2. Values between the values in the `data_for_sorting` fixture
# 3. Missing values.
arr = self.astype(object)
return arr.searchsorted(value, side=side, sorter=sorter)
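    # Example (hedged): insertion points for an already-sorted nullable
    # integer array.
    #
    #   arr = pd.array([1, 3, 5], dtype="Int64")
    #   arr.searchsorted(3)                # -> 1
    #   arr.searchsorted(3, side="right")  # -> 2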
def equals(self, other: object) -> bool:
"""
Return if another array is equivalent to this array.
Equivalent means that both arrays have the same shape and dtype, and
all values compare equal. Missing values in the same location are
considered equal (in contrast with normal equality).
Parameters
----------
other : ExtensionArray
Array to compare to this Array.
Returns
-------
boolean
Whether the arrays are equivalent.
"""
if not type(self) == type(other):
return False
other = cast(ExtensionArray, other)
if not is_dtype_equal(self.dtype, other.dtype):
return False
elif not len(self) == len(other):
return False
else:
equal_values = self == other
if isinstance(equal_values, ExtensionArray):
# boolean array with NA -> fill with False
equal_values = equal_values.fillna(False)
equal_na = self.isna() & other.isna()
return bool((equal_values | equal_na).all())
def _values_for_factorize(self) -> Tuple[np.ndarray, Any]:
"""
Return an array and missing value suitable for factorization.
Returns
-------
values : ndarray
An array suitable for factorization. This should maintain order
and be a supported dtype (Float64, Int64, UInt64, String, Object).
By default, the extension array is cast to object dtype.
na_value : object
The value in `values` to consider missing. This will be treated
as NA in the factorization routines, so it will be coded as
`na_sentinel` and not included in `uniques`. By default,
``np.nan`` is used.
Notes
-----
The values returned by this method are also used in
:func:`pandas.util.hash_pandas_object`.
"""
return self.astype(object), np.nan
def factorize(self, na_sentinel: int = -1) -> Tuple[np.ndarray, "ExtensionArray"]:
"""
Encode the extension array as an enumerated type.
Parameters
----------
na_sentinel : int, default -1
Value to use in the `codes` array to indicate missing values.
Returns
-------
codes : ndarray
An integer NumPy array that's an indexer into the original
ExtensionArray.
uniques : ExtensionArray
An ExtensionArray containing the unique values of `self`.
.. note::
uniques will *not* contain an entry for the NA value of
the ExtensionArray if there are any missing values present
in `self`.
See Also
--------
factorize : Top-level factorize method that dispatches here.
Notes
-----
:meth:`pandas.factorize` offers a `sort` keyword as well.
"""
# Implementer note: There are two ways to override the behavior of
# pandas.factorize
# 1. _values_for_factorize and _from_factorize.
# Specify the values passed to pandas' internal factorization
# routines, and how to convert from those values back to the
# original ExtensionArray.
# 2. ExtensionArray.factorize.
# Complete control over factorization.
arr, na_value = self._values_for_factorize()
codes, uniques = factorize_array(
arr, na_sentinel=na_sentinel, na_value=na_value
)
uniques = self._from_factorized(uniques, self)
return codes, uniques
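    # Example (hedged): missing values are coded with na_sentinel and left out
    # of the uniques.
    #
    #   arr = pd.array([1, 2, 1, None], dtype="Int64")
    #   codes, uniques = arr.factorize()
    #   codes    # -> array([ 0,  1,  0, -1])
    #   uniques  # -> <IntegerArray> [1, 2]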
_extension_array_shared_docs[
"repeat"
] = """
Repeat elements of a %(klass)s.
Returns a new %(klass)s where each element of the current %(klass)s
is repeated consecutively a given number of times.
Parameters
----------
repeats : int or array of ints
The number of repetitions for each element. This should be a
non-negative integer. Repeating 0 times will return an empty
%(klass)s.
axis : None
Must be ``None``. Has no effect but is accepted for compatibility
with numpy.
Returns
-------
repeated_array : %(klass)s
Newly created %(klass)s with repeated elements.
See Also
--------
Series.repeat : Equivalent function for Series.
Index.repeat : Equivalent function for Index.
numpy.repeat : Similar method for :class:`numpy.ndarray`.
ExtensionArray.take : Take arbitrary positions.
Examples
--------
>>> cat = pd.Categorical(['a', 'b', 'c'])
>>> cat
['a', 'b', 'c']
Categories (3, object): ['a', 'b', 'c']
>>> cat.repeat(2)
['a', 'a', 'b', 'b', 'c', 'c']
Categories (3, object): ['a', 'b', 'c']
>>> cat.repeat([1, 2, 3])
['a', 'b', 'b', 'c', 'c', 'c']
Categories (3, object): ['a', 'b', 'c']
"""
@Substitution(klass="ExtensionArray")
@Appender(_extension_array_shared_docs["repeat"])
def repeat(self, repeats, axis=None):
nv.validate_repeat(tuple(), dict(axis=axis))
ind = np.arange(len(self)).repeat(repeats)
return self.take(ind)
# ------------------------------------------------------------------------
# Indexing methods
# ------------------------------------------------------------------------
def take(
self, indices: Sequence[int], allow_fill: bool = False, fill_value: Any = None
) -> "ExtensionArray":
"""
Take elements from an array.
Parameters
----------
indices : sequence of int
Indices to be taken.
allow_fill : bool, default False
How to handle negative values in `indices`.
* False: negative values in `indices` indicate positional indices
from the right (the default). This is similar to
:func:`numpy.take`.
* True: negative values in `indices` indicate
missing values. These values are set to `fill_value`. Any other
negative values raise a ``ValueError``.
fill_value : any, optional
Fill value to use for NA-indices when `allow_fill` is True.
This may be ``None``, in which case the default NA value for
the type, ``self.dtype.na_value``, is used.
For many ExtensionArrays, there will be two representations of
`fill_value`: a user-facing "boxed" scalar, and a low-level
physical NA value. `fill_value` should be the user-facing version,
and the implementation should handle translating that to the
physical version for processing the take if necessary.
Returns
-------
ExtensionArray
Raises
------
IndexError
When the indices are out of bounds for the array.
ValueError
When `indices` contains negative values other than ``-1``
and `allow_fill` is True.
See Also
--------
numpy.take
api.extensions.take
Notes
-----
ExtensionArray.take is called by ``Series.__getitem__``, ``.loc``,
``iloc``, when `indices` is a sequence of values. Additionally,
it's called by :meth:`Series.reindex`, or any other method
that causes realignment, with a `fill_value`.
Examples
--------
Here's an example implementation, which relies on casting the
extension array to object dtype. This uses the helper method
:func:`pandas.api.extensions.take`.
.. code-block:: python
def take(self, indices, allow_fill=False, fill_value=None):
from pandas.core.algorithms import take
# If the ExtensionArray is backed by an ndarray, then
# just pass that here instead of coercing to object.
data = self.astype(object)
if allow_fill and fill_value is None:
fill_value = self.dtype.na_value
# fill value should always be translated from the scalar
# type for the array, to the physical storage type for
# the data, before passing to take.
result = take(data, indices, fill_value=fill_value,
allow_fill=allow_fill)
return self._from_sequence(result, dtype=self.dtype)
"""
# Implementer note: The `fill_value` parameter should be a user-facing
# value, an instance of self.dtype.type. When passed `fill_value=None`,
# the default of `self.dtype.na_value` should be used.
# This may differ from the physical storage type your ExtensionArray
# uses. In this case, your implementation is responsible for casting
# the user-facing type to the storage type, before using
# pandas.api.extensions.take
raise AbstractMethodError(self)
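    # Example (hedged): with allow_fill=True a -1 index means "missing";
    # otherwise negative indices count from the end, as in NumPy.
    #
    #   arr = pd.array([10, 20, 30], dtype="Int64")
    #   arr.take([0, -1])                                 # -> [10, 30]
    #   arr.take([0, -1], allow_fill=True)                # -> [10, <NA>]
    #   arr.take([0, -1], allow_fill=True, fill_value=0)  # -> [10, 0]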
def copy(self) -> "ExtensionArray":
"""
Return a copy of the array.
Returns
-------
ExtensionArray
"""
raise AbstractMethodError(self)
def view(self, dtype=None) -> ArrayLike:
"""
Return a view on the array.
Parameters
----------
dtype : str, np.dtype, or ExtensionDtype, optional
Default None.
Returns
-------
ExtensionArray or np.ndarray
A view on the :class:`ExtensionArray`'s data.
"""
# NB:
# - This must return a *new* object referencing the same data, not self.
# - The only case that *must* be implemented is with dtype=None,
# giving a view with the same dtype as self.
if dtype is not None:
raise NotImplementedError(dtype)
return self[:]
# ------------------------------------------------------------------------
# Printing
# ------------------------------------------------------------------------
def __repr__(self) -> str:
from pandas.io.formats.printing import format_object_summary
# the short repr has no trailing newline, while the truncated
# repr does. So we include a newline in our template, and strip
# any trailing newlines from format_object_summary
data = format_object_summary(
self, self._formatter(), indent_for_name=False
).rstrip(", \n")
class_name = f"<{type(self).__name__}>\n"
return f"{class_name}{data}\nLength: {len(self)}, dtype: {self.dtype}"
def _formatter(self, boxed: bool = False) -> Callable[[Any], Optional[str]]:
"""
Formatting function for scalar values.
This is used in the default '__repr__'. The returned formatting
function receives instances of your scalar type.
Parameters
----------
boxed : bool, default False
An indicator for whether or not your array is being printed
within a Series, DataFrame, or Index (True), or just by
itself (False). This may be useful if you want scalar values
to appear differently within a Series versus on its own (e.g.
quoted or not).
Returns
-------
Callable[[Any], str]
A callable that gets instances of the scalar type and
returns a string. By default, :func:`repr` is used
when ``boxed=False`` and :func:`str` is used when
``boxed=True``.
"""
if boxed:
return str
return repr
# ------------------------------------------------------------------------
# Reshaping
# ------------------------------------------------------------------------
def ravel(self, order="C") -> "ExtensionArray":
"""
Return a flattened view on this array.
Parameters
----------
order : {None, 'C', 'F', 'A', 'K'}, default 'C'
Returns
-------
ExtensionArray
Notes
-----
- Because ExtensionArrays are 1D-only, this is a no-op.
- The "order" argument is ignored, is for compatibility with NumPy.
"""
return self
@classmethod
def _concat_same_type(
cls, to_concat: Sequence["ExtensionArray"]
) -> "ExtensionArray":
"""
Concatenate multiple arrays of this dtype.
Parameters
----------
to_concat : sequence of this type
Returns
-------
ExtensionArray
"""
# Implementer note: this method will only be called with a sequence of
# ExtensionArrays of this class and with the same dtype as self. This
# should allow "easy" concatenation (no upcasting needed), and result
# in a new ExtensionArray of the same dtype.
# Note: this strict behaviour is only guaranteed starting with pandas 1.1
raise AbstractMethodError(cls)
# The _can_hold_na attribute is set to True so that pandas internals
# will use the ExtensionDtype.na_value as the NA value in operations
# such as take(), reindex(), shift(), etc. In addition, those results
# will then be of the ExtensionArray subclass rather than an array
# of objects
_can_hold_na = True
def _reduce(self, name: str, skipna: bool = True, **kwargs):
"""
Return a scalar result of performing the reduction operation.
Parameters
----------
name : str
Name of the function, supported values are:
{ any, all, min, max, sum, mean, median, prod,
std, var, sem, kurt, skew }.
skipna : bool, default True
If True, skip NaN values.
**kwargs
Additional keyword arguments passed to the reduction function.
Currently, `ddof` is the only supported kwarg.
Returns
-------
scalar
Raises
------
TypeError : subclass does not define reductions
"""
raise TypeError(f"cannot perform {name} with type {self.dtype}")
def __hash__(self):
raise TypeError(f"unhashable type: {repr(type(self).__name__)}")
class ExtensionOpsMixin:
"""
A base class for linking the operators to their dunder names.
.. note::
You may want to set ``__array_priority__`` if you want your
implementation to be called when involved in binary operations
with NumPy arrays.
"""
@classmethod
def _create_arithmetic_method(cls, op):
raise AbstractMethodError(cls)
@classmethod
def _add_arithmetic_ops(cls):
cls.__add__ = cls._create_arithmetic_method(operator.add)
cls.__radd__ = cls._create_arithmetic_method(ops.radd)
cls.__sub__ = cls._create_arithmetic_method(operator.sub)
cls.__rsub__ = cls._create_arithmetic_method(ops.rsub)
cls.__mul__ = cls._create_arithmetic_method(operator.mul)
cls.__rmul__ = cls._create_arithmetic_method(ops.rmul)
cls.__pow__ = cls._create_arithmetic_method(operator.pow)
cls.__rpow__ = cls._create_arithmetic_method(ops.rpow)
cls.__mod__ = cls._create_arithmetic_method(operator.mod)
cls.__rmod__ = cls._create_arithmetic_method(ops.rmod)
cls.__floordiv__ = cls._create_arithmetic_method(operator.floordiv)
cls.__rfloordiv__ = cls._create_arithmetic_method(ops.rfloordiv)
cls.__truediv__ = cls._create_arithmetic_method(operator.truediv)
cls.__rtruediv__ = cls._create_arithmetic_method(ops.rtruediv)
cls.__divmod__ = cls._create_arithmetic_method(divmod)
cls.__rdivmod__ = cls._create_arithmetic_method(ops.rdivmod)
@classmethod
def _create_comparison_method(cls, op):
raise AbstractMethodError(cls)
@classmethod
def _add_comparison_ops(cls):
cls.__eq__ = cls._create_comparison_method(operator.eq)
cls.__ne__ = cls._create_comparison_method(operator.ne)
cls.__lt__ = cls._create_comparison_method(operator.lt)
cls.__gt__ = cls._create_comparison_method(operator.gt)
cls.__le__ = cls._create_comparison_method(operator.le)
cls.__ge__ = cls._create_comparison_method(operator.ge)
@classmethod
def _create_logical_method(cls, op):
raise AbstractMethodError(cls)
@classmethod
def _add_logical_ops(cls):
cls.__and__ = cls._create_logical_method(operator.and_)
cls.__rand__ = cls._create_logical_method(ops.rand_)
cls.__or__ = cls._create_logical_method(operator.or_)
cls.__ror__ = cls._create_logical_method(ops.ror_)
cls.__xor__ = cls._create_logical_method(operator.xor)
cls.__rxor__ = cls._create_logical_method(ops.rxor)
class ExtensionScalarOpsMixin(ExtensionOpsMixin):
"""
A mixin for defining ops on an ExtensionArray.
It is assumed that the underlying scalar objects have the operators
already defined.
Notes
-----
If you have defined a subclass MyExtensionArray(ExtensionArray), then
use MyExtensionArray(ExtensionArray, ExtensionScalarOpsMixin) to
get the arithmetic operators. After the definition of MyExtensionArray,
insert the lines
MyExtensionArray._add_arithmetic_ops()
MyExtensionArray._add_comparison_ops()
to link the operators to your class.
.. note::
You may want to set ``__array_priority__`` if you want your
implementation to be called when involved in binary operations
with NumPy arrays.
"""
@classmethod
def _create_method(cls, op, coerce_to_dtype=True, result_dtype=None):
"""
A class method that returns a method that will correspond to an
operator for an ExtensionArray subclass, by dispatching to the
relevant operator defined on the individual elements of the
ExtensionArray.
Parameters
----------
op : function
An operator that takes arguments op(a, b)
coerce_to_dtype : bool, default True
boolean indicating whether to attempt to convert
the result to the underlying ExtensionArray dtype.
If it's not possible to create a new ExtensionArray with the
values, an ndarray is returned instead.
result_dtype : numpy.dtype, optional
    The dtype passed to ``np.asarray`` when ``coerce_to_dtype`` is False.
Returns
-------
Callable[[Any, Any], Union[ndarray, ExtensionArray]]
A method that can be bound to a class. When used, the method
receives the two arguments, one of which is the instance of
this class, and should return an ExtensionArray or an ndarray.
Returning an ndarray may be necessary when the result of the
`op` cannot be stored in the ExtensionArray. The dtype of the
ndarray uses NumPy's normal inference rules.
Examples
--------
Given an ExtensionArray subclass called MyExtensionArray, use
__add__ = cls._create_method(operator.add)
in the class definition of MyExtensionArray to create the operator
for addition, that will be based on the operator implementation
of the underlying elements of the ExtensionArray
"""
def _binop(self, other):
def convert_values(param):
if isinstance(param, ExtensionArray) or is_list_like(param):
ovalues = param
else: # Assume its an object
ovalues = [param] * len(self)
return ovalues
if isinstance(other, (ABCSeries, ABCIndexClass, ABCDataFrame)):
# rely on pandas to unbox and dispatch to us
return NotImplemented
lvalues = self
rvalues = convert_values(other)
# If the operator is not defined for the underlying objects,
# a TypeError should be raised
res = [op(a, b) for (a, b) in zip(lvalues, rvalues)]
def _maybe_convert(arr):
if coerce_to_dtype:
# https://github.com/pandas-dev/pandas/issues/22850
# We catch all regular exceptions here, and fall back
# to an ndarray.
res = maybe_cast_to_extension_array(type(self), arr)
if not isinstance(res, type(self)):
# exception raised in _from_sequence; ensure we have ndarray
res = np.asarray(arr)
else:
res = np.asarray(arr, dtype=result_dtype)
return res
if op.__name__ in {"divmod", "rdivmod"}:
a, b = zip(*res)
return _maybe_convert(a), _maybe_convert(b)
return _maybe_convert(res)
op_name = f"__{op.__name__}__"
return set_function_name(_binop, op_name, cls)
@classmethod
def _create_arithmetic_method(cls, op):
return cls._create_method(op)
@classmethod
def _create_comparison_method(cls, op):
return cls._create_method(op, coerce_to_dtype=False, result_dtype=bool)
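# Hedged usage sketch, restating the mixin notes above: a subclass wires the
# generated operators in after its definition ("MyExtensionArray" is a
# placeholder name, not pandas API).
#
#   class MyExtensionArray(ExtensionArray, ExtensionScalarOpsMixin):
#       ...
#
#   MyExtensionArray._add_arithmetic_ops()
#   MyExtensionArray._add_comparison_ops()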
| bsd-3-clause |
manahl/arctic | arctic/chunkstore/chunkstore.py | 1 | 28526 | import hashlib
import logging
from collections import defaultdict
from itertools import groupby
import pymongo
from bson.binary import Binary
from pandas import DataFrame, Series
from pymongo.errors import OperationFailure
from six.moves import xrange
from .date_chunker import DateChunker, START, END
from .passthrough_chunker import PassthroughChunker
from .._util import indent, mongo_count, enable_sharding
from ..decorators import mongo_retry
from ..exceptions import NoDataFoundException
from ..serialization.numpy_arrays import FrametoArraySerializer, DATA, METADATA, COLUMNS
logger = logging.getLogger(__name__)
CHUNK_STORE_TYPE = 'ChunkStoreV1'
SYMBOL = 'sy'
SHA = 'sh'
CHUNK_SIZE = 'cs'
CHUNK_COUNT = 'cc'
SEGMENT = 'sg'
APPEND_COUNT = 'ac'
LEN = 'l'
SERIALIZER = 'se'
CHUNKER = 'ch'
USERMETA = 'u'
MAX_CHUNK_SIZE = 15 * 1024 * 1024
SER_MAP = {FrametoArraySerializer.TYPE: FrametoArraySerializer()}
CHUNKER_MAP = {DateChunker.TYPE: DateChunker(),
PassthroughChunker.TYPE: PassthroughChunker()}
class ChunkStore(object):
@classmethod
def initialize_library(cls, arctic_lib, hashed=True, **kwargs):
ChunkStore(arctic_lib)._ensure_index()
logger.info("Trying to enable sharding...")
try:
enable_sharding(arctic_lib.arctic, arctic_lib.get_name(), hashed=hashed, key=SYMBOL)
except OperationFailure as e:
logger.warning("Library created, but couldn't enable sharding: %s. This is OK if you're not 'admin'" % str(e))
@mongo_retry
def _ensure_index(self):
self._symbols.create_index([(SYMBOL, pymongo.ASCENDING)],
unique=True,
background=True)
self._collection.create_index([(SYMBOL, pymongo.HASHED)],
background=True)
self._collection.create_index([(SYMBOL, pymongo.ASCENDING),
(SHA, pymongo.ASCENDING)],
unique=True,
background=True)
self._collection.create_index([(SYMBOL, pymongo.ASCENDING),
(START, pymongo.ASCENDING),
(END, pymongo.ASCENDING),
(SEGMENT, pymongo.ASCENDING)],
unique=True, background=True)
self._collection.create_index([(SYMBOL, pymongo.ASCENDING),
(START, pymongo.ASCENDING),
(SEGMENT, pymongo.ASCENDING)],
unique=True, background=True)
self._collection.create_index([(SEGMENT, pymongo.ASCENDING)],
unique=False, background=True)
self._mdata.create_index([(SYMBOL, pymongo.ASCENDING),
(START, pymongo.ASCENDING),
(END, pymongo.ASCENDING)],
unique=True, background=True)
def __init__(self, arctic_lib):
self._arctic_lib = arctic_lib
self.serializer = FrametoArraySerializer()
# Do we allow reading from secondaries
self._allow_secondary = self._arctic_lib.arctic._allow_secondary
self._reset()
@mongo_retry
def _reset(self):
# The default collection
self._collection = self._arctic_lib.get_top_level_collection()
self._symbols = self._collection.symbols
self._mdata = self._collection.metadata
self._audit = self._collection.audit
def __getstate__(self):
return {'arctic_lib': self._arctic_lib}
def __setstate__(self, state):
return ChunkStore.__init__(self, state['arctic_lib'])
def __str__(self):
return """<%s at %s>\n%s""" % (self.__class__.__name__, hex(id(self)),
indent(str(self._arctic_lib), 4))
def __repr__(self):
return str(self)
def _checksum(self, fields, data):
"""
Checksum the passed-in fields and data blob
"""
sha = hashlib.sha1()
for field in fields:
sha.update(field)
sha.update(data)
return Binary(sha.digest())
def delete(self, symbol, chunk_range=None, audit=None):
"""
Delete all chunks for a symbol, or optionally, chunks within a range
Parameters
----------
symbol : str
symbol name for the item
chunk_range: range object
a date range to delete
audit: dict
dict to store in the audit log
"""
if chunk_range is not None:
sym = self._get_symbol_info(symbol)
# read out chunks that fall within the range and filter out
# data within the range
df = self.read(symbol, chunk_range=chunk_range, filter_data=False)
row_adjust = len(df)
if not df.empty:
df = CHUNKER_MAP[sym[CHUNKER]].exclude(df, chunk_range)
# remove chunks, and update any remaining data
query = {SYMBOL: symbol}
query.update(CHUNKER_MAP[sym[CHUNKER]].to_mongo(chunk_range))
self._collection.delete_many(query)
self._mdata.delete_many(query)
self.update(symbol, df)
# update symbol metadata (rows and chunk count)
sym = self._get_symbol_info(symbol)
sym[LEN] -= row_adjust
sym[CHUNK_COUNT] = mongo_count(self._collection, filter={SYMBOL: symbol})
self._symbols.replace_one({SYMBOL: symbol}, sym)
else:
query = {SYMBOL: symbol}
self._collection.delete_many(query)
self._symbols.delete_many(query)
self._mdata.delete_many(query)
if audit is not None:
audit['symbol'] = symbol
if chunk_range is not None:
audit['rows_deleted'] = row_adjust
audit['action'] = 'range delete'
else:
audit['action'] = 'symbol delete'
self._audit.insert_one(audit)
def list_symbols(self, partial_match=None):
"""
Returns all symbols in the library
Parameters
----------
partial_match: None or str
if not none, use this string to do a partial match on symbol names
Returns
-------
list of str
"""
symbols = self._symbols.distinct(SYMBOL)
if partial_match is None:
return symbols
return [x for x in symbols if partial_match in x]
def _get_symbol_info(self, symbol):
if isinstance(symbol, list):
return list(self._symbols.find({SYMBOL: {'$in': symbol}}))
return self._symbols.find_one({SYMBOL: symbol})
def rename(self, from_symbol, to_symbol, audit=None):
"""
Rename a symbol
Parameters
----------
from_symbol: str
the existing symbol that will be renamed
to_symbol: str
the new symbol name
audit: dict
audit information
"""
sym = self._get_symbol_info(from_symbol)
if not sym:
raise NoDataFoundException('No data found for %s' % (from_symbol))
if self._get_symbol_info(to_symbol) is not None:
raise Exception('Symbol %s already exists' % (to_symbol))
mongo_retry(self._collection.update_many)({SYMBOL: from_symbol},
{'$set': {SYMBOL: to_symbol}})
mongo_retry(self._symbols.update_one)({SYMBOL: from_symbol},
{'$set': {SYMBOL: to_symbol}})
mongo_retry(self._mdata.update_many)({SYMBOL: from_symbol},
{'$set': {SYMBOL: to_symbol}})
mongo_retry(self._audit.update_many)({'symbol': from_symbol},
{'$set': {'symbol': to_symbol}})
if audit is not None:
audit['symbol'] = to_symbol
audit['action'] = 'symbol rename'
audit['old_symbol'] = from_symbol
self._audit.insert_one(audit)
def read(self, symbol, chunk_range=None, filter_data=True, **kwargs):
"""
Reads data for a given symbol from the database.
Parameters
----------
symbol: str, or list of str
the symbol(s) to retrieve
chunk_range: object
corresponding range object for the specified chunker (for
DateChunker it is a DateRange object or a DatetimeIndex,
as returned by pandas.date_range)
filter_data: boolean
perform chunk level filtering on the data (see filter in _chunker)
only applicable when chunk_range is specified
kwargs: ?
values passed to the serializer. Varies by serializer
Returns
-------
DataFrame or Series, or in the case when multiple symbols are given,
returns a dict of symbols (symbol -> dataframe/series)
"""
if not isinstance(symbol, list):
symbol = [symbol]
sym = self._get_symbol_info(symbol)
if not sym:
raise NoDataFoundException('No data found for %s' % (symbol))
spec = {SYMBOL: {'$in': symbol}}
chunker = CHUNKER_MAP[sym[0][CHUNKER]]
deser = SER_MAP[sym[0][SERIALIZER]].deserialize
if chunk_range is not None:
spec.update(chunker.to_mongo(chunk_range))
by_start_segment = [(SYMBOL, pymongo.ASCENDING),
(START, pymongo.ASCENDING),
(SEGMENT, pymongo.ASCENDING)]
segment_cursor = self._collection.find(spec, sort=by_start_segment)
chunks = defaultdict(list)
for _, segments in groupby(segment_cursor, key=lambda x: (x[START], x[SYMBOL])):
segments = list(segments)
mdata = self._mdata.find_one({SYMBOL: segments[0][SYMBOL],
START: segments[0][START],
END: segments[0][END]})
# when len(segments) == 1, this is essentially a no-op
# otherwise, take all segments and reassemble the data to one chunk
chunk_data = b''.join([doc[DATA] for doc in segments])
chunks[segments[0][SYMBOL]].append({DATA: chunk_data, METADATA: mdata})
skip_filter = not filter_data or chunk_range is None
if len(symbol) > 1:
return {sym: deser(chunks[sym], **kwargs) if skip_filter else chunker.filter(deser(chunks[sym], **kwargs), chunk_range) for sym in symbol}
else:
return deser(chunks[symbol[0]], **kwargs) if skip_filter else chunker.filter(deser(chunks[symbol[0]], **kwargs), chunk_range)
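    # Hedged usage sketch (library and symbol names are assumptions): reading
    # back through a library handle, optionally restricted to a date range,
    # which per the docstring above may be a DateRange object or a
    # pandas.date_range DatetimeIndex.
    #
    #   import pandas as pd
    #   df = lib.read('my_symbol')
    #   part = lib.read('my_symbol',
    #                   chunk_range=pd.date_range('2016-01-01', '2016-01-31'))
    #   both = lib.read(['sym_a', 'sym_b'])  # dict: symbol -> DataFrame/Series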
def read_audit_log(self, symbol=None):
"""
Reads the audit log
Parameters
----------
symbol: str
optionally only retrieve specific symbol's audit information
Returns
-------
list of dicts
"""
if symbol:
return [x for x in self._audit.find({'symbol': symbol}, {'_id': False})]
return [x for x in self._audit.find({}, {'_id': False})]
def write(self, symbol, item, metadata=None, chunker=DateChunker(), audit=None, **kwargs):
"""
Writes data from item to symbol in the database
Parameters
----------
symbol: str
the symbol that will be used to reference the written data
item: Dataframe or Series
the data to write to the database
metadata: ?
optional per symbol metadata
chunker: Object of type Chunker
A chunker that chunks the data in item
audit: dict
audit information
kwargs:
optional keyword args that are passed to the chunker. Includes:
chunk_size:
used by chunker to break data into discrete chunks.
see specific chunkers for more information about this param.
func: function
function to apply to each chunk before writing. Function
can not modify the date column.
"""
if not isinstance(item, (DataFrame, Series)):
raise Exception("Can only chunk DataFrames and Series")
self._arctic_lib.check_quota()
previous_shas = []
doc = {}
meta = {}
doc[SYMBOL] = symbol
doc[LEN] = len(item)
doc[SERIALIZER] = self.serializer.TYPE
doc[CHUNKER] = chunker.TYPE
doc[USERMETA] = metadata
sym = self._get_symbol_info(symbol)
if sym:
previous_shas = set([Binary(x[SHA]) for x in self._collection.find({SYMBOL: symbol},
projection={SHA: True, '_id': False},
)])
ops = []
meta_ops = []
chunk_count = 0
for start, end, chunk_size, record in chunker.to_chunks(item, **kwargs):
chunk_count += 1
data = self.serializer.serialize(record)
doc[CHUNK_SIZE] = chunk_size
doc[METADATA] = {'columns': data[METADATA][COLUMNS] if COLUMNS in data[METADATA] else ''}
meta = data[METADATA]
for i in xrange(int(len(data[DATA]) / MAX_CHUNK_SIZE + 1)):
chunk = {DATA: Binary(data[DATA][i * MAX_CHUNK_SIZE: (i + 1) * MAX_CHUNK_SIZE])}
chunk[SEGMENT] = i
chunk[START] = meta[START] = start
chunk[END] = meta[END] = end
chunk[SYMBOL] = meta[SYMBOL] = symbol
dates = [chunker.chunk_to_str(start), chunker.chunk_to_str(end), str(chunk[SEGMENT]).encode('ascii')]
chunk[SHA] = self._checksum(dates, chunk[DATA])
meta_ops.append(pymongo.ReplaceOne({SYMBOL: symbol,
START: start,
END: end},
meta, upsert=True))
if chunk[SHA] not in previous_shas:
ops.append(pymongo.UpdateOne({SYMBOL: symbol,
START: start,
END: end,
SEGMENT: chunk[SEGMENT]},
{'$set': chunk}, upsert=True))
else:
# already exists, don't need to update in mongo
previous_shas.remove(chunk[SHA])
if ops:
self._collection.bulk_write(ops, ordered=False)
if meta_ops:
self._mdata.bulk_write(meta_ops, ordered=False)
doc[CHUNK_COUNT] = chunk_count
doc[APPEND_COUNT] = 0
if previous_shas:
mongo_retry(self._collection.delete_many)({SYMBOL: symbol, SHA: {'$in': list(previous_shas)}})
mongo_retry(self._symbols.update_one)({SYMBOL: symbol},
{'$set': doc},
upsert=True)
if audit is not None:
audit['symbol'] = symbol
audit['action'] = 'write'
audit['chunks'] = chunk_count
self._audit.insert_one(audit)
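    # Hedged usage sketch (host, library and symbol names are assumptions):
    # writing a date-indexed frame through the top-level Arctic API with the
    # default DateChunker, chunked per day via the chunk_size kwarg documented
    # above.
    #
    #   from arctic import Arctic, CHUNK_STORE
    #   import pandas as pd
    #   a = Arctic('localhost')
    #   a.initialize_library('user.chunkstore', lib_type=CHUNK_STORE)
    #   lib = a['user.chunkstore']
    #   df = pd.DataFrame({'price': [1.0, 2.0, 3.0]},
    #                     index=pd.date_range('2016-01-01', periods=3, name='date'))
    #   lib.write('my_symbol', df, chunk_size='D')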
def __update(self, sym, item, metadata=None, combine_method=None, chunk_range=None, audit=None):
'''
helper method used by update and append since they very closely
resemble each other; they differ only in the combine method.
append will combine existing data with new data (within a chunk),
whereas update will replace existing data with new data (within a
chunk).
'''
if not isinstance(item, (DataFrame, Series)):
raise Exception("Can only chunk DataFrames and Series")
self._arctic_lib.check_quota()
symbol = sym[SYMBOL]
if chunk_range is not None:
self.delete(symbol, chunk_range)
sym = self._get_symbol_info(symbol)
ops = []
meta_ops = []
chunker = CHUNKER_MAP[sym[CHUNKER]]
appended = 0
new_chunks = 0
for start, end, _, record in chunker.to_chunks(item, chunk_size=sym[CHUNK_SIZE]):
# read out matching chunks
df = self.read(symbol, chunk_range=chunker.to_range(start, end), filter_data=False)
# assuming they exist, update them and store the original chunk
# range for later use
if len(df) > 0:
record = combine_method(df, record)
if record is None or record.equals(df):
continue
sym[APPEND_COUNT] += len(record) - len(df)
appended += len(record) - len(df)
sym[LEN] += len(record) - len(df)
else:
sym[CHUNK_COUNT] += 1
new_chunks += 1
sym[LEN] += len(record)
data = SER_MAP[sym[SERIALIZER]].serialize(record)
meta = data[METADATA]
chunk_count = int(len(data[DATA]) / MAX_CHUNK_SIZE + 1)
seg_count = mongo_count(self._collection, filter={SYMBOL: symbol, START: start, END: end})
# remove old segments for this chunk in case we now have less
# segments than we did before
if seg_count > chunk_count:
self._collection.delete_many({SYMBOL: symbol,
START: start,
END: end,
SEGMENT: {'$gte': chunk_count}})
for i in xrange(chunk_count):
chunk = {DATA: Binary(data[DATA][i * MAX_CHUNK_SIZE: (i + 1) * MAX_CHUNK_SIZE])}
chunk[SEGMENT] = i
chunk[START] = start
chunk[END] = end
chunk[SYMBOL] = symbol
dates = [chunker.chunk_to_str(start), chunker.chunk_to_str(end), str(chunk[SEGMENT]).encode('ascii')]
sha = self._checksum(dates, data[DATA])
chunk[SHA] = sha
ops.append(pymongo.UpdateOne({SYMBOL: symbol,
START: start,
END: end,
SEGMENT: chunk[SEGMENT]},
{'$set': chunk}, upsert=True))
meta_ops.append(pymongo.UpdateOne({SYMBOL: symbol,
START: start,
END: end},
{'$set': meta}, upsert=True))
if ops:
self._collection.bulk_write(ops, ordered=False)
self._mdata.bulk_write(meta_ops, ordered=False)
sym[USERMETA] = metadata
self._symbols.replace_one({SYMBOL: symbol}, sym)
if audit is not None:
if new_chunks > 0:
audit['new_chunks'] = new_chunks
if appended > 0:
audit['appended_rows'] = appended
self._audit.insert_one(audit)
def append(self, symbol, item, upsert=False, metadata=None, audit=None, **kwargs):
"""
Appends data from item to symbol's data in the database.
Is not idempotent
Parameters
----------
symbol: str
the symbol for the given item in the DB
item: DataFrame or Series
the data to append
upsert: bool
write data if symbol does not exist
metadata: ?
optional per symbol metadata
audit: dict
optional audit information
kwargs:
passed to write if upsert is true and symbol does not exist
"""
sym = self._get_symbol_info(symbol)
if not sym:
if upsert:
return self.write(symbol, item, metadata=metadata, audit=audit, **kwargs)
else:
raise NoDataFoundException("Symbol does not exist.")
if audit is not None:
audit['symbol'] = symbol
audit['action'] = 'append'
self.__update(sym, item, metadata=metadata, combine_method=SER_MAP[sym[SERIALIZER]].combine, audit=audit)
def update(self, symbol, item, metadata=None, chunk_range=None, upsert=False, audit=None, **kwargs):
"""
Overwrites data in DB with data in item for the given symbol.
Is idempotent
Parameters
----------
symbol: str
the symbol for the given item in the DB
item: DataFrame or Series
the data to update
metadata: ?
optional per symbol metadata
chunk_range: None, or a range object
If a range is specified, it will clear/delete the data within the
range and overwrite it with the data in item. This allows the user
to update with data that might only be a subset of the
original data.
upsert: bool
if True, will write the data even if the symbol does not exist.
audit: dict
optional audit information
kwargs:
optional keyword args passed to write during an upsert. Includes:
chunk_size
chunker
"""
sym = self._get_symbol_info(symbol)
if not sym:
if upsert:
return self.write(symbol, item, metadata=metadata, audit=audit, **kwargs)
else:
raise NoDataFoundException("Symbol does not exist.")
if audit is not None:
audit['symbol'] = symbol
audit['action'] = 'update'
if chunk_range is not None:
if len(CHUNKER_MAP[sym[CHUNKER]].filter(item, chunk_range)) == 0:
raise Exception('Range must be inclusive of data')
self.__update(sym, item, metadata=metadata, combine_method=self.serializer.combine, chunk_range=chunk_range, audit=audit)
else:
self.__update(sym, item, metadata=metadata, combine_method=lambda old, new: new, chunk_range=chunk_range, audit=audit)
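    # Hedged usage sketch: append combines new rows into existing chunks and
    # is not idempotent, while update replaces overlapping chunks and is.
    #
    #   lib.append('my_symbol', extra_rows_df)     # merge within chunks
    #   lib.update('my_symbol', corrected_df)      # overwrite matching chunks
    #   lib.update('new_symbol', df, upsert=True)  # falls back to write()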
def get_info(self, symbol):
"""
Returns information about the symbol, in a dictionary
Parameters
----------
symbol: str
the symbol for the given item in the DB
Returns
-------
dictionary
"""
sym = self._get_symbol_info(symbol)
if not sym:
raise NoDataFoundException("Symbol does not exist.")
ret = {}
ret['chunk_count'] = sym[CHUNK_COUNT]
ret['len'] = sym[LEN]
ret['appended_rows'] = sym[APPEND_COUNT]
ret['metadata'] = sym[METADATA] if METADATA in sym else None
ret['chunker'] = sym[CHUNKER]
ret['chunk_size'] = sym[CHUNK_SIZE] if CHUNK_SIZE in sym else 0
ret['serializer'] = sym[SERIALIZER]
return ret
def read_metadata(self, symbol):
'''
Reads user defined metadata out for the given symbol
Parameters
----------
symbol: str
symbol for the given item in the DB
Returns
-------
?
'''
sym = self._get_symbol_info(symbol)
if not sym:
raise NoDataFoundException("Symbol does not exist.")
x = self._symbols.find_one({SYMBOL: symbol})
return x[USERMETA] if USERMETA in x else None
def write_metadata(self, symbol, metadata):
'''
writes user defined metadata for the given symbol
Parameters
----------
symbol: str
symbol for the given item in the DB
metadata: ?
metadata to write
'''
sym = self._get_symbol_info(symbol)
if not sym:
raise NoDataFoundException("Symbol does not exist.")
sym[USERMETA] = metadata
self._symbols.replace_one({SYMBOL: symbol}, sym)
def get_chunk_ranges(self, symbol, chunk_range=None, reverse=False):
"""
Returns a generator of (Start, End) tuples for each chunk in the symbol
Parameters
----------
symbol: str
the symbol for the given item in the DB
chunk_range: None, or a range object
allows you to subset the chunks by range
reverse: boolean
return the chunk ranges in reverse order
Returns
-------
generator
"""
sym = self._get_symbol_info(symbol)
if not sym:
raise NoDataFoundException("Symbol does not exist.")
c = CHUNKER_MAP[sym[CHUNKER]]
# all symbols have a segment 0
spec = {SYMBOL: symbol, SEGMENT: 0}
if chunk_range is not None:
spec.update(CHUNKER_MAP[sym[CHUNKER]].to_mongo(chunk_range))
for x in self._collection.find(spec,
projection=[START, END],
sort=[(START, pymongo.ASCENDING if not reverse else pymongo.DESCENDING)]):
yield (c.chunk_to_str(x[START]), c.chunk_to_str(x[END]))
def iterator(self, symbol, chunk_range=None, **kwargs):
"""
Returns a generator that accesses each chunk in ascending order
Parameters
----------
symbol: str
the symbol for the given item in the DB
chunk_range: None, or a range object
allows you to subset the chunks by range
Returns
-------
generator
"""
sym = self._get_symbol_info(symbol)
if not sym:
raise NoDataFoundException("Symbol does not exist.")
c = CHUNKER_MAP[sym[CHUNKER]]
for chunk in list(self.get_chunk_ranges(symbol, chunk_range=chunk_range)):
yield self.read(symbol, chunk_range=c.to_range(chunk[0], chunk[1]), **kwargs)
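    # Hedged usage sketch: stream a large symbol chunk by chunk instead of
    # materialising it all at once.
    #
    #   for chunk_df in lib.iterator('my_symbol'):
    #       process(chunk_df)   # 'process' is a placeholder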
def reverse_iterator(self, symbol, chunk_range=None, **kwargs):
"""
Returns a generator that accesses each chunk in descending order
Parameters
----------
symbol: str
the symbol for the given item in the DB
chunk_range: None, or a range object
allows you to subset the chunks by range
Returns
-------
generator
"""
sym = self._get_symbol_info(symbol)
if not sym:
raise NoDataFoundException("Symbol does not exist.")
c = CHUNKER_MAP[sym[CHUNKER]]
for chunk in list(self.get_chunk_ranges(symbol, chunk_range=chunk_range, reverse=True)):
yield self.read(symbol, chunk_range=c.to_range(chunk[0], chunk[1]), **kwargs)
def stats(self):
"""
Return storage statistics about the library
Returns
-------
dictionary of storage stats
"""
res = {}
db = self._collection.database
conn = db.connection
res['sharding'] = {}
try:
sharding = conn.config.databases.find_one({'_id': db.name})
if sharding:
res['sharding'].update(sharding)
res['sharding']['collections'] = list(conn.config.collections.find({'_id': {'$regex': '^' + db.name + r"\..*"}}))
except OperationFailure:
# Access denied
pass
res['dbstats'] = db.command('dbstats')
res['chunks'] = db.command('collstats', self._collection.name)
res['symbols'] = db.command('collstats', self._symbols.name)
res['metadata'] = db.command('collstats', self._mdata.name)
res['totals'] = {
'count': res['chunks']['count'],
'size': res['chunks']['size'] + res['symbols']['size'] + res['metadata']['size'],
}
return res
def has_symbol(self, symbol):
"""
Check if symbol exists in collection
Parameters
----------
symbol: str
The symbol to look up in the collection
Returns
-------
bool
"""
return self._get_symbol_info(symbol) is not None
| lgpl-2.1 |
JohanComparat/pySU | spm/bin_SMF/create_table_delta_mag.py | 1 | 8600 | import astropy.io.fits as fits
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as p
import numpy as n
import os
import sys
from scipy.stats import scoreatpercentile as sc
from scipy.interpolate import interp1d
survey = sys.argv[1]
z_min, z_max = 0., 1.6
imfs = ["Chabrier_ELODIE_", "Chabrier_MILES_", "Chabrier_STELIB_", "Kroupa_ELODIE_", "Kroupa_MILES_", "Kroupa_STELIB_", "Salpeter_ELODIE_", "Salpeter_MILES_", "Salpeter_STELIB_" ]
delta_mag_high = 2.5
delta_mag_low = 0.75
z_bins = n.array([0, 0.17, 0.55, 1.6])
SNR_keys = n.array([ 'g', 'r', 'i' ])
out_dir = os.path.join(os.environ['OBS_REPO'], 'spm', 'results')
#path_2_sdss_cat = os.path.join( os.environ['HOME'], 'SDSS', '26', 'catalogs', "FireFly_mag.fits" )
#path_2_eboss_cat = os.path.join( os.environ['HOME'], 'SDSS', 'v5_10_0', 'catalogs', "FireFly_mag.fits" )
path_2_sdss_cat = os.path.join( os.environ['OBS_REPO'], 'SDSS', '26', 'catalogs', "FireFly_mag.fits" )
path_2_eboss_cat = os.path.join( os.environ['OBS_REPO'], 'SDSS', 'v5_10_0', 'catalogs', "FireFly_mag.fits" )
# OPENS THE CATALOGS
print("Loads catalog")
if survey =='deep2':
deep2_dir = os.path.join(os.environ['OBS_REPO'], 'DEEP2')
path_2_deep2_cat = os.path.join( deep2_dir, "zcat.deep2.dr4.v4.LFcatalogTC.Planck13.spm.fits" )
catalog = fits.open(path_2_deep2_cat)[1].data
if survey =='sdss':
catalog = fits.open(path_2_sdss_cat)[1].data
z_name, z_err_name, class_name, zwarning = 'Z', 'Z_ERR', 'CLASS', 'ZWARNING'
if survey =='boss':
catalog = fits.open(path_2_eboss_cat)[1].data
z_name, z_err_name, class_name, zwarning = 'Z_NOQSO', 'Z_ERR_NOQSO', 'CLASS_NOQSO', 'ZWARNING_NOQSO'
IMF = imfs[0]
prf = IMF.split('_')[0]+' & '+IMF.split('_')[1]
print(IMF, prf)
name, zflg_val, prefix = prf, 0., IMF
catalog_0 = (catalog[z_err_name] > 0.) & (catalog[z_name] > catalog[z_err_name]) & (catalog[class_name]=='GALAXY') & (catalog[zwarning]==zflg_val) & (catalog[z_name] > z_min) & (catalog[z_name] < z_max)
catalog_zOk = catalog_0 & (catalog['SNR_ALL']>0)
converged = (catalog_zOk)&(catalog[prefix+'stellar_mass'] < 10**13. ) & (catalog[prefix+'stellar_mass'] > 10**4 ) & (catalog[prefix+'stellar_mass'] > catalog[prefix+'stellar_mass_low_1sig'] ) & (catalog[prefix+'stellar_mass'] < catalog[prefix+'stellar_mass_up_1sig'] )
dex04 = (converged) & (catalog[prefix+'stellar_mass'] < 10**14. ) & (catalog[prefix+'stellar_mass'] > 0 ) & (catalog[prefix+'stellar_mass'] > catalog[prefix+'stellar_mass_low_1sig'] ) & (catalog[prefix+'stellar_mass'] < catalog[prefix+'stellar_mass_up_1sig'] ) & ( - n.log10(catalog[prefix+'stellar_mass_low_1sig']) + n.log10(catalog[prefix+'stellar_mass_up_1sig']) < delta_mag_high )
dex02 = (dex04) & ( - n.log10(catalog[prefix+'stellar_mass_low_1sig']) + n.log10(catalog[prefix+'stellar_mass_up_1sig']) < delta_mag_low )
delta_g = catalog['fiberMag_g'] - catalog['modelMag_g']
delta_r = catalog['fiberMag_r'] - catalog['modelMag_r']
delta_i = catalog['fiberMag_i'] - catalog['modelMag_i']
delta_m = n.array([delta_g, delta_r, delta_i])
#target_bits
program_names = n.array(list(set( catalog['PROGRAMNAME'] )))
program_names.sort()
sourcetypes = n.array(list(set( catalog['SOURCETYPE'] )))
sourcetypes.sort()
length = lambda selection : len(selection.nonzero()[0])
pcs_ref = n.arange(0., 101, 5)
g = lambda key, s1, pcs = pcs_ref : n.hstack(( length(s1), sc(catalog[key][s1], pcs) ))
sel_pg = lambda pgr : (catalog_zOk) & (catalog['PROGRAMNAME']==pgr)
sel_st = lambda pgr : (catalog_zOk) & (catalog['SOURCETYPE']==pgr)
sel0_pg = lambda pgr : (catalog_0) & (catalog['PROGRAMNAME']==pgr)
sel0_st = lambda pgr : (catalog_0) & (catalog['SOURCETYPE']==pgr)
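# Hedged illustration of the helpers above ('GALAXY' is just one SOURCETYPE
# value present in the catalog):
#   mask = sel_st('GALAXY')   # redshift-quality cut AND source-type cut
#   n_gal = length(mask)      # number of selected spectra
#   summary = g('Z', mask)    # for the sdss run: [N, z percentiles 0,5,...,100]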
all_galaxies = []
tpps = []
for pg in sourcetypes:
print(pg)
#sel_all = sel_st("GALAXY")
sel_all = sel_st(pg)
n_all = length( sel_all )
if n_all > 100 :
print(pg, n_all)
all_galaxies.append(n_all)
all_out = []
for dm, z_Min, z_Max in zip(delta_m, z_bins[:-1], z_bins[1:]):
s_z = sel_all &(catalog[z_name] >= z_Min) & (catalog[z_name] < z_Max)
n_z = length(s_z)
print(z_Min, z_Max, n_z)
if n_z > 0 :
over_all = (s_z) & ( dm>0 ) & (n.isnan(dm)==False)
ok = length( over_all )
#print('delta_m', ok) #, n.min(dm[over_all]), n.max(dm[over_all]))
#pc90 = length( (s_z) & ( dm<0.018 ) & ( dm>0 ))
pc10 = length( (over_all) & ( dm<delta_mag_low ))
pc01 = length( (over_all) & ( dm<delta_mag_high ))
print(pc10, pc01)
all_out.append( [n_z, ok, pc10, pc01] )
else :
all_out.append([0., 0., 0., 0.])
all_out = n.hstack((all_out))
print(n_all, all_out)
tpp = pg + " & " + str(int(n_all)) + " & " + " & ".join(n.array([ str(int(el)) for el in all_out]) ) + ' \\\\ \n'
print( tpp)
tpps.append(tpp)
all_galaxies = n.array(all_galaxies)
tpps = n.array(tpps)
ids = n.argsort(all_galaxies)[::-1]
out_file = os.path.join(os.environ['OBS_REPO'], 'spm', 'results', "table_comp_"+survey+"_snr_all_sourcetype_MAG_diffs.tex")
f=open(out_file, 'w')
#f.write('source type & N & \multicolumn{c}{2}{N galaxies} && \multicolumn{c}{2}{SNR ALL$>0$} & \\multicolumn{c}{2}{frefly converged} & \multicolumn{c}{2}{$\sigma_{\log_M}<0.4$} & \multicolumn{c}{2}{$\sigma_{\log_M}<0.2$} \\\\ \n')
#f.write(' & & N & % & & N & % & N & % & N & % \\\\ \n')
for jj in ids :
f.write( tpps[jj] )
f.close()
sys.exit()
#converged = (catalog_zOk)&(catalog[prefix+'stellar_mass'] < 10**13. ) & (catalog[prefix+'stellar_mass'] > 10**4 ) & (catalog[prefix+'stellar_mass'] > catalog[prefix+'stellar_mass_low_1sig'] ) & (catalog[prefix+'stellar_mass'] < catalog[prefix+'stellar_mass_up_1sig'] )
#dex04 = (converged) & (catalog[prefix+'stellar_mass'] < 10**14. ) & (catalog[prefix+'stellar_mass'] > 0 ) & (catalog[prefix+'stellar_mass'] > catalog[prefix+'stellar_mass_low_1sig'] ) & (catalog[prefix+'stellar_mass'] < catalog[prefix+'stellar_mass_up_1sig'] ) & ( - n.log10(catalog[prefix+'stellar_mass_low_1sig']) + n.log10(catalog[prefix+'stellar_mass_up_1sig']) < delta_mag_high )
#dex02 = (dex04) & ( - n.log10(catalog[prefix+'stellar_mass_low_1sig']) + n.log10(catalog[prefix+'stellar_mass_up_1sig']) < delta_mag_low )
#m_catalog = n.log10(catalog[prefix+'stellar_mass'])
#w_catalog = n.ones_like(catalog[prefix+'stellar_mass'])
#print(ld(catalog_zOk))
#return name + " & $"+ sld(converged)+"$ ("+str(n.round(ld(converged)/ld(catalog_zOk)*100.,1))+") & $"+ sld(dex04)+"$ ("+str(n.round(ld(dex04)/ld(catalog_zOk)*100.,1))+") & $"+ sld(dex02)+ "$ ("+str(n.round(ld(dex02)/ld(catalog_zOk)*100.,1))+r") \\\\"
##return catalog_sel, m_catalog, w_catalog
sys.exit()
for IMF in imfs :
prf = IMF.split('_')[0]+' & '+IMF.split('_')[1]
l2w = get_basic_stat_deep2(deep2, 'ZBEST', 'ZQUALITY', prf, 2., IMF, o2=False)
f.write(l2w + " \n")
f.write('\\hline \n')
#l2w = get_basic_stat_DR12(boss_12_portSF_kr, 'Z', 'Z_ERR', 'Portsmouth Kroupa Star-Forming & BOSS & 12 ', 0.)
#f.write(l2w + " \n")
#l2w = get_basic_stat_DR12(boss_12_portPA_kr, 'Z', 'Z_ERR', 'Portsmouth Kroupa Passive & BOSS & 12 ', 0.)
#f.write(l2w + " \n")
#l2w = get_basic_stat_DR12(boss_12_portSF_sa, 'Z', 'Z_ERR', 'Portsmouth Salpeter Star-Forming & BOSS & 12 ', 0.)
#f.write(l2w + " \n")
#l2w = get_basic_stat_DR12(boss_12_portPA_sa, 'Z', 'Z_ERR', 'Portsmouth Salpeter Passive & BOSS & 12 ', 0.)
#f.write(l2w + " \n")
for IMF in imfs :
prf = IMF.split('_')[0]+' & '+IMF.split('_')[1]
l2w = get_basic_stat_firefly_DR14(boss, 'Z_NOQSO', 'Z_ERR_NOQSO', 'CLASS_NOQSO', 'ZWARNING_NOQSO', prf, 0., IMF)
f.write(l2w + " \n")
f.write('\\hline \n')
#l2w = get_basic_stat_DR12(sdss_12_portSF_kr, 'Z', 'Z_ERR', 'Portsmouth Kroupa Star-Forming & SDSS & 12 ', 0.)
#f.write(l2w + " \n")
#l2w = get_basic_stat_DR12(sdss_12_portPA_kr, 'Z', 'Z_ERR', 'Portsmouth Kroupa Passive & SDSS & 12 ', 0.)
#f.write(l2w + " \n")
#l2w = get_basic_stat_DR12(sdss_12_portSF_sa, 'Z', 'Z_ERR', 'Portsmouth Salpeter Star-Forming & SDSS & 12 ', 0.)
#f.write(l2w + " \n")
#l2w = get_basic_stat_DR12(sdss_12_portPA_sa, 'Z', 'Z_ERR', 'Portsmouth Salpeter Passive & SDSS & 12 ', 0.)
#f.write(l2w + " \n")
for IMF in imfs :
prf = IMF.split('_')[0]+' & '+IMF.split('_')[1]
l2w = get_basic_stat_firefly_DR14(sdss, 'Z', 'Z_ERR', 'CLASS', 'ZWARNING', prf, 0., IMF)
f.write(l2w + " \n")
f.write('\\hline \n')
f.close()
#"""
out_file = os.path.join(os.environ['OBS_REPO'], 'spm', 'results', "table_2_r.tex")
f=open(out_file, 'w')
for IMF in imfs :
prf = IMF.split('_')[0]+' & '+IMF.split('_')[1]
l2w = get_basic_stat_deep2(deep2, 'ZBEST', 'ZQUALITY', prf, 2., IMF, o2=True)
f.write(l2w + " \n")
f.close()
| cc0-1.0 |
Vijaysai005/KProject | vijay/DBSCAN/clustering/db/my_dbscan.py | 1 | 2986 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 20 13:15:05 2017
@author: Vijayasai S
"""
# Use python3
import pandas as pd
def manual_DBSCAN(df, coll, maximum_distance):
#print (df)
"""
Parameters
----------
df : dataframe, required
The dataframe for cluster creation.
coll : collection table, required.
Give the table name to store the data in database.
maximum_distance : float, required
Give the maximum distance between two points in a cluster.
"""
#coll.drop()
df["timestamp"] = pd.to_datetime(df["timestamp"])
sorted_df = df.sort_values(by=["timestamp"], ascending=True)
curr_time_stamp = df["timestamp"][0]
sorted_df = df[0:df.shape[0]].sort_values(by=["rank"], ascending=True)
sorted_df = sorted_df.reset_index(drop=True)
pair_dist = [None] ; flag_dist = [None]
for k in range(sorted_df.shape[0] - 1):
pair_dist.append(sorted_df["distance_by_interval"][k] - sorted_df["distance_by_interval"][k+1])
if pair_dist[k+1] > maximum_distance:
flag_dist.append(1)
elif pair_dist[k+1] <= maximum_distance:
flag_dist.append(0)
pair = pd.Series(pair_dist)
flag = pd.Series(flag_dist)
sorted_df = sorted_df.assign(pair=pair.values, flag=flag.values)
cluster_number = [0 for m in range(sorted_df.shape[0])] ; clus_num = 1
######################################################################################################
# CLUSTER NUMBER PREDICTION
######################################################################################################
for n in range(sorted_df.shape[0]-1):
if n == 1 and sorted_df["flag"][n] == 1:
cluster_number[n-1] = "outlier"
elif n == 1 and sorted_df["flag"][n] == 0:
cluster_number[n-1] = clus_num
if sorted_df["flag"][n] == 1 and sorted_df["flag"][n+1] == 1:
cluster_number[n] = "outlier"
#clus_num += 1
if sorted_df["flag"][n] == 0:
cluster_number[n] = clus_num
elif sorted_df["flag"][n] == 1 and sorted_df["flag"][n+1] == 0:
clus_num += 1
cluster_number[n] = clus_num
if n == sorted_df.shape[0] - 2 and sorted_df["flag"][n+1] == 1:
cluster_number[n+1] = "outlier"
if n == sorted_df.shape[0] - 2 and sorted_df["flag"][n+1] == 0:
cluster_number[n+1] = clus_num
if 1 not in cluster_number:
for p in range(len(cluster_number)):
if cluster_number[p] != "outlier":
if cluster_number[p] > 1:
cluster_number[p] = cluster_number[p] - 1
######################################################################################################
clus_n = pd.Series(cluster_number)
sorted_df = sorted_df.assign(clus_n=clus_n.values)
ld = sorted_df.values.tolist()
#print (sorted_df.shape[0])
for a in range(sorted_df.shape[0]):
coll.insert([{"distance_by_interval":ld[a][0], "unit_id":ld[a][5], "rank":ld[a][3],"timestamp":ld[a][4],\
"flag":ld[a][6],"dist_diff":ld[a][7],"cluster_number":ld[a][8], "latitude":ld[a][1], "longitude":ld[a][2]}])
#print (cluster_number)
return
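# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only, not part of the original module).
# It assumes the input CSV provides the columns referenced above
# ("timestamp", "rank", "distance_by_interval", "latitude", "longitude",
# "unit_id") and that `coll` is a pymongo-style collection; the server,
# database, collection and file names below are hypothetical placeholders.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from pymongo import MongoClient

    client = MongoClient("localhost", 27017)            # hypothetical server
    coll = client["vehicle_db"]["cluster_results"]      # hypothetical collection
    frame = pd.read_csv("unit_positions.csv")           # hypothetical input file
    manual_DBSCAN(frame, coll, maximum_distance=0.5)    # example distance threshold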
| gpl-3.0 |
Zeing/CaptchaReader | src/Control/Identify.py | 2 | 4946 | '''
Created on Aug 28, 2014
@author: Jason Guo E-mail: [email protected]
'''
import cStringIO
import datetime
from sklearn.externals import joblib
from PIL import Image
import config
class Identify():
'''
Usage: to identify the captcha.
Input: the string of captcha image
Output: the string of captcha content
'''
def __init__(self, captcha):
'''
Constructor
'''
self.captcha = captcha
# Get the module
self.clf = joblib.load(config.DATA_FILE_NAME)
def captcha_reader(self):
starttime = datetime.datetime.now()
# Binarization
captcha_file = cStringIO.StringIO(self.captcha)
img = Image.open(captcha_file)
img = img.convert("RGBA")
black_pix = range(img.size[0])
pixdata = img.load()
for y in xrange(img.size[1]):
for x in xrange(img.size[0]):
if pixdata[x, y][0] < 90 or pixdata[x, y][1] < 162:
pixdata[x, y] = (0, 0, 0, 255)
if pixdata[x, y][2] > 0:
pixdata[x, y] = (255, 255, 255, 255)
else:
pixdata[x, y] = (0, 0, 0, 255)
endtime = datetime.datetime.now()
interval = endtime - starttime
print "%.5f," % (interval.seconds + interval.microseconds / 1000000.0),
starttime = datetime.datetime.now()
# Split figure
for x in xrange(img.size[0]):
row_black_pix = 0
for y in xrange(img.size[1]):
if pixdata[x, y] == (0, 0, 0, 255):
row_black_pix += 1
black_pix[x] = row_black_pix
split_position = []
for i in xrange(1, img.size[0]):
if black_pix[i] != 0 and black_pix[i - 1] == 0:
if len(split_position) % 2 == 0:
split_position.append(i)
elif black_pix[i] == 0 and black_pix[i - 1] != 0:
if i - 1 - split_position[-1] >= 6:
split_position.append(i - 1)
if split_position[1] > 17:
self.insert_index(1, 10, 16, black_pix, split_position)
if split_position[3] > 27:
self.insert_index(3, 20, 26, black_pix, split_position)
if split_position[5] > 37:
self.insert_index(5, 30, 36, black_pix, split_position)
if len(split_position) != 8:
return "alreadfail"
endtime = datetime.datetime.now()
interval = endtime - starttime
print "%.5f," % (interval.seconds + interval.microseconds / 1000000.0),
starttime = datetime.datetime.now()
# Identify figure
result = ""
identify_list = black_pix[split_position[0] : split_position[1] + 1]
identity_len = len(identify_list)
while identity_len < 11:
identify_list.append(0)
identity_len += 1
while identity_len > 11:
identify_list.pop()
identity_len -= 1
result += self.clf.predict([identify_list])[0]
identify_list = black_pix[split_position[2] : split_position[3] + 1]
identity_len = len(identify_list)
while identity_len < 11:
identify_list.append(0)
identity_len += 1
while identity_len > 11:
identify_list.pop()
identity_len -= 1
result += self.clf.predict([identify_list])[0]
identify_list = black_pix[split_position[4] : split_position[5] + 1]
identity_len = len(identify_list)
while identity_len < 11:
identify_list.append(0)
identity_len += 1
while identity_len > 11:
identify_list.pop()
identity_len -= 1
result += self.clf.predict([identify_list])[0]
identify_list = black_pix[split_position[6] : split_position[7] + 1]
identity_len = len(identify_list)
while identity_len < 11:
identify_list.append(0)
identity_len += 1
while identity_len > 11:
identify_list.pop()
identity_len -= 1
result += self.clf.predict([identify_list])[0]
endtime = datetime.datetime.now()
interval = endtime - starttime
print "%.5f," % (interval.seconds + interval.microseconds / 1000000.0),
return result
def insert_index(self, index, low, high, black_pix, split_position):
min_index = 0
min_value = 25
if split_position[index] > high:
for i in range(low, high):
if min_value > black_pix[i]:
min_value = black_pix[i]
min_index = i
split_position.insert(index, min_index)
split_position.insert(index + 1, min_index + 1)
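# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only, not part of the original module).
# It assumes a trained classifier has already been saved at
# config.DATA_FILE_NAME and that 'captcha_sample.png' is a captcha image on
# disk; both names are placeholders.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    with open('captcha_sample.png', 'rb') as fh:
        raw = fh.read()
    print Identify(raw).captcha_reader()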
| mit |
planetarymike/IDL-Colorbars | IDL_py_test/051_CB-BuPu.py | 1 | 8627 | from matplotlib.colors import LinearSegmentedColormap
from numpy import nan, inf
cm_data = [[0.968627, 0.988235, 0.992157],
[0.964706, 0.984314, 0.992157],
[0.964706, 0.984314, 0.988235],
[0.960784, 0.980392, 0.988235],
[0.956863, 0.980392, 0.988235],
[0.952941, 0.976471, 0.988235],
[0.952941, 0.976471, 0.984314],
[0.94902, 0.972549, 0.984314],
[0.945098, 0.972549, 0.984314],
[0.941176, 0.968627, 0.980392],
[0.941176, 0.968627, 0.980392],
[0.937255, 0.964706, 0.980392],
[0.933333, 0.964706, 0.980392],
[0.929412, 0.960784, 0.976471],
[0.929412, 0.960784, 0.976471],
[0.92549, 0.956863, 0.976471],
[0.921569, 0.956863, 0.972549],
[0.917647, 0.952941, 0.972549],
[0.917647, 0.952941, 0.972549],
[0.913725, 0.94902, 0.968627],
[0.909804, 0.94902, 0.968627],
[0.905882, 0.945098, 0.968627],
[0.905882, 0.945098, 0.968627],
[0.901961, 0.941176, 0.964706],
[0.898039, 0.941176, 0.964706],
[0.894118, 0.937255, 0.964706],
[0.894118, 0.937255, 0.960784],
[0.890196, 0.933333, 0.960784],
[0.886275, 0.933333, 0.960784],
[0.882353, 0.929412, 0.960784],
[0.882353, 0.929412, 0.956863],
[0.878431, 0.92549, 0.956863],
[0.87451, 0.921569, 0.956863],
[0.870588, 0.917647, 0.952941],
[0.866667, 0.917647, 0.952941],
[0.862745, 0.913725, 0.94902],
[0.858824, 0.909804, 0.94902],
[0.854902, 0.905882, 0.945098],
[0.85098, 0.905882, 0.945098],
[0.847059, 0.901961, 0.945098],
[0.843137, 0.898039, 0.941176],
[0.839216, 0.894118, 0.941176],
[0.835294, 0.890196, 0.937255],
[0.831373, 0.890196, 0.937255],
[0.827451, 0.886275, 0.933333],
[0.823529, 0.882353, 0.933333],
[0.819608, 0.878431, 0.929412],
[0.815686, 0.878431, 0.929412],
[0.807843, 0.87451, 0.929412],
[0.803922, 0.870588, 0.92549],
[0.8, 0.866667, 0.92549],
[0.796078, 0.862745, 0.921569],
[0.792157, 0.862745, 0.921569],
[0.788235, 0.858824, 0.917647],
[0.784314, 0.854902, 0.917647],
[0.780392, 0.85098, 0.917647],
[0.776471, 0.847059, 0.913725],
[0.772549, 0.847059, 0.913725],
[0.768627, 0.843137, 0.909804],
[0.764706, 0.839216, 0.909804],
[0.760784, 0.835294, 0.905882],
[0.756863, 0.835294, 0.905882],
[0.752941, 0.831373, 0.901961],
[0.74902, 0.827451, 0.901961],
[0.745098, 0.823529, 0.901961],
[0.741176, 0.823529, 0.898039],
[0.737255, 0.819608, 0.898039],
[0.733333, 0.815686, 0.898039],
[0.729412, 0.811765, 0.894118],
[0.72549, 0.811765, 0.894118],
[0.721569, 0.807843, 0.890196],
[0.717647, 0.803922, 0.890196],
[0.713725, 0.803922, 0.890196],
[0.709804, 0.8, 0.886275],
[0.705882, 0.796078, 0.886275],
[0.701961, 0.792157, 0.886275],
[0.698039, 0.792157, 0.882353],
[0.694118, 0.788235, 0.882353],
[0.690196, 0.784314, 0.878431],
[0.686275, 0.784314, 0.878431],
[0.678431, 0.780392, 0.878431],
[0.67451, 0.776471, 0.87451],
[0.670588, 0.772549, 0.87451],
[0.666667, 0.772549, 0.87451],
[0.662745, 0.768627, 0.870588],
[0.658824, 0.764706, 0.870588],
[0.654902, 0.760784, 0.866667],
[0.65098, 0.760784, 0.866667],
[0.647059, 0.756863, 0.866667],
[0.643137, 0.752941, 0.862745],
[0.639216, 0.752941, 0.862745],
[0.635294, 0.74902, 0.862745],
[0.631373, 0.745098, 0.858824],
[0.627451, 0.741176, 0.858824],
[0.623529, 0.741176, 0.854902],
[0.619608, 0.737255, 0.854902],
[0.615686, 0.733333, 0.85098],
[0.615686, 0.729412, 0.85098],
[0.611765, 0.721569, 0.847059],
[0.611765, 0.717647, 0.847059],
[0.607843, 0.713725, 0.843137],
[0.607843, 0.709804, 0.839216],
[0.603922, 0.705882, 0.839216],
[0.603922, 0.701961, 0.835294],
[0.6, 0.694118, 0.831373],
[0.596078, 0.690196, 0.831373],
[0.596078, 0.686275, 0.827451],
[0.592157, 0.682353, 0.827451],
[0.592157, 0.678431, 0.823529],
[0.588235, 0.670588, 0.819608],
[0.588235, 0.666667, 0.819608],
[0.584314, 0.662745, 0.815686],
[0.580392, 0.658824, 0.811765],
[0.580392, 0.654902, 0.811765],
[0.576471, 0.647059, 0.807843],
[0.576471, 0.643137, 0.807843],
[0.572549, 0.639216, 0.803922],
[0.572549, 0.635294, 0.8],
[0.568627, 0.631373, 0.8],
[0.568627, 0.627451, 0.796078],
[0.564706, 0.619608, 0.792157],
[0.560784, 0.615686, 0.792157],
[0.560784, 0.611765, 0.788235],
[0.556863, 0.607843, 0.788235],
[0.556863, 0.603922, 0.784314],
[0.552941, 0.596078, 0.780392],
[0.552941, 0.592157, 0.780392],
[0.54902, 0.588235, 0.776471],
[0.54902, 0.584314, 0.772549],
[0.54902, 0.576471, 0.772549],
[0.54902, 0.572549, 0.768627],
[0.54902, 0.568627, 0.764706],
[0.54902, 0.560784, 0.764706],
[0.54902, 0.556863, 0.760784],
[0.54902, 0.552941, 0.756863],
[0.54902, 0.545098, 0.756863],
[0.54902, 0.541176, 0.752941],
[0.54902, 0.537255, 0.74902],
[0.54902, 0.529412, 0.74902],
[0.54902, 0.52549, 0.745098],
[0.54902, 0.521569, 0.741176],
[0.54902, 0.513725, 0.741176],
[0.54902, 0.509804, 0.737255],
[0.54902, 0.505882, 0.737255],
[0.54902, 0.498039, 0.733333],
[0.54902, 0.494118, 0.729412],
[0.54902, 0.486275, 0.729412],
[0.54902, 0.482353, 0.72549],
[0.54902, 0.478431, 0.721569],
[0.54902, 0.470588, 0.721569],
[0.54902, 0.466667, 0.717647],
[0.54902, 0.462745, 0.713725],
[0.54902, 0.454902, 0.713725],
[0.54902, 0.45098, 0.709804],
[0.54902, 0.447059, 0.705882],
[0.54902, 0.439216, 0.705882],
[0.54902, 0.435294, 0.701961],
[0.54902, 0.431373, 0.698039],
[0.54902, 0.423529, 0.698039],
[0.54902, 0.419608, 0.694118],
[0.54902, 0.415686, 0.690196],
[0.54902, 0.407843, 0.690196],
[0.54902, 0.403922, 0.686275],
[0.54902, 0.4, 0.686275],
[0.545098, 0.392157, 0.682353],
[0.545098, 0.388235, 0.678431],
[0.545098, 0.384314, 0.678431],
[0.545098, 0.380392, 0.67451],
[0.545098, 0.372549, 0.670588],
[0.545098, 0.368627, 0.670588],
[0.545098, 0.364706, 0.666667],
[0.545098, 0.356863, 0.666667],
[0.541176, 0.352941, 0.662745],
[0.541176, 0.34902, 0.658824],
[0.541176, 0.341176, 0.658824],
[0.541176, 0.337255, 0.654902],
[0.541176, 0.333333, 0.65098],
[0.541176, 0.32549, 0.65098],
[0.541176, 0.321569, 0.647059],
[0.541176, 0.317647, 0.647059],
[0.537255, 0.309804, 0.643137],
[0.537255, 0.305882, 0.639216],
[0.537255, 0.301961, 0.639216],
[0.537255, 0.298039, 0.635294],
[0.537255, 0.290196, 0.631373],
[0.537255, 0.286275, 0.631373],
[0.537255, 0.282353, 0.627451],
[0.537255, 0.27451, 0.627451],
[0.533333, 0.270588, 0.623529],
[0.533333, 0.266667, 0.619608],
[0.533333, 0.258824, 0.619608],
[0.533333, 0.254902, 0.615686],
[0.533333, 0.247059, 0.611765],
[0.533333, 0.243137, 0.607843],
[0.529412, 0.235294, 0.603922],
[0.529412, 0.231373, 0.6],
[0.529412, 0.223529, 0.596078],
[0.529412, 0.219608, 0.592157],
[0.52549, 0.211765, 0.588235],
[0.52549, 0.207843, 0.584314],
[0.52549, 0.2, 0.580392],
[0.52549, 0.192157, 0.576471],
[0.52549, 0.188235, 0.572549],
[0.521569, 0.180392, 0.568627],
[0.521569, 0.176471, 0.564706],
[0.521569, 0.168627, 0.560784],
[0.521569, 0.164706, 0.556863],
[0.521569, 0.156863, 0.552941],
[0.517647, 0.14902, 0.545098],
[0.517647, 0.145098, 0.541176],
[0.517647, 0.137255, 0.537255],
[0.517647, 0.133333, 0.533333],
[0.513725, 0.12549, 0.529412],
[0.513725, 0.121569, 0.52549],
[0.513725, 0.113725, 0.521569],
[0.513725, 0.109804, 0.517647],
[0.513725, 0.101961, 0.513725],
[0.509804, 0.0941176, 0.509804],
[0.509804, 0.0901961, 0.505882],
[0.509804, 0.0823529, 0.501961],
[0.509804, 0.0784314, 0.498039],
[0.505882, 0.0705882, 0.494118],
[0.505882, 0.0666667, 0.490196],
[0.505882, 0.0588235, 0.486275],
[0.498039, 0.0588235, 0.478431],
[0.494118, 0.054902, 0.47451],
[0.486275, 0.054902, 0.466667],
[0.482353, 0.0509804, 0.462745],
[0.47451, 0.0509804, 0.454902],
[0.466667, 0.0470588, 0.45098],
[0.462745, 0.0470588, 0.443137],
[0.454902, 0.0431373, 0.439216],
[0.447059, 0.0431373, 0.431373],
[0.443137, 0.0392157, 0.427451],
[0.435294, 0.0392157, 0.419608],
[0.431373, 0.0352941, 0.415686],
[0.423529, 0.0352941, 0.407843],
[0.415686, 0.0313725, 0.403922],
[0.411765, 0.0313725, 0.396078],
[0.403922, 0.0313725, 0.392157],
[0.396078, 0.027451, 0.384314],
[0.392157, 0.027451, 0.376471],
[0.384314, 0.0235294, 0.372549],
[0.380392, 0.0235294, 0.364706],
[0.372549, 0.0196078, 0.360784],
[0.364706, 0.0196078, 0.352941],
[0.360784, 0.0156863, 0.34902],
[0.352941, 0.0156863, 0.341176],
[0.345098, 0.0117647, 0.337255],
[0.341176, 0.0117647, 0.329412],
[0.333333, 0.00784314, 0.32549],
[0.329412, 0.00784314, 0.317647],
[0.321569, 0.00392157, 0.313725],
[0.313725, 0.00392157, 0.305882],
[0.309804, 0., 0.301961],
[0.301961, 0., 0.294118]]
test_cm = LinearSegmentedColormap.from_list(__file__, cm_data)
if __name__ == "__main__":
import matplotlib.pyplot as plt
import numpy as np
try:
from pycam02ucs.cm.viscm import viscm
viscm(test_cm)
except ImportError:
print("pycam02ucs not found, falling back on simple display")
plt.imshow(np.linspace(0, 100, 256)[None, :], aspect='auto',
cmap=test_cm)
plt.show()
| gpl-2.0 |
parejkoj/dust | radprofile.py | 2 | 6067 |
import numpy as np
from astropy.io import fits
from astropy.io import ascii
import errors as err
## November 30, 2014 : Removed dependence on matplotlib and asciidata
## April 1, 2013 : Added copy function to Profile object
## March 29, 2013 : Updated minus, plus, divide, multiply with error propagating routine (errors.py)
## March 2, 2013 : Updated Profile object with minus, plus, divide, multiply
## Part of radprofile.sh script
## Taken from CygX-3/6601/primary
## Plots a profile when used './radprofile.py rp_filename'
## where the '.txt' extension is missing from rp_filename
import os # Needed for environment variables
import sys
sys.argv
#----------------------------------------------
## The Profile object
class Profile(object):
rleft = 0.0
rright = 0.0
surbri = 0.0
surbri_err = 0.0
@property
def rmid( self ):
return 0.5 * (self.rleft + self.rright)
@property
def area( self ):
return np.pi * (self.rright**2 - self.rleft**2) # pix^2
def __getslice__( self, i,j ):
result = Profile()
result.rleft = self.rleft[i:j]
result.rright = self.rright[i:j]
result.surbri = self.surbri[i:j]
result.surbri_err = self.surbri_err[i:j]
return result
def __getitem__( self, ivals ):
result = Profile()
result.rleft = self.rleft[ivals]
result.rright = self.rright[ivals]
result.surbri = self.surbri[ivals]
result.surbri_err = self.surbri_err[ivals]
return result
def minus( self, value, value_err=0 ):
oldsb = self.surbri
oldsb_err = self.surbri_err
self.surbri = oldsb - value
self.surbri_err = err.prop_add( oldsb_err, value_err )
#self.surbri_err = np.sqrt( oldsb_err**2 + value_err**2 )
return
def plus( self, value, value_err=0 ):
oldsb = self.surbri
oldsb_err = self.surbri_err
self.surbri = oldsb + value
self.surbri_err = err.prop_add( oldsb_err, value_err )
#self.surbri_err = np.sqrt( oldsb_err**2 + value_err**2 )
return
def divide( self, value, value_err=0 ):
oldsb = self.surbri
oldsb_err = self.surbri_err
self.surbri = oldsb / value
self.surbri_err = err.prop_div( oldsb, value, oldsb_err, value_err )
#self.surbri_err = oldsb_err*2 / value
return
def multiply( self, value, value_err=0 ):
oldsb = self.surbri
oldsb_err = self.surbri_err
self.surbri = oldsb * value
self.surbri_err = err.prop_mult( oldsb, value, oldsb_err, value_err )
#self.surbri_err = oldsb_err*2 * value
return
def write( self, filename, indices='all', sci_note=False ):
if indices == 'all':
indices = range(len(self.rmid))
FORMAT = "%f \t%f \t%f \t%f\n"
if sci_note:
FORMAT = "%e \t%e \t%e \t%e\n"
f = open(filename, 'w')
f.write( "# Bin_left\tBin_right\tSurbri\tSurbri_err\n" )
for i in indices:
f.write( FORMAT % \
(self.rleft[i], self.rright[i], self.surbri[i], self.surbri_err[i]) )
f.close()
return
#----------------------------------------------
## Useful functions
def copy_profile( profile ):
result = Profile()
result.rleft = np.array( profile.rleft )
result.rright = np.array( profile.rright )
result.surbri = np.array( profile.surbri )
result.surbri_err = np.array( profile.surbri_err )
return result
def get_profile_fits( filename, flux=False ):
result = Profile()
if flux:
sb_key = 'SUR_FLUX' # phot/cm^2/s/pix^2
sberr_key = 'SUR_FLUX_ERR'
else:
sb_key = 'SUR_BRI' # count/pix^2
sberr_key = 'SUR_BRI_ERR'
hdu_list = fits.open( filename )
data = hdu_list[1].data
result.rleft = data['R'][:,0]
result.rright = data['R'][:,1]
result.surbri = data[sb_key]
result.surbri_err = data[sberr_key]
return result
def get_profile( filename ):
result = Profile()
data = ascii.read( filename )
keys = data.keys()
result.rleft = data[keys[0]]
result.rright = data[keys[1]]
result.surbri = data[keys[2]]
result.surbri_err = data[keys[3]]
return result
def add_profile( profile1, profile2=Profile(), weight1=1.0, weight2=1.0 ):
result = Profile()
# if profile1.rleft != profile2.rleft or profile1.rright != profile2.rright:
# print 'Error: Profile bins need to match up'
# return
result.surbri = profile1.surbri * weight1 + profile2.surbri * weight2
result.surbri_err = np.sqrt( profile1.surbri_err**2 * weight1**2 + profile2.surbri_err**2 * weight2**2 )
result.rleft = profile1.rleft
result.rright = profile1.rright
return result
def make_bkg_profile( template, bkg_value, bkg_err=0.0 ):
result = Profile()
result.rleft = template.rleft
result.rright = template.rright
result.surbri = np.zeros( len(template.rleft) ) + bkg_value
result.surbri_err = np.zeros( len(template.rleft) ) + bkg_err
return result
#----------------------------------------------
## Added Feb 5, 2013 : More useful functions
def add_bkg( profile, bkg_counts, bkg_area ):
## To subtract, put a - sign before bkg_counts
bkg_surbri = bkg_counts / bkg_area
bkg_err = np.sqrt( bkg_counts ) / bkg_area
sbnew = profile.surbri + bkg_surbri
sbnew_err = np.sqrt( profile.surbri_err**2 + bkg_err**2 )
profile.surbri = sbnew
profile.surbri_err = sbnew_err
return
def residual_profile( profile, model ):
result = Profile()
result.rleft = np.array(profile.rleft)
result.rright = np.array(profile.rright)
result.surbri = profile.surbri - model.surbri
result.surbri_err = np.sqrt( profile.surbri_err**2 + model.surbri_err**2 )
return result
#----------------------------------------------
try:
    datafile = sys.argv[1]
    profile = get_profile( datafile )
except:
    # No file argument was given (or it could not be read); swallow the error
    # so the module can still be imported and used as a library.
    pass
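# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only, not part of the original module).
# The file names and background level are hypothetical; it shows the intended
# workflow of loading a radial profile, subtracting a flat background and
# writing the result back out.
# ---------------------------------------------------------------------------
def _example():
    prof = get_profile('rp_source.txt')                 # 4-column text profile
    bkg = make_bkg_profile(prof, bkg_value=0.01, bkg_err=0.001)
    net = residual_profile(prof, bkg)
    net.write('rp_source_bkgsub.txt')
    return net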
| bsd-2-clause |
lambday/shogun | examples/undocumented/python/graphical/so_multiclass_director_BMRM.py | 11 | 4332 | #!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
from shogun import RealFeatures
from shogun import MulticlassModel, MulticlassSOLabels, RealNumber, DualLibQPBMSOSVM, DirectorStructuredModel
from shogun import BMRM, PPBMRM, P3BMRM, ResultSet, RealVector
from shogun import StructuredAccuracy
class MulticlassStructuredModel(DirectorStructuredModel):
def __init__(self,features,labels):
DirectorStructuredModel.__init__(self)
self.set_features(features)
self.set_labels(labels)
self.dim = features.get_dim_feature_space()*labels.get_num_classes()
self.n_classes = labels.get_num_classes()
self.n_feats = features.get_dim_feature_space()
#self.use_director_risk()
def get_dim(self):
return self.dim
def argmax(self,w,feat_idx,training):
feature_vector = self.get_features().get_feature_vector(feat_idx)
label = None
if training == True:
label = int(RealNumber.obtain_from_generic(self.get_labels().get_label(feat_idx)).value)
ypred = 0
max_score = -1e10
for c in xrange(self.n_classes):
score = 0.0
for i in xrange(self.n_feats):
score += w[i+self.n_feats*c]*feature_vector[i]
if training == True:
score += (c!=label)
if score > max_score:
max_score = score
ypred = c
res = ResultSet()
res.score = max_score
res.psi_pred = RealVector(self.dim)
res.psi_pred.zero()
for i in xrange(self.n_feats):
res.psi_pred[i+self.n_feats*ypred] = feature_vector[i]
res.argmax = RealNumber(ypred)
if training == True:
res.delta = (label!=ypred)
res.psi_truth = RealVector(self.dim)
res.psi_truth.zero()
for i in xrange(self.n_feats):
res.psi_truth[i+self.n_feats*label] = feature_vector[i]
for i in xrange(self.n_feats):
res.score -= w[i+self.n_feats*label]*feature_vector[i]
return res
def fill_data(cnt, minv, maxv):
x1 = np.linspace(minv, maxv, cnt)
a, b = np.meshgrid(x1, x1)
X = np.array((np.ravel(a), np.ravel(b)))
y = np.zeros((1, cnt*cnt))
tmp = cnt*cnt;
y[0, tmp/3:(tmp/3)*2]=1
y[0, tmp/3*2:(tmp/3)*3]=2
return X, y.flatten()
def gen_data():
covs = np.array([[[0., -1. ], [2.5, .7]],
[[3., -1.5], [1.2, .3]],
[[ 2, 0 ], [ .0, 1.5 ]]])
X = np.r_[np.dot(np.random.randn(N, dim), covs[0]) + np.array([0, 10]),
np.dot(np.random.randn(N, dim), covs[1]) + np.array([-10, -10]),
np.dot(np.random.randn(N, dim), covs[2]) + np.array([10, -10])];
Y = np.hstack((np.zeros(N), np.ones(N), 2*np.ones(N)))
return X, Y
def get_so_labels(out):
N = out.get_num_labels()
l = np.zeros(N)
for i in xrange(N):
l[i] = RealNumber.obtain_from_generic(out.get_label(i)).value
return l
# Number of classes
M = 3
# Number of samples of each class
N = 10
# Dimension of the data
dim = 2
X, y = gen_data()
cnt = 50
X2, y2 = fill_data(cnt, np.min(X), np.max(X))
labels = MulticlassSOLabels(y)
features = RealFeatures(X.T)
model = MulticlassStructuredModel(features, labels)
lambda_ = 1e1
sosvm = DualLibQPBMSOSVM(model, labels, lambda_)
sosvm.set_cleanAfter(10) # number of iterations that cutting plane has to be inactive for to be removed
sosvm.set_cleanICP(True) # enables inactive cutting plane removal feature
sosvm.set_TolRel(0.001) # set relative tolerance
sosvm.set_verbose(True) # enables verbosity of the solver
sosvm.set_cp_models(16) # set number of cutting plane models
sosvm.set_solver(BMRM) # select training algorithm
#sosvm.set_solver(PPBMRM)
#sosvm.set_solver(P3BMRM)
sosvm.train()
res = sosvm.get_result()
Fps = res.get_hist_Fp_vector()
Fds = res.get_hist_Fd_vector()
wdists = res.get_hist_wdist_vector()
plt.figure()
plt.subplot(221)
plt.title('Fp and Fd history')
plt.plot(xrange(res.get_n_iters()), Fps, hold=True)
plt.plot(xrange(res.get_n_iters()), Fds, hold=True)
plt.subplot(222)
plt.title('w dist history')
plt.plot(xrange(res.get_n_iters()), wdists)
# Evaluation
out = sosvm.apply()
Evaluation = StructuredAccuracy()
acc = Evaluation.evaluate(out, labels)
print "Correct classification rate: %0.4f%%" % ( 100.0*acc )
# show figure
Z = get_so_labels(sosvm.apply(RealFeatures(X2)))
x = (X2[0,:]).reshape(cnt, cnt)
y = (X2[1,:]).reshape(cnt, cnt)
z = Z.reshape(cnt, cnt)
plt.subplot(223)
plt.pcolor(x, y, z)
plt.contour(x, y, z, linewidths=1, colors='black', hold=True)
plt.plot(X[:,0], X[:,1], 'yo')
plt.axis('tight')
plt.title('Classification')
plt.show()
| bsd-3-clause |
google-code-export/currentcostgui | currentcostgraphs.py | 9 | 7933 | # -*- coding: utf-8 -*-
#
# CurrentCost GUI
#
# Copyright (C) 2008 Dale Lane
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# The author of this code can be contacted at [email protected]
# Any contact about this application is warmly welcomed.
#
import wx
import wx.aui
import matplotlib as mpl
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as Canvas
from matplotlib.backends.backend_wx import NavigationToolbar2Wx, _load_bitmap
from matplotlib.dates import DayLocator, HourLocator, MonthLocator, YearLocator, WeekdayLocator, DateFormatter, drange
from matplotlib.patches import Rectangle, Patch
from matplotlib.text import Text
#
# Implements the tabs we use in the GUI - either to draw a graph, or a TextPage
# for the 'trends' page.
#
# Also includes a custom toolbar for use with Matplotlib graphs
#
# Dale Lane (http://dalelane.co.uk/blog)
class Plot(wx.Panel):
def __init__(self, parent, id = -1, dpi = None, **kwargs):
wx.Panel.__init__(self, parent, id=id, **kwargs)
self.figure = mpl.figure.Figure(dpi=dpi, figsize=(2,2))
self.canvas = Canvas(self, -1, self.figure)
self.toolbar = Toolbar(self.canvas)
self.toolbar.Realize()
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(self.canvas,1,wx.EXPAND)
sizer.Add(self.toolbar, 0 , wx.LEFT | wx.EXPAND)
self.SetSizer(sizer)
class PlotNotebook(wx.Panel):
def __init__(self, parent, id = -1):
# parent is a frame --> MyFrame (wx.Frame)
wx.Panel.__init__(self, parent, id=id)
self.nb = wx.aui.AuiNotebook(self, style=wx.aui.AUI_NB_TAB_MOVE)
sizer = wx.BoxSizer()
sizer.Add(self.nb, 1, wx.EXPAND)
self.SetSizer(sizer)
def add(self,name="plot"):
page = Plot(self.nb)
self.nb.AddPage(page,name)
return page.figure
def deletepage(self,pagename):
for i in range(0, self.nb.GetPageCount()):
if self.nb.GetPageText(i) == pagename:
self.nb.DeletePage(i)
return
def selectpage(self,pagename):
for i in range(0, self.nb.GetPageCount()):
if self.nb.GetPageText(i) == pagename:
self.nb.SetSelection(i)
return
def addtextpage(self,name):
page = TextPage(self.nb)
self.nb.AddPage(page,name)
return page
#
# we override the matplotlib toolbar class to remove the subplots function,
# which we do not use
#
class Toolbar(NavigationToolbar2Wx):
ON_CUSTOM_LEFT = wx.NewId()
ON_CUSTOM_RIGHT = wx.NewId()
# rather than copy and edit the whole (rather large) init function, we run
# the super-classes init function as usual, then go back and delete the
# button we don't want
def __init__(self, plotCanvas):
CONFIGURE_SUBPLOTS_TOOLBAR_BTN_POSITION = 6
NavigationToolbar2Wx.__init__(self, plotCanvas)
# delete the toolbar button we don't want
self.DeleteToolByPos(CONFIGURE_SUBPLOTS_TOOLBAR_BTN_POSITION)
# add the new toolbar buttons that we do want
self.AddSimpleTool(self.ON_CUSTOM_LEFT, _load_bitmap('stock_left.xpm'),
'Pan to the left', 'Pan graph to the left')
wx.EVT_TOOL(self, self.ON_CUSTOM_LEFT, self._on_custom_pan_left)
self.AddSimpleTool(self.ON_CUSTOM_RIGHT, _load_bitmap('stock_right.xpm'),
'Pan to the right', 'Pan graph to the right')
wx.EVT_TOOL(self, self.ON_CUSTOM_RIGHT, self._on_custom_pan_right)
# in theory this should never get called, because we delete the toolbar
# button that calls it. but in case it does get called (e.g. if there
# is a keyboard shortcut I don't know about) then we override the method
# that gets called - to protect against the exceptions that it throws
def configure_subplot(self, evt):
print 'ERROR: This application does not support subplots'
# pan the graph to the left
def _on_custom_pan_left(self, evt):
ONE_SCREEN = 7 # we default to 1 week
axes = self.canvas.figure.axes[0]
x1,x2 = axes.get_xlim()
ONE_SCREEN = x2 - x1
axes.set_xlim(x1 - ONE_SCREEN, x2 - ONE_SCREEN)
self.canvas.draw()
# pan the graph to the right
def _on_custom_pan_right(self, evt):
ONE_SCREEN = 7 # we default to 1 week
axes = self.canvas.figure.axes[0]
x1,x2 = axes.get_xlim()
ONE_SCREEN = x2 - x1
axes.set_xlim(x1 + ONE_SCREEN, x2 + ONE_SCREEN)
self.canvas.draw()
#
# a GUI tab that we can write text to
#
# used to implement the 'trends' page in the GUI
#
# includes a helper function to update the text displayed on this page
#
class TextPage(wx.Panel):
def __init__(self, parent, id = -1, dpi = None, **kwargs):
wx.Panel.__init__(self, parent, id=id, **kwargs)
#
self.text = wx.StaticText(self, -1, "Your CurrentCost data", wx.Point(30, 20))
self.text.SetFont(wx.Font(13, wx.DEFAULT, wx.NORMAL, wx.BOLD))
self.text.SetSize(self.text.GetBestSize())
#
self.trend1 = wx.StaticText(self, -1, "will be described here after data is received...", wx.Point(35, 80))
self.trend1.SetFont(wx.Font(11, wx.DEFAULT, wx.ITALIC, wx.NORMAL))
self.trend1.SetSize(self.trend1.GetBestSize())
#
self.trend2 = wx.StaticText(self, -1, " ",wx.Point(35, 120))
self.trend2.SetFont(wx.Font(11, wx.DEFAULT, wx.NORMAL, wx.NORMAL))
self.trend2.SetSize(self.trend2.GetBestSize())
#
self.trend3 = wx.StaticText(self, -1, " ",wx.Point(35, 160))
self.trend3.SetFont(wx.Font(11, wx.DEFAULT, wx.NORMAL, wx.NORMAL))
self.trend3.SetSize(self.trend3.GetBestSize())
#
self.trend4 = wx.StaticText(self, -1, " ",wx.Point(35, 200))
self.trend4.SetFont(wx.Font(11, wx.DEFAULT, wx.NORMAL, wx.NORMAL))
self.trend4.SetSize(self.trend4.GetBestSize())
#
self.trend5 = wx.StaticText(self, -1, " ",wx.Point(35, 240))
self.trend5.SetFont(wx.Font(11, wx.DEFAULT, wx.NORMAL, wx.NORMAL))
self.trend5.SetSize(self.trend5.GetBestSize())
#
self.trend6 = wx.StaticText(self, -1, " ",wx.Point(35, 280))
self.trend6.SetFont(wx.Font(11, wx.DEFAULT, wx.NORMAL, wx.NORMAL))
self.trend6.SetSize(self.trend6.GetBestSize())
#
self.figure = mpl.figure.Figure(dpi=dpi, figsize=(2,2))
def UpdateTrendText(self, trendnum, trendtext):
if trendnum == 1:
self.trend1.SetLabel(trendtext)
self.trend1.SetFont(wx.Font(11, wx.DEFAULT, wx.NORMAL, wx.NORMAL))
self.trend1.SetSize(self.trend1.GetBestSize())
elif trendnum == 2:
self.trend2.SetLabel(trendtext)
elif trendnum == 3:
self.trend3.SetLabel(trendtext)
elif trendnum == 4:
self.trend4.SetLabel(trendtext)
elif trendnum == 5:
self.trend5.SetLabel(trendtext)
elif trendnum == 6:
self.trend6.SetLabel(trendtext)
| gpl-3.0 |
eggplantbren/Flotsam | src/Python/emcee/TDModel.py | 1 | 4258 | import numpy as np
import numpy.random as rng
import scipy.linalg as la
import matplotlib.pyplot as plt
from Data import Data
class Limits:
"""
A singleton class that just holds prior bounds
"""
@staticmethod
def initialise(data):
# Mean magnitudes
Limits.magMin = data.yMean - 10.*data.yStDev
Limits.magMax = data.yMean + 10.*data.yStDev
Limits.magRange = Limits.magMax - Limits.magMin
# Time delays
Limits.tauMin = -data.tRange
Limits.tauMax = data.tRange
Limits.tauRange = Limits.tauMax - Limits.tauMin
# Microlensing
Limits.logSig_mlMin = np.log(1E-2*data.yStDev)
Limits.logSig_mlMax = np.log(1E2*data.yStDev)
Limits.logSig_mlRange = Limits.logSig_mlMax\
- Limits.logSig_mlMin
Limits.alpha_mlMin = 1.
Limits.alpha_mlMax = 2.
Limits.alpha_mlRange = Limits.alpha_mlMax\
- Limits.alpha_mlMin
# QSO Variability
Limits.logSig_qsoMin = np.log(1E-2*data.yStDev).mean()
Limits.logSig_qsoMax = np.log(1E2*data.yStDev).mean()
Limits.logSig_qsoRange = Limits.logSig_qsoMax\
- Limits.logSig_qsoMin
Limits.logTau_qsoMin = np.log(1E-2*data.tRange)
Limits.logTau_qsoMax = np.log(1E3*data.tRange)
Limits.logTau_qsoRange = Limits.logTau_qsoMax\
- Limits.logTau_qsoMin
class TDModel:
"""
An object of this class is a point in the Flotsam parameter space
for inferring time delays in the presence of microlensing.
"""
def __init__(self, numImages):
self.numImages = numImages
def fromPrior(self):
# Mean magnitudes
self.mag = Limits.magMin + Limits.magRange*rng.rand(self.numImages)
# Time delays
self.tau = Limits.tauMin\
+ Limits.tauRange*rng.rand(self.numImages)
self.tau[0] = 0.
# Microlensing amplitudes
self.logSig_ml = Limits.logSig_mlMin + Limits.logSig_mlRange\
*rng.rand(self.numImages)
# Microlensing smoothness
self.alpha_ml = Limits.alpha_mlMin + Limits.alpha_mlRange\
*rng.rand()
# QSO Variability
self.logSig_qso = Limits.logSig_qsoMin +\
Limits.logSig_qsoRange*rng.rand()
self.logTau_qso = Limits.logTau_qsoMin +\
Limits.logTau_qsoRange*rng.rand()
@property
def logPrior(self):
logP = 0.
if np.any(np.logical_or(self.mag < Limits.magMin,\
self.mag > Limits.magMax)):
logP = -np.inf
if np.any(np.logical_or(self.tau < Limits.tauMin,\
self.tau > Limits.tauMax)):
logP = -np.inf
if np.any(np.logical_or(self.logSig_ml < Limits.logSig_mlMin,\
self.logSig_ml > Limits.logSig_mlMax)):
logP = -np.inf
if self.alpha_ml < Limits.alpha_mlMin or self.alpha_ml >\
Limits.alpha_mlMax:
logP = -np.inf
if self.logSig_qso < Limits.logSig_qsoMin or\
self.logSig_qso > Limits.logSig_qsoMax:
logP = -np.inf
if self.logTau_qso < Limits.logTau_qsoMin or\
self.logTau_qso > Limits.logTau_qsoMax:
logP = -np.inf
return logP
def logLikelihood(self, data):
assert self.numImages == data.numImages
which = [np.nonzero(data.id == i)[0]\
for i in xrange(0, self.numImages)]
# Mean vector
m = np.empty(data.t.size)
for i in xrange(0, self.numImages):
m[which[i]] = self.mag[i]
# Covariance matrix
[t1, t2] = np.meshgrid(data.t, data.t)
lags = np.abs(t2 - t1)
plt.imshow(lags)
plt.show()
ids = np.meshgrid(data.id, data.id)
equal = ids[0] == ids[1]
## try:
## L = la.cholesky(C)
## except:
## return -np.inf
# y = data.y - m
# logDeterminant = 2.0*np.sum(np.log(np.diag(L)))
# solution = la.cho_solve((L, True), y)
# exponent = np.dot(y, solution)
# logL = -0.5*data.t.size*np.log(2.0*np.pi) - 0.5*logDeterminant - 0.5*exponent
return 0.
@property
def vector(self):
"""
Stack all parameters into a vector
"""
return np.hstack([self.mag, self.tau, self.logSig_ml,\
self.alpha_ml, self.logSig_qso,\
self.logTau_qso])
def fromVector(self, vec):
"""
Set all parameters from the vector
"""
self.mag = vec[0:self.numImages]
self.tau = vec[self.numImages:2*self.numImages]
self.logSig_ml = vec[2*self.numImages:3*self.numImages]
self.alpha_ml = vec[3*self.numImages]
self.logSig_qso = vec[3*self.numImages + 1]
self.logTau_qso = vec[3*self.numImages + 2]
if __name__ == '__main__':
data = Data()
data.load('j1131.txt')
Limits.initialise(data)
model = TDModel(data.numImages)
model.fromPrior()
| gpl-3.0 |
robbymeals/scikit-learn | examples/decomposition/plot_sparse_coding.py | 247 | 3846 | """
===========================================
Sparse coding with a precomputed dictionary
===========================================
Transform a signal as a sparse combination of Ricker wavelets. This example
visually compares different sparse coding methods using the
:class:`sklearn.decomposition.SparseCoder` estimator. The Ricker (also known
as Mexican hat or the second derivative of a Gaussian) is not a particularly
good kernel to represent piecewise constant signals like this one. It can
therefore be seen how much adding different widths of atoms matters and it
therefore motivates learning the dictionary to best fit your type of signals.
The richer dictionary on the right is not larger in size, heavier subsampling
is performed in order to stay on the same order of magnitude.
"""
print(__doc__)
import numpy as np
import matplotlib.pylab as pl
from sklearn.decomposition import SparseCoder
def ricker_function(resolution, center, width):
"""Discrete sub-sampled Ricker (Mexican hat) wavelet"""
x = np.linspace(0, resolution - 1, resolution)
x = ((2 / ((np.sqrt(3 * width) * np.pi ** 1 / 4)))
* (1 - ((x - center) ** 2 / width ** 2))
* np.exp((-(x - center) ** 2) / (2 * width ** 2)))
return x
def ricker_matrix(width, resolution, n_components):
"""Dictionary of Ricker (Mexican hat) wavelets"""
centers = np.linspace(0, resolution - 1, n_components)
D = np.empty((n_components, resolution))
for i, center in enumerate(centers):
D[i] = ricker_function(resolution, center, width)
D /= np.sqrt(np.sum(D ** 2, axis=1))[:, np.newaxis]
return D
resolution = 1024
subsampling = 3 # subsampling factor
width = 100
n_components = resolution / subsampling
# Compute a wavelet dictionary
D_fixed = ricker_matrix(width=width, resolution=resolution,
n_components=n_components)
D_multi = np.r_[tuple(ricker_matrix(width=w, resolution=resolution,
n_components=np.floor(n_components / 5))
for w in (10, 50, 100, 500, 1000))]
# Generate a signal
y = np.linspace(0, resolution - 1, resolution)
first_quarter = y < resolution / 4
y[first_quarter] = 3.
y[np.logical_not(first_quarter)] = -1.
# List the different sparse coding methods in the following format:
# (title, transform_algorithm, transform_alpha, transform_n_nozero_coefs)
estimators = [('OMP', 'omp', None, 15), ('Lasso', 'lasso_cd', 2, None), ]
pl.figure(figsize=(13, 6))
for subplot, (D, title) in enumerate(zip((D_fixed, D_multi),
('fixed width', 'multiple widths'))):
pl.subplot(1, 2, subplot + 1)
pl.title('Sparse coding against %s dictionary' % title)
pl.plot(y, ls='dotted', label='Original signal')
# Do a wavelet approximation
for title, algo, alpha, n_nonzero in estimators:
coder = SparseCoder(dictionary=D, transform_n_nonzero_coefs=n_nonzero,
transform_alpha=alpha, transform_algorithm=algo)
x = coder.transform(y)
density = len(np.flatnonzero(x))
x = np.ravel(np.dot(x, D))
squared_error = np.sum((y - x) ** 2)
pl.plot(x, label='%s: %s nonzero coefs,\n%.2f error'
% (title, density, squared_error))
# Soft thresholding debiasing
coder = SparseCoder(dictionary=D, transform_algorithm='threshold',
transform_alpha=20)
x = coder.transform(y)
_, idx = np.where(x != 0)
x[0, idx], _, _, _ = np.linalg.lstsq(D[idx, :].T, y)
x = np.ravel(np.dot(x, D))
squared_error = np.sum((y - x) ** 2)
pl.plot(x,
label='Thresholding w/ debiasing:\n%d nonzero coefs, %.2f error' %
(len(idx), squared_error))
pl.axis('tight')
pl.legend()
pl.subplots_adjust(.04, .07, .97, .90, .09, .2)
pl.show()
| bsd-3-clause |
clairetang6/bokeh | examples/app/export_csv/main.py | 3 | 2220 | ### NOTE: The csv export will not work on Safari
import bokeh
import pandas as pd
from bokeh.plotting import ColumnDataSource
from bokeh.models import CustomJS, HBox, VBox, VBoxForm
from bokeh.models.widgets import Slider, Button, DataTable, TableColumn
from bokeh.io import curdoc, vform
# note this is fake data
df = pd.read_csv('salary_data.csv')
salary_range = Slider(title="Max Salary", start=10000, end=250000, value=150000, step=1000)
button = Button(label="Download")
button.button_type = "success"
source = ColumnDataSource(data=dict())
columns = [TableColumn(field="name", title="Employee Name"),
TableColumn(field="salary", title="Income"),
TableColumn(field="years_experience", title="Experience (years)")]
data_table = DataTable(source=source, columns=columns)
def update(attr, old, new):
curr_df = df[df['salary'] <= salary_range.value].dropna()
source.data = dict(name=curr_df['name'].tolist(),
salary=curr_df['salary'].tolist(),
years_experience=curr_df['years_experience'].tolist())
salary_range.on_change('value', update)
js_callback = """
var data = source.get('data');
var filetext = 'name,income,years_experience\\n';
for (i=0; i < data['name'].length; i++) {
var currRow = [data['name'][i].toString(),
data['salary'][i].toString(),
data['years_experience'][i].toString().concat('\\n')];
var joined = currRow.join();
filetext = filetext.concat(joined);
}
var filename = 'data_result.csv';
var blob = new Blob([filetext], { type: 'text/csv;charset=utf-8;' });
//addresses IE
if (navigator.msSaveBlob) {
navigator.msSaveBlob(blob, filename);
}
else {
var link = document.createElement("a");
link = document.createElement('a')
link.href = URL.createObjectURL(blob);
link.download = filename
link.target = "_blank";
link.style.visibility = 'hidden';
link.dispatchEvent(new MouseEvent('click'))
}"""
button.callback = CustomJS(args=dict(source=source), code=js_callback)
controls = [salary_range, button]
inputs = HBox(VBoxForm(*controls), width=400)
update(None, None, None)
curdoc().add_root(HBox(inputs, data_table, width=800))
| bsd-3-clause |
hitszxp/scikit-learn | examples/model_selection/plot_learning_curve.py | 250 | 4171 | """
========================
Plotting Learning Curves
========================
On the left side the learning curve of a naive Bayes classifier is shown for
the digits dataset. Note that the training score and the cross-validation score
are both not very good at the end. However, the shape of the curve can be found
in more complex datasets very often: the training score is very high at the
beginning and decreases and the cross-validation score is very low at the
beginning and increases. On the right side we see the learning curve of an SVM
with RBF kernel. We can see clearly that the training score is still around
the maximum and the validation score could be increased with more training
samples.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import cross_validation
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn.datasets import load_digits
from sklearn.learning_curve import learning_curve
def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None,
n_jobs=1, train_sizes=np.linspace(.1, 1.0, 5)):
"""
    Generate a simple plot of the test and training learning curve.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
title : string
Title for the chart.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
ylim : tuple, shape (ymin, ymax), optional
Defines minimum and maximum yvalues plotted.
cv : integer, cross-validation generator, optional
If an integer is passed, it is the number of folds (defaults to 3).
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
"""
plt.figure()
plt.title(title)
if ylim is not None:
plt.ylim(*ylim)
plt.xlabel("Training examples")
plt.ylabel("Score")
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
plt.grid()
plt.fill_between(train_sizes, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.1,
color="r")
plt.fill_between(train_sizes, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.1, color="g")
plt.plot(train_sizes, train_scores_mean, 'o-', color="r",
label="Training score")
plt.plot(train_sizes, test_scores_mean, 'o-', color="g",
label="Cross-validation score")
plt.legend(loc="best")
return plt
digits = load_digits()
X, y = digits.data, digits.target
title = "Learning Curves (Naive Bayes)"
# Cross validation with 100 iterations to get smoother mean test and train
# score curves, each time with 20% data randomly selected as a validation set.
cv = cross_validation.ShuffleSplit(digits.data.shape[0], n_iter=100,
test_size=0.2, random_state=0)
estimator = GaussianNB()
plot_learning_curve(estimator, title, X, y, ylim=(0.7, 1.01), cv=cv, n_jobs=4)
title = "Learning Curves (SVM, RBF kernel, $\gamma=0.001$)"
# SVC is more expensive so we do a lower number of CV iterations:
cv = cross_validation.ShuffleSplit(digits.data.shape[0], n_iter=10,
test_size=0.2, random_state=0)
estimator = SVC(gamma=0.001)
plot_learning_curve(estimator, title, X, y, (0.7, 1.01), cv=cv, n_jobs=4)
plt.show()
| bsd-3-clause |
joefutrelle/pyifcb | ifcb/data/h5utils.py | 1 | 3882 | """
Convenience utilities for ``h5py``.
"""
from contextlib import contextmanager
import numpy as np
import pandas as pd
import h5py as h5
H5_REF_TYPE = h5.special_dtype(ref=h5.Reference)
def clear_h5_group(h5group):
    """
    Delete all keys and attrs from an ``h5py.Group``.
    :param h5group: the h5py.Group
    """
    # Materialize the key views first so we do not mutate the group (or its
    # attrs) while iterating over them.
    for k in list(h5group.keys()):
        del h5group[k]
    for k in list(h5group.attrs.keys()):
        del h5group.attrs[k]
class hdfopen(object):
"""
Context manager that opens an ``h5py.Group`` from an ``h5py.File``
or other group.
Parameters:
* path - path to HDF5 file, or open HDF5 group
* group - for HDF5 file paths, the path of the group to return (optional)
for groups, a subgroup to require (optional)
* replace - whether to replace any existing data
:Example:
>>> with hdfopen('myfile.h5','somegroup') as g:
... g.attrs['my_attr'] = 3
"""
def __init__(self, path, group=None, replace=None):
if isinstance(path, h5.Group):
if group is not None:
self.group = path.require_group(group)
else:
self.group = path
if replace:
clear_h5_group(self.group)
self._file = None
else:
mode = 'w' if replace else 'r+'
self._file = h5.File(path, mode)
if group is not None:
self.group = self._file.require_group(group)
else:
self.group = self._file
def close(self):
if self._file is not None:
self._file.close()
def __enter__(self, *args, **kw):
return self.group
def __exit__(self, *args):
self.close()
pass
def pd2hdf(group, df, **kw):
"""
Write ``pandas.DataFrame`` to HDF5 file. This differs
from pandas's own HDF5 support by providing a slightly less
optimized but easier-to-access format. Passes keywords
through to each ``h5py.create_dataset`` operation.
Layout of Pandas ``DataFrame`` / ``Series`` representation:
* ``{path}`` (group): the group containing the dataframe
* ``{path}.ptype`` (attribute): '``DataFrame``'
* ``{path}/columns`` (dataset): 1d array of references to column data
* ``{path}/columns.names`` (attribute, optional): 1d array of column names
* ``{path}/{n}`` (dataset): 1d array of data for column n
* ``{path}/index`` (dataset): 1d array of data for dataframe index
* ``{path}/index.name`` (attribute, optional): name of index
:param group: the ``h5py.Group`` to write the ``DataFrame`` to
"""
group.attrs['ptype'] = 'DataFrame'
refs = []
for i in range(len(df.columns)):
c = group.create_dataset(str(i), data=df.iloc[:,i], **kw)
refs.append(c.ref)
cols = group.create_dataset('columns', data=refs, dtype=H5_REF_TYPE)
if df.columns.dtype == np.int64:
cols.attrs['names'] = [int(col) for col in df.columns]
else:
cols.attrs['names'] = [col.encode('utf8') for col in df.columns]
ix = group.create_dataset('index', data=df.index, **kw)
if df.index.name is not None:
ix.attrs['name'] = df.index.name
def hdf2pd(group):
"""
Read a ``pandas.DataFrame`` from an ``h5py.Group``.
:param group: the ``h5py.Group`` to read from.
"""
if group.attrs['ptype'] != 'DataFrame':
raise ValueError('unrecognized HDF format')
index = group['index']
index_name = index.attrs.get('name',None)
col_refs = group['columns']
col_data = [np.array(group[r]) for r in col_refs]
col_names = col_refs.attrs.get('names')
if type(col_names[0]) == np.bytes_:
col_names = [str(cn,'utf8') for cn in col_names]
data = { k: v for k, v in zip(col_names, col_data) }
index = pd.Series(index, name=index_name)
return pd.DataFrame(data=data, index=index, columns=col_names)
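# ---------------------------------------------------------------------------
# Round-trip sketch (illustrative only, not part of the original module).
# It writes a small DataFrame into a group of an HDF5 file using the layout
# documented in pd2hdf, then reads it back; the file and group names are
# arbitrary placeholders.
# ---------------------------------------------------------------------------
def _roundtrip_example(path='example.h5'):
    df = pd.DataFrame({'a': [1, 2, 3], 'b': [0.1, 0.2, 0.3]})
    with hdfopen(path, 'mydata', replace=True) as g:
        pd2hdf(g, df)
    with hdfopen(path, 'mydata') as g:
        return hdf2pd(g)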
| mit |
JsNoNo/scikit-learn | sklearn/decomposition/tests/test_factor_analysis.py | 222 | 3055 | # Author: Christian Osendorfer <[email protected]>
# Alexandre Gramfort <[email protected]>
# Licence: BSD3
import numpy as np
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils import ConvergenceWarning
from sklearn.decomposition import FactorAnalysis
def test_factor_analysis():
# Test FactorAnalysis ability to recover the data covariance structure
rng = np.random.RandomState(0)
n_samples, n_features, n_components = 20, 5, 3
# Some random settings for the generative model
W = rng.randn(n_components, n_features)
# latent variable of dim 3, 20 of it
h = rng.randn(n_samples, n_components)
# using gamma to model different noise variance
# per component
noise = rng.gamma(1, size=n_features) * rng.randn(n_samples, n_features)
# generate observations
# wlog, mean is 0
X = np.dot(h, W) + noise
assert_raises(ValueError, FactorAnalysis, svd_method='foo')
fa_fail = FactorAnalysis()
fa_fail.svd_method = 'foo'
assert_raises(ValueError, fa_fail.fit, X)
fas = []
for method in ['randomized', 'lapack']:
fa = FactorAnalysis(n_components=n_components, svd_method=method)
fa.fit(X)
fas.append(fa)
X_t = fa.transform(X)
assert_equal(X_t.shape, (n_samples, n_components))
assert_almost_equal(fa.loglike_[-1], fa.score_samples(X).sum())
assert_almost_equal(fa.score_samples(X).mean(), fa.score(X))
diff = np.all(np.diff(fa.loglike_))
        assert_greater(diff, 0., 'Log likelihood did not increase')
# Sample Covariance
scov = np.cov(X, rowvar=0., bias=1.)
# Model Covariance
mcov = fa.get_covariance()
diff = np.sum(np.abs(scov - mcov)) / W.size
assert_less(diff, 0.1, "Mean absolute difference is %f" % diff)
fa = FactorAnalysis(n_components=n_components,
noise_variance_init=np.ones(n_features))
assert_raises(ValueError, fa.fit, X[:, :2])
f = lambda x, y: np.abs(getattr(x, y)) # sign will not be equal
fa1, fa2 = fas
for attr in ['loglike_', 'components_', 'noise_variance_']:
assert_almost_equal(f(fa1, attr), f(fa2, attr))
fa1.max_iter = 1
fa1.verbose = True
assert_warns(ConvergenceWarning, fa1.fit, X)
# Test get_covariance and get_precision with n_components == n_features
# with n_components < n_features and with n_components == 0
for n_components in [0, 2, X.shape[1]]:
fa.n_components = n_components
fa.fit(X)
cov = fa.get_covariance()
precision = fa.get_precision()
assert_array_almost_equal(np.dot(cov, precision),
np.eye(X.shape[1]), 12)
| bsd-3-clause |
cloudera/ibis | ibis/backends/impala/pandas_interop.py | 2 | 3588 | # Copyright 2014 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import csv
import os
import tempfile
from posixpath import join as pjoin
import ibis.common.exceptions as com
import ibis.expr.schema as sch
import ibis.util as util
from ibis.config import options
class DataFrameWriter:
"""
Interface class for writing pandas objects to Impala tables
Class takes ownership of any temporary data written to HDFS
"""
def __init__(self, client, df, path=None):
self.client = client
self.hdfs = client.hdfs
self.df = df
self.temp_hdfs_dirs = []
def write_temp_csv(self):
temp_hdfs_dir = pjoin(
options.impala.temp_hdfs_path, 'pandas_{}'.format(util.guid())
)
self.hdfs.mkdir(temp_hdfs_dir)
# Keep track of the temporary HDFS file
self.temp_hdfs_dirs.append(temp_hdfs_dir)
# Write the file to HDFS
hdfs_path = pjoin(temp_hdfs_dir, '0.csv')
self.write_csv(hdfs_path)
return temp_hdfs_dir
def write_csv(self, path):
# Use a temporary dir instead of a temporary file
# to provide Windows support and avoid #2267
# https://github.com/ibis-project/ibis/issues/2267
with tempfile.TemporaryDirectory() as f:
# Write the DataFrame to the temporary file path
tmp_file_path = os.path.join(f, 'impala_temp_file.csv')
if options.verbose:
util.log(
'Writing DataFrame to temporary directory {}'.format(
tmp_file_path
)
)
self.df.to_csv(
tmp_file_path,
header=False,
index=False,
sep=',',
quoting=csv.QUOTE_NONE,
escapechar='\\',
na_rep='#NULL',
)
if options.verbose:
util.log('Writing CSV to: {0}'.format(path))
self.hdfs.put(path, tmp_file_path)
return path
def get_schema(self):
# define a temporary table using delimited data
return sch.infer(self.df)
def delimited_table(self, csv_dir, name=None, database=None):
temp_delimited_name = 'ibis_tmp_pandas_{0}'.format(util.guid())
schema = self.get_schema()
return self.client.delimited_file(
csv_dir,
schema,
name=temp_delimited_name,
database=database,
delimiter=',',
na_rep='#NULL',
escapechar='\\\\',
external=True,
persist=False,
)
def __del__(self):
try:
self.cleanup()
except com.IbisError:
pass
def cleanup(self):
for path in self.temp_hdfs_dirs:
self.hdfs.rmdir(path)
self.temp_hdfs_dirs = []
self.csv_dir = None
def write_temp_dataframe(client, df):
writer = DataFrameWriter(client, df)
path = writer.write_temp_csv()
return writer, writer.delimited_table(path)
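# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): typical use of
# DataFrameWriter through write_temp_dataframe with an Impala client that has
# an HDFS connection.  The client itself is assumed to be created elsewhere
# (e.g. via ibis.impala.connect) and is passed in here.
# ---------------------------------------------------------------------------
def _example_usage(client, df):
    """Load `df` into a temporary Impala table, count its rows, clean up."""
    writer, temp_table = write_temp_dataframe(client, df)
    try:
        return temp_table.count().execute()
    finally:
        writer.cleanup()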
| apache-2.0 |
sannecottaar/burnman | misc/benchmarks/solidsolution_benchmarks.py | 6 | 6100 | from __future__ import absolute_import
# Benchmarks for the solid solution class
import os.path
import sys
sys.path.insert(1, os.path.abspath('../..'))
import burnman
from burnman import minerals
from burnman.processchemistry import *
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
'''
Solvus shapes (a proxy for Gibbs free energy checking
'''
# van Laar parameter
# Figure 2a of Holland and Powell, 2003
# Temperature dependence
# Figure 2b of Holland and Powell, 2003
# A specific solvus example: sanidine-high albite
# Includes asymmetry and pressure, temperature dependence
# Figure 3 of Holland and Powell, 2003
'''
Excess properties
'''
# Configurational entropy
# Navrotsky and Kleppa, 1967
class o_d_spinel(burnman.SolidSolution):
def __init__(self):
        self.name = 'ordered-disordered spinel'
self.solution_type = 'symmetric'
self.endmembers = [[minerals.HP_2011_ds62.sp(), '[Mg][Al]2O4'], [
minerals.HP_2011_ds62.sp(), '[Al][Mg1/2Al1/2]2O4']]
self.energy_interaction = [[0.0]]
burnman.SolidSolution.__init__(self)
comp = np.linspace(0.001, 0.999, 100)
sp = o_d_spinel()
sp_entropies = np.empty_like(comp)
sp_entropies_NK1967 = np.empty_like(comp)
for i, c in enumerate(comp):
molar_fractions = [1.0 - c, c]
sp.set_composition(np.array(molar_fractions))
sp.set_state(1e5, 298.15)
sp_entropies[i] = sp.solution_model._configurational_entropy(
molar_fractions)
sp_entropies_NK1967[i] = -8.3145 * (c * np.log(c) + (1. - c) * np.log(1. - c) + c * np.log(
c / 2.) + (2. - c) * np.log(1. - c / 2.)) # eq. 7 in Navrotsky and Kleppa, 1967.
# fig1 = mpimg.imread('configurational_entropy.png') # Uncomment these two lines if you want to overlay the plot on a screengrab from SLB2011
# plt.imshow(fig1, extent=[0.0, 1.0,0.,17.0], aspect='auto')
plt.plot(comp, sp_entropies_NK1967, 'b-', linewidth=3.)
plt.plot(comp, sp_entropies, 'r--', linewidth=3.)
plt.xlim(0.0, 1.0)
plt.ylim(0., 17.0)
plt.ylabel("Configurational entropy of solution (J/K/mol)")
plt.xlabel("fraction inverse spinel")
plt.show()
# Configurational entropy
# Figure 3b of Stixrude and Lithgow-Bertelloni, 2011
class orthopyroxene_red(burnman.SolidSolution):
def __init__(self):
self.name = 'orthopyroxene'
self.solution_type = 'symmetric'
self.endmembers = [[minerals.SLB_2011.enstatite(), 'Mg[Mg][Si]SiO6'], [
minerals.SLB_2011.mg_tschermaks(), 'Mg[Al][Al]SiO6']]
self.energy_interaction = [[0.0]]
burnman.SolidSolution.__init__(self)
class orthopyroxene_blue(burnman.SolidSolution):
def __init__(self):
self.name = 'orthopyroxene'
self.solution_type = 'symmetric'
self.endmembers = [[minerals.SLB_2011.enstatite(), 'Mg[Mg]Si2O6'], [
minerals.SLB_2011.mg_tschermaks(), 'Mg[Al]AlSiO6']]
self.energy_interaction = [[0.0]]
burnman.SolidSolution.__init__(self)
class orthopyroxene_long_dashed(burnman.SolidSolution):
def __init__(self):
self.name = 'orthopyroxene'
self.solution_type = 'symmetric'
self.endmembers = [[minerals.SLB_2011.enstatite(), 'Mg[Mg]Si2O6'], [
minerals.SLB_2011.mg_tschermaks(), '[Mg1/2Al1/2]2AlSiO6']]
self.energy_interaction = [[10.0e3]]
burnman.SolidSolution.__init__(self)
class orthopyroxene_short_dashed(burnman.SolidSolution):
def __init__(self):
self.name = 'orthopyroxene'
self.solution_type = 'symmetric'
self.endmembers = [[minerals.SLB_2011.enstatite(), 'Mg[Mg][Si]2O6'], [
minerals.SLB_2011.mg_tschermaks(), 'Mg[Al][Al1/2Si1/2]2O6']]
self.energy_interaction = [[0.0]]
burnman.SolidSolution.__init__(self)
comp = np.linspace(0, 1.0, 100)
opx_models = [orthopyroxene_red(), orthopyroxene_blue(),
orthopyroxene_long_dashed(), orthopyroxene_short_dashed()]
opx_entropies = [np.empty_like(comp) for model in opx_models]
for idx, model in enumerate(opx_models):
for i, c in enumerate(comp):
molar_fractions = [1.0 - c, c]
model.set_composition(np.array(molar_fractions))
model.set_state(0., 0.)
opx_entropies[idx][
i] = model.solution_model._configurational_entropy(molar_fractions)
fig1 = mpimg.imread('configurational_entropy.png')
# These two lines overlay the plot on a screengrab from SLB2011;
# comment them out if you do not want the overlay
plt.imshow(fig1, extent=[0.0, 1.0, 0., 17.0], aspect='auto')
plt.plot(comp, opx_entropies[0], 'r--', linewidth=3.)
plt.plot(comp, opx_entropies[1], 'b--', linewidth=3.)
plt.plot(comp, opx_entropies[2], 'g--', linewidth=3.)
plt.plot(comp, opx_entropies[3], 'g-.', linewidth=3.)
plt.xlim(0.0, 1.0)
plt.ylim(0., 17.0)
plt.ylabel("Configurational entropy of solution (J/K/mol)")
plt.xlabel("cats fraction")
plt.show()
# Excess volume of solution
# Excess energy of solution
# Figure 5 of Stixrude and Lithgow-Bertelloni, 2011
class clinopyroxene(burnman.SolidSolution):
def __init__(self):
self.name = 'clinopyroxene'
self.solution_type = 'asymmetric'
self.endmembers = [[minerals.SLB_2011.diopside(), '[Ca][Mg][Si]2O6'], [
minerals.SLB_2011.ca_tschermaks(), '[Ca][Al][Si1/2Al1/2]2O6']]
self.energy_interaction = [[26.e3]]
self.alphas = [1.0, 3.5]
burnman.SolidSolution.__init__(self)
cpx = clinopyroxene()
comp = np.linspace(0, 1.0, 100)
gibbs = np.empty_like(comp)
for i, c in enumerate(comp):
cpx.set_composition(np.array([1.0 - c, c]))
cpx.set_state(0., 0.)
gibbs[i] = cpx.excess_gibbs
fig1 = mpimg.imread('dicats.png')
# These two lines overlay the plot on a screengrab from SLB2011;
# comment them out if you do not want the overlay
plt.imshow(fig1, extent=[0.0, 1.0, -2., 8.0], aspect='auto')
plt.plot(comp, gibbs / 1000., 'b--', linewidth=3.)
plt.xlim(0.0, 1.0)
plt.ylim(-2., 8.0)
plt.ylabel("Excess energy of solution (kJ/mol)")
plt.xlabel("cats fraction")
plt.show()
| gpl-2.0 |
marcocaccin/scikit-learn | examples/decomposition/plot_faces_decomposition.py | 103 | 4394 | """
============================
Faces dataset decompositions
============================
This example applies to :ref:`olivetti_faces` different unsupervised
matrix decomposition (dimension reduction) methods from the module
:py:mod:`sklearn.decomposition` (see the documentation chapter
:ref:`decompositions`) .
"""
print(__doc__)
# Authors: Vlad Niculae, Alexandre Gramfort
# License: BSD 3 clause
import logging
from time import time
from numpy.random import RandomState
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces
from sklearn.cluster import MiniBatchKMeans
from sklearn import decomposition
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
n_row, n_col = 2, 3
n_components = n_row * n_col
image_shape = (64, 64)
rng = RandomState(0)
###############################################################################
# Load faces data
dataset = fetch_olivetti_faces(shuffle=True, random_state=rng)
faces = dataset.data
n_samples, n_features = faces.shape
# global centering
faces_centered = faces - faces.mean(axis=0)
# local centering
faces_centered -= faces_centered.mean(axis=1).reshape(n_samples, -1)
print("Dataset consists of %d faces" % n_samples)
###############################################################################
def plot_gallery(title, images, n_col=n_col, n_row=n_row):
plt.figure(figsize=(2. * n_col, 2.26 * n_row))
plt.suptitle(title, size=16)
for i, comp in enumerate(images):
plt.subplot(n_row, n_col, i + 1)
vmax = max(comp.max(), -comp.min())
plt.imshow(comp.reshape(image_shape), cmap=plt.cm.gray,
interpolation='nearest',
vmin=-vmax, vmax=vmax)
plt.xticks(())
plt.yticks(())
plt.subplots_adjust(0.01, 0.05, 0.99, 0.93, 0.04, 0.)
###############################################################################
# List of the different estimators, whether to center and transpose the
# problem, and whether the transformer uses the clustering API.
estimators = [
('Eigenfaces - RandomizedPCA',
decomposition.RandomizedPCA(n_components=n_components, whiten=True),
True),
('Non-negative components - NMF',
decomposition.NMF(n_components=n_components, init='nndsvda', tol=5e-3),
False),
('Independent components - FastICA',
decomposition.FastICA(n_components=n_components, whiten=True),
True),
('Sparse comp. - MiniBatchSparsePCA',
decomposition.MiniBatchSparsePCA(n_components=n_components, alpha=0.8,
n_iter=100, batch_size=3,
random_state=rng),
True),
('MiniBatchDictionaryLearning',
decomposition.MiniBatchDictionaryLearning(n_components=15, alpha=0.1,
n_iter=50, batch_size=3,
random_state=rng),
True),
('Cluster centers - MiniBatchKMeans',
MiniBatchKMeans(n_clusters=n_components, tol=1e-3, batch_size=20,
max_iter=50, random_state=rng),
True),
('Factor Analysis components - FA',
decomposition.FactorAnalysis(n_components=n_components, max_iter=2),
True),
]
###############################################################################
# Plot a sample of the input data
plot_gallery("First centered Olivetti faces", faces_centered[:n_components])
###############################################################################
# Do the estimation and plot it
for name, estimator, center in estimators:
print("Extracting the top %d %s..." % (n_components, name))
t0 = time()
data = faces
if center:
data = faces_centered
estimator.fit(data)
train_time = (time() - t0)
print("done in %0.3fs" % train_time)
if hasattr(estimator, 'cluster_centers_'):
components_ = estimator.cluster_centers_
else:
components_ = estimator.components_
if hasattr(estimator, 'noise_variance_'):
plot_gallery("Pixelwise variance",
estimator.noise_variance_.reshape(1, -1), n_col=1,
n_row=1)
plot_gallery('%s - Train time %.1fs' % (name, train_time),
components_[:n_components])
plt.show()
| bsd-3-clause |
Asurada2015/TFAPI_translation | Variables/tf_get_Variable.py | 1 | 1313 | """tf.get_variable(name, shape, initializer): name is the variable's name, shape is its dimensions, and initializer specifies how the variable is initialized. The available initializers are:
tf.constant_initializer: constant initializer
tf.random_normal_initializer: normal distribution
tf.truncated_normal_initializer: truncated normal distribution
tf.random_uniform_initializer: uniform distribution
tf.zeros_initializer: all zeros
tf.ones_initializer: all ones
tf.uniform_unit_scaling_initializer: uniform random values that do not change the order of magnitude of the output"""
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
a1 = tf.get_variable(name='a1', shape=[2, 3], initializer=tf.random_normal_initializer(mean=0, stddev=1))
a2 = tf.get_variable(name='a2', shape=[1], initializer=tf.constant_initializer(1))
a3 = tf.get_variable(name='a3', shape=[2, 3], initializer=tf.ones_initializer())
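# Illustrative addition (not part of the original script): tf.random_uniform_initializer
# draws values uniformly from [minval, maxval); the variable name 'a4' is hypothetical.
a4 = tf.get_variable(name='a4', shape=[2, 2], initializer=tf.random_uniform_initializer(minval=0., maxval=1.))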
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
print(sess.run(a1))
print(sess.run(a2))
print(sess.run(a3))
# [[0.42299312 - 0.25459203 - 0.88605702]
# [0.22410156 1.34326422 - 0.39722782]]
# [1.]
# [[1. 1. 1.]
# [1. 1. 1.]]
# Note: different variables cannot share the same name, unless you define a variable_scope; only then can the same name be reused.
| apache-2.0 |
pandegroup/osprey | osprey/plugins/plugin_pylearn2.py | 5 | 8020 | from __future__ import print_function, absolute_import, division
import numpy as np
import re
from string import Template
from sklearn.base import BaseEstimator
from sklearn.metrics import accuracy_score, mean_squared_error
from osprey.dataset_loaders import BaseDatasetLoader
class Pylearn2Estimator(BaseEstimator):
"""
Wrapper for pylearn2 models that conforms to the sklearn BaseEstimator API.
This class does not handle Train objects directly, because it is much
simpler to handle set_params with the YAML and simple string formatting.
YAML strings should be formatted for compatibility with string.Template.
Parameters
----------
yaml_string : str
YAML string defining the model.
"""
def __init__(self, yaml_string, **kwargs):
self.trainer = None
self.yaml_string = yaml_string
self.set_params(**kwargs)
def _get_param_names(self):
"""
Get mappable parameters from YAML.
"""
template = Template(self.yaml_string)
names = ['yaml_string'] # always include the template
for match in re.finditer(template.pattern, template.template):
name = match.group('named') or match.group('braced')
assert name is not None
names.append(name)
return names
def _get_dataset(self, X, y=None):
"""
Construct a pylearn2 dataset.
Parameters
----------
X : array_like
Training examples.
y : array_like, optional
Labels.
"""
from pylearn2.datasets import DenseDesignMatrix
X = np.asarray(X)
assert X.ndim > 1
if y is not None:
y = self._get_labels(y)
if X.ndim == 2:
return DenseDesignMatrix(X=X, y=y)
return DenseDesignMatrix(topo_view=X, y=y)
def _get_labels(self, y):
"""
Construct pylearn2 dataset labels.
Parameters
----------
y : array_like, optional
Labels.
"""
y = np.asarray(y)
if y.ndim == 1:
return y.reshape((y.size, 1))
assert y.ndim == 2
return y
def fit(self, X, y=None):
"""
Build a trainer and run main_loop.
Parameters
----------
X : array_like
Training examples.
y : array_like, optional
Labels.
"""
from pylearn2.config import yaml_parse
from pylearn2.train import Train
# build trainer
params = self.get_params()
yaml_string = Template(self.yaml_string).substitute(params)
self.trainer = yaml_parse.load(yaml_string)
assert isinstance(self.trainer, Train)
if self.trainer.dataset is not None:
            raise ValueError('Train YAML dataset must evaluate to None.')
self.trainer.dataset = self._get_dataset(X, y)
# update monitoring dataset(s)
if (hasattr(self.trainer.algorithm, 'monitoring_dataset') and
self.trainer.algorithm.monitoring_dataset is not None):
monitoring_dataset = self.trainer.algorithm.monitoring_dataset
if len(monitoring_dataset) == 1 and '' in monitoring_dataset:
monitoring_dataset[''] = self.trainer.dataset
else:
monitoring_dataset['train'] = self.trainer.dataset
self.trainer.algorithm._set_monitoring_dataset(monitoring_dataset)
else:
self.trainer.algorithm._set_monitoring_dataset(
self.trainer.dataset)
# run main loop
self.trainer.main_loop()
def predict(self, X):
"""
Get model predictions.
Parameters
----------
X : array_like
Test dataset.
"""
return self._predict(X)
def _predict(self, X, method='fprop'):
"""
Get model predictions.
See pylearn2.scripts.mlp.predict_csv and
http://fastml.com/how-to-get-predictions-from-pylearn2/.
Parameters
----------
X : array_like
Test dataset.
method : str
Model method to call for prediction.
"""
import theano
X_sym = self.trainer.model.get_input_space().make_theano_batch()
y_sym = getattr(self.trainer.model, method)(X_sym)
f = theano.function([X_sym], y_sym, allow_input_downcast=True)
return f(X)
def score(self, X, y):
"""
Score predictions.
Parameters
----------
X : array_like
Test examples.
y : array_like, optional
Labels.
"""
raise NotImplementedError('should be implemented in subclasses')
class Pylearn2Classifier(Pylearn2Estimator):
"""
pylearn2 classifier.
"""
def _get_labels(self, y):
"""
Construct pylearn2 dataset labels.
Parameters
----------
y : array_like, optional
Labels.
"""
y = np.asarray(y)
assert y.ndim == 1
# convert to one-hot
labels = np.unique(y).tolist()
oh = np.zeros((y.size, len(labels)), dtype=float)
for i, label in enumerate(y):
oh[i, labels.index(label)] = 1.
return oh
def predict_proba(self, X):
"""
Get model predictions.
Parameters
----------
X : array_like
Test dataset.
"""
return self._predict(X)
def predict(self, X):
"""
Get model predictions.
Parameters
----------
X : array_like
Test dataset.
"""
return np.argmax(self._predict(X), axis=1)
def score(self, X, y):
"""
Score predictions.
Parameters
----------
X : array_like
Test examples.
y : array_like, optional
Labels.
"""
return accuracy_score(y, self.predict(X))
class Pylearn2Regressor(Pylearn2Estimator):
"""
pylearn2 regressor.
"""
def score(self, X, y):
"""
Score predictions.
Parameters
----------
X : array_like
Test examples.
y : array_like, optional
Labels.
"""
return mean_squared_error(y, self.predict(X))
class Pylearn2Autoencoder(Pylearn2Estimator):
"""
pylearn2 autoencoder.
"""
def _predict(self, X, method='reconstruct'):
return super(Pylearn2Autoencoder, self)._predict(X, method)
def score(self, X, y):
"""
Score predictions.
Parameters
----------
X : array_like
Test examples.
y : array_like, optional
Labels (not used).
"""
return mean_squared_error(X, self.predict(X))
class Pylearn2DatasetLoader(BaseDatasetLoader):
"""
pylearn2 dataset loader.
Parameters
----------
yaml_string : str
YAML specification of a pylearn2 dataset.
one_hot : bool, optional
Take argmax of one-hot labels to get classes.
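    Examples
    --------
    A hypothetical specification (assuming pylearn2's bundled MNIST wrapper is
    available and accepts this argument) could be::
        !obj:pylearn2.datasets.mnist.MNIST { which_set: 'train' }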
"""
short_name = 'pylearn2_dataset'
def __init__(self, yaml_string, one_hot=False):
self.yaml_string = yaml_string
self.one_hot = one_hot
def load(self):
"""
Load the dataset using pylearn2.config.yaml_parse.
"""
from pylearn2.config import yaml_parse
from pylearn2.datasets import Dataset
dataset = yaml_parse.load(self.yaml_string)
assert isinstance(dataset, Dataset)
data = dataset.iterator(mode='sequential', num_batches=1,
data_specs=dataset.data_specs,
return_tuple=True).next()
if len(data) == 2:
X, y = data
y = np.squeeze(y)
if self.one_hot:
y = np.argmax(y, axis=1)
else:
X = data
y = None
return X, y
| apache-2.0 |
cpcloud/dask | dask/array/learn.py | 5 | 3248 | from __future__ import absolute_import, division, print_function
import numpy as np
from toolz import merge, partial
from ..base import tokenize
from .. import threaded
def _partial_fit(model, x, y, kwargs=None):
kwargs = kwargs or dict()
model.partial_fit(x, y, **kwargs)
return model
def fit(model, x, y, get=threaded.get, **kwargs):
""" Fit scikit learn model against dask arrays
Model must support the ``partial_fit`` interface for online or batch
learning.
This method will be called on dask arrays in sequential order. Ideally
your rows are independent and identically distributed.
Parameters
----------
model: sklearn model
Any model supporting partial_fit interface
x: dask Array
Two dimensional array, likely tall and skinny
y: dask Array
One dimensional array with same chunks as x's rows
kwargs:
options to pass to partial_fit
Examples
--------
>>> import dask.array as da
>>> X = da.random.random((10, 3), chunks=(5, 3))
>>> y = da.random.randint(0, 2, 10, chunks=(5,))
>>> from sklearn.linear_model import SGDClassifier
>>> sgd = SGDClassifier()
>>> sgd = da.learn.fit(sgd, X, y, classes=[1, 0])
>>> sgd # doctest: +SKIP
SGDClassifier(alpha=0.0001, class_weight=None, epsilon=0.1, eta0=0.0,
fit_intercept=True, l1_ratio=0.15, learning_rate='optimal',
loss='hinge', n_iter=5, n_jobs=1, penalty='l2', power_t=0.5,
random_state=None, shuffle=False, verbose=0, warm_start=False)
This passes all of X and y through the classifier sequentially. We can use
the classifier as normal on in-memory data
>>> import numpy as np
>>> sgd.predict(np.random.random((4, 3))) # doctest: +SKIP
array([1, 0, 0, 1])
Or predict on a larger dataset
>>> z = da.random.random((400, 3), chunks=(100, 3))
>>> da.learn.predict(sgd, z) # doctest: +SKIP
dask.array<x_11, shape=(400,), chunks=((100, 100, 100, 100),), dtype=int64>
"""
assert x.ndim == 2
assert y.ndim == 1
assert x.chunks[0] == y.chunks[0]
assert hasattr(model, 'partial_fit')
if len(x.chunks[1]) > 1:
x = x.reblock(chunks=(x.chunks[0], sum(x.chunks[1])))
nblocks = len(x.chunks[0])
name = 'fit-' + tokenize(model, x, y, kwargs)
dsk = {(name, -1): model}
dsk.update(dict(((name, i), (_partial_fit, (name, i - 1),
(x.name, i, 0),
(y.name, i), kwargs))
for i in range(nblocks)))
return get(merge(x.dask, y.dask, dsk), (name, nblocks - 1))
def _predict(model, x):
return model.predict(x)[:, None]
def predict(model, x):
""" Predict with a scikit learn model
Parameters
----------
model : scikit learn classifier
x : dask Array
See docstring for ``da.learn.fit``
"""
assert x.ndim == 2
if len(x.chunks[1]) > 1:
x = x.reblock(chunks=(x.chunks[0], sum(x.chunks[1])))
func = partial(_predict, model)
xx = np.zeros((1, x.shape[1]), dtype=x.dtype)
dt = model.predict(xx).dtype
return x.map_blocks(func, chunks=(x.chunks[0], (1,)), dtype=dt).squeeze()
| bsd-3-clause |
scalable-networks/gnuradio-3.7.0.1 | gr-filter/examples/fir_filter_ccc.py | 5 | 3230 | #!/usr/bin/env python
from gnuradio import gr, filter
from gnuradio import analog
from gnuradio import blocks
from gnuradio import eng_notation
from gnuradio.eng_option import eng_option
from optparse import OptionParser
import sys  # needed for sys.exit() in the import error handlers below
try:
import scipy
except ImportError:
print "Error: could not import scipy (http://www.scipy.org/)"
sys.exit(1)
try:
import pylab
except ImportError:
print "Error: could not import pylab (http://matplotlib.sourceforge.net/)"
sys.exit(1)
class example_fir_filter_ccc(gr.top_block):
def __init__(self, N, fs, bw, tw, atten, D):
gr.top_block.__init__(self)
self._nsamps = N
self._fs = fs
self._bw = bw
self._tw = tw
self._at = atten
self._decim = D
taps = filter.firdes.low_pass_2(1, self._fs, self._bw, self._tw, self._at)
print "Num. Taps: ", len(taps)
self.src = analog.noise_source_c(analog.GR_GAUSSIAN, 1)
self.head = blocks.head(gr.sizeof_gr_complex, self._nsamps)
self.filt0 = filter.fir_filter_ccc(self._decim, taps)
self.vsnk_src = blocks.vector_sink_c()
self.vsnk_out = blocks.vector_sink_c()
self.connect(self.src, self.head, self.vsnk_src)
self.connect(self.head, self.filt0, self.vsnk_out)
def main():
parser = OptionParser(option_class=eng_option, conflict_handler="resolve")
parser.add_option("-N", "--nsamples", type="int", default=10000,
help="Number of samples to process [default=%default]")
parser.add_option("-s", "--samplerate", type="eng_float", default=8000,
help="System sample rate [default=%default]")
parser.add_option("-B", "--bandwidth", type="eng_float", default=1000,
help="Filter bandwidth [default=%default]")
parser.add_option("-T", "--transition", type="eng_float", default=100,
help="Transition band [default=%default]")
parser.add_option("-A", "--attenuation", type="eng_float", default=80,
help="Stopband attenuation [default=%default]")
parser.add_option("-D", "--decimation", type="int", default=1,
help="Decmation factor [default=%default]")
(options, args) = parser.parse_args ()
put = example_fir_filter_ccc(options.nsamples,
options.samplerate,
options.bandwidth,
options.transition,
options.attenuation,
options.decimation)
put.run()
data_src = scipy.array(put.vsnk_src.data())
data_snk = scipy.array(put.vsnk_out.data())
# Plot the signals PSDs
nfft = 1024
f1 = pylab.figure(1, figsize=(12,10))
s1 = f1.add_subplot(1,1,1)
s1.psd(data_src, NFFT=nfft, noverlap=nfft/4,
Fs=options.samplerate)
s1.psd(data_snk, NFFT=nfft, noverlap=nfft/4,
Fs=options.samplerate)
f2 = pylab.figure(2, figsize=(12,10))
s2 = f2.add_subplot(1,1,1)
s2.plot(data_src)
s2.plot(data_snk.real, 'g')
pylab.show()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
| gpl-3.0 |
bthirion/scikit-learn | sklearn/manifold/tests/test_isomap.py | 121 | 4301 | from itertools import product
import numpy as np
from numpy.testing import (assert_almost_equal, assert_array_almost_equal,
assert_equal)
from sklearn import datasets
from sklearn import manifold
from sklearn import neighbors
from sklearn import pipeline
from sklearn import preprocessing
from sklearn.utils.testing import assert_less
eigen_solvers = ['auto', 'dense', 'arpack']
path_methods = ['auto', 'FW', 'D']
def test_isomap_simple_grid():
# Isomap should preserve distances when all neighbors are used
N_per_side = 5
Npts = N_per_side ** 2
n_neighbors = Npts - 1
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(N_per_side), repeat=2)))
# distances from each point to all others
G = neighbors.kneighbors_graph(X, n_neighbors,
mode='distance').toarray()
for eigen_solver in eigen_solvers:
for path_method in path_methods:
clf = manifold.Isomap(n_neighbors=n_neighbors, n_components=2,
eigen_solver=eigen_solver,
path_method=path_method)
clf.fit(X)
G_iso = neighbors.kneighbors_graph(clf.embedding_,
n_neighbors,
mode='distance').toarray()
assert_array_almost_equal(G, G_iso)
def test_isomap_reconstruction_error():
# Same setup as in test_isomap_simple_grid, with an added dimension
N_per_side = 5
Npts = N_per_side ** 2
n_neighbors = Npts - 1
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(N_per_side), repeat=2)))
# add noise in a third dimension
rng = np.random.RandomState(0)
noise = 0.1 * rng.randn(Npts, 1)
X = np.concatenate((X, noise), 1)
# compute input kernel
G = neighbors.kneighbors_graph(X, n_neighbors,
mode='distance').toarray()
centerer = preprocessing.KernelCenterer()
K = centerer.fit_transform(-0.5 * G ** 2)
for eigen_solver in eigen_solvers:
for path_method in path_methods:
clf = manifold.Isomap(n_neighbors=n_neighbors, n_components=2,
eigen_solver=eigen_solver,
path_method=path_method)
clf.fit(X)
# compute output kernel
G_iso = neighbors.kneighbors_graph(clf.embedding_,
n_neighbors,
mode='distance').toarray()
K_iso = centerer.fit_transform(-0.5 * G_iso ** 2)
# make sure error agrees
reconstruction_error = np.linalg.norm(K - K_iso) / Npts
assert_almost_equal(reconstruction_error,
clf.reconstruction_error())
def test_transform():
n_samples = 200
n_components = 10
noise_scale = 0.01
# Create S-curve dataset
X, y = datasets.samples_generator.make_s_curve(n_samples, random_state=0)
# Compute isomap embedding
iso = manifold.Isomap(n_components, 2)
X_iso = iso.fit_transform(X)
# Re-embed a noisy version of the points
rng = np.random.RandomState(0)
noise = noise_scale * rng.randn(*X.shape)
X_iso2 = iso.transform(X + noise)
# Make sure the rms error on re-embedding is comparable to noise_scale
assert_less(np.sqrt(np.mean((X_iso - X_iso2) ** 2)), 2 * noise_scale)
def test_pipeline():
# check that Isomap works fine as a transformer in a Pipeline
# only checks that no error is raised.
# TODO check that it actually does something useful
X, y = datasets.make_blobs(random_state=0)
clf = pipeline.Pipeline(
[('isomap', manifold.Isomap()),
('clf', neighbors.KNeighborsClassifier())])
clf.fit(X, y)
assert_less(.9, clf.score(X, y))
def test_isomap_clone_bug():
# regression test for bug reported in #6062
model = manifold.Isomap()
for n_neighbors in [10, 15, 20]:
model.set_params(n_neighbors=n_neighbors)
model.fit(np.random.rand(50, 2))
assert_equal(model.nbrs_.n_neighbors,
n_neighbors)
| bsd-3-clause |
nmayorov/scikit-learn | sklearn/tests/test_learning_curve.py | 59 | 10869 | # Author: Alexander Fabisch <[email protected]>
#
# License: BSD 3 clause
import sys
from sklearn.externals.six.moves import cStringIO as StringIO
import numpy as np
import warnings
from sklearn.base import BaseEstimator
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.datasets import make_classification
with warnings.catch_warnings():
warnings.simplefilter('ignore')
from sklearn.learning_curve import learning_curve, validation_curve
from sklearn.cross_validation import KFold
from sklearn.linear_model import PassiveAggressiveClassifier
class MockImprovingEstimator(BaseEstimator):
"""Dummy classifier to test the learning curve"""
def __init__(self, n_max_train_sizes):
self.n_max_train_sizes = n_max_train_sizes
self.train_sizes = 0
self.X_subset = None
def fit(self, X_subset, y_subset=None):
self.X_subset = X_subset
self.train_sizes = X_subset.shape[0]
return self
def predict(self, X):
raise NotImplementedError
def score(self, X=None, Y=None):
# training score becomes worse (2 -> 1), test error better (0 -> 1)
if self._is_training_data(X):
return 2. - float(self.train_sizes) / self.n_max_train_sizes
else:
return float(self.train_sizes) / self.n_max_train_sizes
def _is_training_data(self, X):
return X is self.X_subset
class MockIncrementalImprovingEstimator(MockImprovingEstimator):
"""Dummy classifier that provides partial_fit"""
def __init__(self, n_max_train_sizes):
super(MockIncrementalImprovingEstimator,
self).__init__(n_max_train_sizes)
self.x = None
def _is_training_data(self, X):
return self.x in X
def partial_fit(self, X, y=None, **params):
self.train_sizes += X.shape[0]
self.x = X[0]
class MockEstimatorWithParameter(BaseEstimator):
"""Dummy classifier to test the validation curve"""
def __init__(self, param=0.5):
self.X_subset = None
self.param = param
def fit(self, X_subset, y_subset):
self.X_subset = X_subset
self.train_sizes = X_subset.shape[0]
return self
def predict(self, X):
raise NotImplementedError
def score(self, X=None, y=None):
return self.param if self._is_training_data(X) else 1 - self.param
def _is_training_data(self, X):
return X is self.X_subset
def test_learning_curve():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
with warnings.catch_warnings(record=True) as w:
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
if len(w) > 0:
raise RuntimeError("Unexpected warning: %r" % w[0].message)
assert_equal(train_scores.shape, (10, 3))
assert_equal(test_scores.shape, (10, 3))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_unsupervised():
X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y=None, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_verbose():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
train_sizes, train_scores, test_scores = \
learning_curve(estimator, X, y, cv=3, verbose=1)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert("[learning_curve]" in out)
def test_learning_curve_incremental_learning_not_possible():
X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
# The mockup does not have partial_fit()
estimator = MockImprovingEstimator(1)
assert_raises(ValueError, learning_curve, estimator, X, y,
exploit_incremental_learning=True)
def test_learning_curve_incremental_learning():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockIncrementalImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=3, exploit_incremental_learning=True,
train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_incremental_learning_unsupervised():
X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockIncrementalImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y=None, cv=3, exploit_incremental_learning=True,
train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_batch_and_incremental_learning_are_equal():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
train_sizes = np.linspace(0.2, 1.0, 5)
estimator = PassiveAggressiveClassifier(n_iter=1, shuffle=False)
train_sizes_inc, train_scores_inc, test_scores_inc = \
learning_curve(
estimator, X, y, train_sizes=train_sizes,
cv=3, exploit_incremental_learning=True)
train_sizes_batch, train_scores_batch, test_scores_batch = \
learning_curve(
estimator, X, y, cv=3, train_sizes=train_sizes,
exploit_incremental_learning=False)
assert_array_equal(train_sizes_inc, train_sizes_batch)
assert_array_almost_equal(train_scores_inc.mean(axis=1),
train_scores_batch.mean(axis=1))
assert_array_almost_equal(test_scores_inc.mean(axis=1),
test_scores_batch.mean(axis=1))
def test_learning_curve_n_sample_range_out_of_bounds():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0, 1])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0.0, 1.0])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0.1, 1.1])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0, 20])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[1, 21])
def test_learning_curve_remove_duplicate_sample_sizes():
X, y = make_classification(n_samples=3, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(2)
train_sizes, _, _ = assert_warns(
RuntimeWarning, learning_curve, estimator, X, y, cv=3,
train_sizes=np.linspace(0.33, 1.0, 3))
assert_array_equal(train_sizes, [1, 2])
def test_learning_curve_with_boolean_indices():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
cv = KFold(n=30, n_folds=3)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=cv, train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_validation_curve():
X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
param_range = np.linspace(0, 1, 10)
with warnings.catch_warnings(record=True) as w:
train_scores, test_scores = validation_curve(
MockEstimatorWithParameter(), X, y, param_name="param",
param_range=param_range, cv=2
)
if len(w) > 0:
raise RuntimeError("Unexpected warning: %r" % w[0].message)
assert_array_almost_equal(train_scores.mean(axis=1), param_range)
assert_array_almost_equal(test_scores.mean(axis=1), 1 - param_range)
| bsd-3-clause |
kylerbrown/scikit-learn | examples/linear_model/plot_sgd_iris.py | 286 | 2202 | """
========================================
Plot multi-class SGD on the iris dataset
========================================
Plot decision surface of multi-class SGD on iris dataset.
The hyperplanes corresponding to the three one-versus-all (OVA) classifiers
are represented by the dashed lines.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.linear_model import SGDClassifier
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
colors = "bry"
# shuffle
idx = np.arange(X.shape[0])
np.random.seed(13)
np.random.shuffle(idx)
X = X[idx]
y = y[idx]
# standardize
mean = X.mean(axis=0)
std = X.std(axis=0)
X = (X - mean) / std
h = .02 # step size in the mesh
clf = SGDClassifier(alpha=0.001, n_iter=100).fit(X, y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis('tight')
# Plot also the training points
for i, color in zip(clf.classes_, colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=color, label=iris.target_names[i],
cmap=plt.cm.Paired)
plt.title("Decision surface of multi-class SGD")
plt.axis('tight')
# Plot the three one-against-all classifiers
xmin, xmax = plt.xlim()
ymin, ymax = plt.ylim()
coef = clf.coef_
intercept = clf.intercept_
def plot_hyperplane(c, color):
def line(x0):
return (-(x0 * coef[c, 0]) - intercept[c]) / coef[c, 1]
plt.plot([xmin, xmax], [line(xmin), line(xmax)],
ls="--", color=color)
for i, color in zip(clf.classes_, colors):
plot_hyperplane(i, color)
plt.legend()
plt.show()
| bsd-3-clause |
wchan/tensorflow | tensorflow/python/client/notebook.py | 26 | 4596 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Notebook front-end to TensorFlow.
When you run this binary, you'll see something like below, which indicates
the serving URL of the notebook:
The IPython Notebook is running at: http://127.0.0.1:8888/
Press "Shift+Enter" to execute a cell
Press "Enter" on a cell to go into edit mode.
Press "Escape" to go back into command mode and use arrow keys to navigate.
Press "a" in command mode to insert cell above or "b" to insert cell below.
Your root notebooks directory is FLAGS.notebook_dir
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import socket
import sys
# pylint: disable=g-import-not-at-top
# Official recommended way of turning on fast protocol buffers as of 10/21/14
os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "cpp"
os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION_VERSION"] = "2"
from tensorflow.python.platform import app
from tensorflow.python.platform import flags
FLAGS = flags.FLAGS
flags.DEFINE_string(
"password", None,
"Password to require. If set, the server will allow public access."
" Only used if notebook config file does not exist.")
flags.DEFINE_string("notebook_dir", "experimental/brain/notebooks",
"root location where to store notebooks")
ORIG_ARGV = sys.argv
# Main notebook process calls itself with argv[1]="kernel" to start kernel
# subprocesses.
IS_KERNEL = len(sys.argv) > 1 and sys.argv[1] == "kernel"
def main(unused_argv):
sys.argv = ORIG_ARGV
if not IS_KERNEL:
# Drop all flags.
sys.argv = [sys.argv[0]]
# NOTE(sadovsky): For some reason, putting this import at the top level
# breaks inline plotting. It's probably a bug in the stone-age version of
# matplotlib.
from IPython.html.notebookapp import NotebookApp # pylint: disable=g-import-not-at-top
notebookapp = NotebookApp.instance()
notebookapp.open_browser = True
# password functionality adopted from quality/ranklab/main/tools/notebook.py
# add options to run with "password"
if FLAGS.password:
from IPython.lib import passwd # pylint: disable=g-import-not-at-top
notebookapp.ip = "0.0.0.0"
notebookapp.password = passwd(FLAGS.password)
else:
print ("\nNo password specified; Notebook server will only be available"
" on the local machine.\n")
notebookapp.initialize(argv=["--notebook-dir", FLAGS.notebook_dir])
if notebookapp.ip == "0.0.0.0":
proto = "https" if notebookapp.certfile else "http"
url = "%s://%s:%d%s" % (proto, socket.gethostname(), notebookapp.port,
notebookapp.base_project_url)
print("\nNotebook server will be publicly available at: %s\n" % url)
notebookapp.start()
return
# Drop the --flagfile flag so that notebook doesn't complain about an
# "unrecognized alias" when parsing sys.argv.
sys.argv = ([sys.argv[0]] +
[z for z in sys.argv[1:] if not z.startswith("--flagfile")])
from IPython.kernel.zmq.kernelapp import IPKernelApp # pylint: disable=g-import-not-at-top
kernelapp = IPKernelApp.instance()
kernelapp.initialize()
# Enable inline plotting. Equivalent to running "%matplotlib inline".
ipshell = kernelapp.shell
ipshell.enable_matplotlib("inline")
kernelapp.start()
if __name__ == "__main__":
# When the user starts the main notebook process, we don't touch sys.argv.
# When the main process launches kernel subprocesses, it writes all flags
# to a tmpfile and sets --flagfile to that tmpfile, so for kernel
# subprocesses here we drop all flags *except* --flagfile, then call
# app.run(), and then (in main) restore all flags before starting the
# kernel app.
if IS_KERNEL:
# Drop everything except --flagfile.
sys.argv = ([sys.argv[0]] +
[x for x in sys.argv[1:] if x.startswith("--flagfile")])
app.run()
| apache-2.0 |
victor-prado/broker-manager | environment/lib/python3.5/site-packages/pandas/tests/test_categorical.py | 7 | 182005 | # -*- coding: utf-8 -*-
# pylint: disable=E1101,E1103,W0232
import sys
from datetime import datetime
from distutils.version import LooseVersion
import numpy as np
from pandas.types.dtypes import CategoricalDtype
from pandas.types.common import (is_categorical_dtype,
is_object_dtype,
is_float_dtype,
is_integer_dtype)
import pandas as pd
import pandas.compat as compat
import pandas.util.testing as tm
from pandas import (Categorical, Index, Series, DataFrame, PeriodIndex,
Timestamp, CategoricalIndex, isnull)
from pandas.compat import range, lrange, u, PY3
from pandas.core.config import option_context
# GH 12066
# flake8: noqa
class TestCategorical(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.factor = Categorical(['a', 'b', 'b', 'a', 'a', 'c', 'c', 'c'],
ordered=True)
def test_getitem(self):
self.assertEqual(self.factor[0], 'a')
self.assertEqual(self.factor[-1], 'c')
subf = self.factor[[0, 1, 2]]
tm.assert_numpy_array_equal(subf._codes,
np.array([0, 1, 1], dtype=np.int8))
subf = self.factor[np.asarray(self.factor) == 'c']
tm.assert_numpy_array_equal(subf._codes,
np.array([2, 2, 2], dtype=np.int8))
def test_getitem_listlike(self):
# GH 9469
# properly coerce the input indexers
np.random.seed(1)
c = Categorical(np.random.randint(0, 5, size=150000).astype(np.int8))
result = c.codes[np.array([100000]).astype(np.int64)]
expected = c[np.array([100000]).astype(np.int64)].codes
self.assert_numpy_array_equal(result, expected)
def test_setitem(self):
# int/positional
c = self.factor.copy()
c[0] = 'b'
self.assertEqual(c[0], 'b')
c[-1] = 'a'
self.assertEqual(c[-1], 'a')
# boolean
c = self.factor.copy()
indexer = np.zeros(len(c), dtype='bool')
indexer[0] = True
indexer[-1] = True
c[indexer] = 'c'
expected = Categorical(['c', 'b', 'b', 'a', 'a', 'c', 'c', 'c'],
ordered=True)
self.assert_categorical_equal(c, expected)
def test_setitem_listlike(self):
# GH 9469
# properly coerce the input indexers
np.random.seed(1)
c = Categorical(np.random.randint(0, 5, size=150000).astype(
np.int8)).add_categories([-1000])
indexer = np.array([100000]).astype(np.int64)
c[indexer] = -1000
# we are asserting the code result here
# which maps to the -1000 category
result = c.codes[np.array([100000]).astype(np.int64)]
self.assertEqual(result, np.array([5], dtype='int8'))
def test_constructor_unsortable(self):
# it works!
arr = np.array([1, 2, 3, datetime.now()], dtype='O')
factor = Categorical(arr, ordered=False)
self.assertFalse(factor.ordered)
# this however will raise as cannot be sorted
self.assertRaises(
TypeError, lambda: Categorical(arr, ordered=True))
def test_is_equal_dtype(self):
# test dtype comparisons between cats
c1 = Categorical(list('aabca'), categories=list('abc'), ordered=False)
c2 = Categorical(list('aabca'), categories=list('cab'), ordered=False)
c3 = Categorical(list('aabca'), categories=list('cab'), ordered=True)
self.assertTrue(c1.is_dtype_equal(c1))
self.assertTrue(c2.is_dtype_equal(c2))
self.assertTrue(c3.is_dtype_equal(c3))
self.assertFalse(c1.is_dtype_equal(c2))
self.assertFalse(c1.is_dtype_equal(c3))
self.assertFalse(c1.is_dtype_equal(Index(list('aabca'))))
self.assertFalse(c1.is_dtype_equal(c1.astype(object)))
self.assertTrue(c1.is_dtype_equal(CategoricalIndex(c1)))
self.assertFalse(c1.is_dtype_equal(
CategoricalIndex(c1, categories=list('cab'))))
self.assertFalse(c1.is_dtype_equal(CategoricalIndex(c1, ordered=True)))
def test_constructor(self):
exp_arr = np.array(["a", "b", "c", "a", "b", "c"], dtype=np.object_)
c1 = Categorical(exp_arr)
self.assert_numpy_array_equal(c1.__array__(), exp_arr)
c2 = Categorical(exp_arr, categories=["a", "b", "c"])
self.assert_numpy_array_equal(c2.__array__(), exp_arr)
c2 = Categorical(exp_arr, categories=["c", "b", "a"])
self.assert_numpy_array_equal(c2.__array__(), exp_arr)
# categories must be unique
def f():
Categorical([1, 2], [1, 2, 2])
self.assertRaises(ValueError, f)
def f():
Categorical(["a", "b"], ["a", "b", "b"])
self.assertRaises(ValueError, f)
def f():
with tm.assert_produces_warning(FutureWarning):
Categorical([1, 2], [1, 2, np.nan, np.nan])
self.assertRaises(ValueError, f)
# The default should be unordered
c1 = Categorical(["a", "b", "c", "a"])
self.assertFalse(c1.ordered)
# Categorical as input
c1 = Categorical(["a", "b", "c", "a"])
c2 = Categorical(c1)
tm.assert_categorical_equal(c1, c2)
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(c1)
tm.assert_categorical_equal(c1, c2)
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(c1)
tm.assert_categorical_equal(c1, c2)
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(c1, categories=["a", "b", "c"])
self.assert_numpy_array_equal(c1.__array__(), c2.__array__())
self.assert_index_equal(c2.categories, Index(["a", "b", "c"]))
# Series of dtype category
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(Series(c1))
tm.assert_categorical_equal(c1, c2)
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(Series(c1))
tm.assert_categorical_equal(c1, c2)
# Series
c1 = Categorical(["a", "b", "c", "a"])
c2 = Categorical(Series(["a", "b", "c", "a"]))
tm.assert_categorical_equal(c1, c2)
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(Series(["a", "b", "c", "a"]),
categories=["a", "b", "c", "d"])
tm.assert_categorical_equal(c1, c2)
# This should result in integer categories, not float!
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
self.assertTrue(is_integer_dtype(cat.categories))
# https://github.com/pandas-dev/pandas/issues/3678
cat = pd.Categorical([np.nan, 1, 2, 3])
self.assertTrue(is_integer_dtype(cat.categories))
# this should result in floats
cat = pd.Categorical([np.nan, 1, 2., 3])
self.assertTrue(is_float_dtype(cat.categories))
cat = pd.Categorical([np.nan, 1., 2., 3.])
self.assertTrue(is_float_dtype(cat.categories))
        # Deprecating NaNs in categories (GH #10748)
# preserve int as far as possible by converting to object if NaN is in
# categories
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, 1, 2, 3],
categories=[np.nan, 1, 2, 3])
self.assertTrue(is_object_dtype(cat.categories))
# This doesn't work -> this would probably need some kind of "remember
# the original type" feature to try to cast the array interface result
# to...
# vals = np.asarray(cat[cat.notnull()])
# self.assertTrue(is_integer_dtype(vals))
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, "a", "b", "c"],
categories=[np.nan, "a", "b", "c"])
self.assertTrue(is_object_dtype(cat.categories))
# but don't do it for floats
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, 1., 2., 3.],
categories=[np.nan, 1., 2., 3.])
self.assertTrue(is_float_dtype(cat.categories))
# corner cases
cat = pd.Categorical([1])
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
cat = pd.Categorical(["a"])
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == "a")
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
# Scalars should be converted to lists
cat = pd.Categorical(1)
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
cat = pd.Categorical([1], categories=1)
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
        # Catch old style constructor usage: two arrays, codes + categories
# We can only catch two cases:
# - when the first is an integer dtype and the second is not
# - when the resulting codes are all -1/NaN
with tm.assert_produces_warning(RuntimeWarning):
c_old = Categorical([0, 1, 2, 0, 1, 2],
categories=["a", "b", "c"]) # noqa
with tm.assert_produces_warning(RuntimeWarning):
c_old = Categorical([0, 1, 2, 0, 1, 2], # noqa
categories=[3, 4, 5])
        # the next ones are from the old docs, but unfortunately these don't
# trigger :-(
with tm.assert_produces_warning(None):
c_old2 = Categorical([0, 1, 2, 0, 1, 2], [1, 2, 3]) # noqa
cat = Categorical([1, 2], categories=[1, 2, 3])
# this is a legitimate constructor
with tm.assert_produces_warning(None):
c = Categorical(np.array([], dtype='int64'), # noqa
categories=[3, 2, 1], ordered=True)
def test_constructor_with_index(self):
ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
tm.assert_categorical_equal(ci.values, Categorical(ci))
ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
tm.assert_categorical_equal(ci.values,
Categorical(ci.astype(object),
categories=ci.categories))
def test_constructor_with_generator(self):
# This was raising an Error in isnull(single_val).any() because isnull
# returned a scalar for a generator
xrange = range
exp = Categorical([0, 1, 2])
cat = Categorical((x for x in [0, 1, 2]))
tm.assert_categorical_equal(cat, exp)
cat = Categorical(xrange(3))
tm.assert_categorical_equal(cat, exp)
# This uses xrange internally
from pandas.core.index import MultiIndex
MultiIndex.from_product([range(5), ['a', 'b', 'c']])
# check that categories accept generators and sequences
cat = pd.Categorical([0, 1, 2], categories=(x for x in [0, 1, 2]))
tm.assert_categorical_equal(cat, exp)
cat = pd.Categorical([0, 1, 2], categories=xrange(3))
tm.assert_categorical_equal(cat, exp)
def test_constructor_with_datetimelike(self):
# 12077
        # constructor with a datetimelike and NaT
for dtl in [pd.date_range('1995-01-01 00:00:00',
periods=5, freq='s'),
pd.date_range('1995-01-01 00:00:00',
periods=5, freq='s', tz='US/Eastern'),
pd.timedelta_range('1 day', periods=5, freq='s')]:
s = Series(dtl)
c = Categorical(s)
expected = type(dtl)(s)
expected.freq = None
tm.assert_index_equal(c.categories, expected)
self.assert_numpy_array_equal(c.codes, np.arange(5, dtype='int8'))
# with NaT
s2 = s.copy()
s2.iloc[-1] = pd.NaT
c = Categorical(s2)
expected = type(dtl)(s2.dropna())
expected.freq = None
tm.assert_index_equal(c.categories, expected)
exp = np.array([0, 1, 2, 3, -1], dtype=np.int8)
self.assert_numpy_array_equal(c.codes, exp)
result = repr(c)
self.assertTrue('NaT' in result)
def test_constructor_from_index_series_datetimetz(self):
idx = pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern')
result = pd.Categorical(idx)
tm.assert_index_equal(result.categories, idx)
result = pd.Categorical(pd.Series(idx))
tm.assert_index_equal(result.categories, idx)
def test_constructor_from_index_series_timedelta(self):
idx = pd.timedelta_range('1 days', freq='D', periods=3)
result = pd.Categorical(idx)
tm.assert_index_equal(result.categories, idx)
result = pd.Categorical(pd.Series(idx))
tm.assert_index_equal(result.categories, idx)
def test_constructor_from_index_series_period(self):
idx = pd.period_range('2015-01-01', freq='D', periods=3)
result = pd.Categorical(idx)
tm.assert_index_equal(result.categories, idx)
result = pd.Categorical(pd.Series(idx))
tm.assert_index_equal(result.categories, idx)
def test_constructor_invariant(self):
# GH 14190
vals = [
np.array([1., 1.2, 1.8, np.nan]),
np.array([1, 2, 3], dtype='int64'),
['a', 'b', 'c', np.nan],
[pd.Period('2014-01'), pd.Period('2014-02'), pd.NaT],
[pd.Timestamp('2014-01-01'), pd.Timestamp('2014-01-02'), pd.NaT],
[pd.Timestamp('2014-01-01', tz='US/Eastern'),
pd.Timestamp('2014-01-02', tz='US/Eastern'), pd.NaT],
]
for val in vals:
c = Categorical(val)
c2 = Categorical(c)
tm.assert_categorical_equal(c, c2)
def test_from_codes(self):
# too few categories
def f():
Categorical.from_codes([1, 2], [1, 2])
self.assertRaises(ValueError, f)
# no int codes
def f():
Categorical.from_codes(["a"], [1, 2])
self.assertRaises(ValueError, f)
# no unique categories
def f():
Categorical.from_codes([0, 1, 2], ["a", "a", "b"])
self.assertRaises(ValueError, f)
# too negative
def f():
Categorical.from_codes([-2, 1, 2], ["a", "b", "c"])
self.assertRaises(ValueError, f)
exp = Categorical(["a", "b", "c"], ordered=False)
res = Categorical.from_codes([0, 1, 2], ["a", "b", "c"])
tm.assert_categorical_equal(exp, res)
# Not available in earlier numpy versions
if hasattr(np.random, "choice"):
codes = np.random.choice([0, 1], 5, p=[0.9, 0.1])
pd.Categorical.from_codes(codes, categories=["train", "test"])
def test_validate_ordered(self):
# see gh-14058
exp_msg = "'ordered' must either be 'True' or 'False'"
exp_err = TypeError
# This should be a boolean.
ordered = np.array([0, 1, 2])
with tm.assertRaisesRegexp(exp_err, exp_msg):
Categorical([1, 2, 3], ordered=ordered)
with tm.assertRaisesRegexp(exp_err, exp_msg):
Categorical.from_codes([0, 0, 1], categories=['a', 'b', 'c'],
ordered=ordered)
def test_comparisons(self):
result = self.factor[self.factor == 'a']
expected = self.factor[np.asarray(self.factor) == 'a']
tm.assert_categorical_equal(result, expected)
result = self.factor[self.factor != 'a']
expected = self.factor[np.asarray(self.factor) != 'a']
tm.assert_categorical_equal(result, expected)
result = self.factor[self.factor < 'c']
expected = self.factor[np.asarray(self.factor) < 'c']
tm.assert_categorical_equal(result, expected)
result = self.factor[self.factor > 'a']
expected = self.factor[np.asarray(self.factor) > 'a']
tm.assert_categorical_equal(result, expected)
result = self.factor[self.factor >= 'b']
expected = self.factor[np.asarray(self.factor) >= 'b']
tm.assert_categorical_equal(result, expected)
result = self.factor[self.factor <= 'b']
expected = self.factor[np.asarray(self.factor) <= 'b']
tm.assert_categorical_equal(result, expected)
n = len(self.factor)
other = self.factor[np.random.permutation(n)]
result = self.factor == other
expected = np.asarray(self.factor) == np.asarray(other)
self.assert_numpy_array_equal(result, expected)
result = self.factor == 'd'
expected = np.repeat(False, len(self.factor))
self.assert_numpy_array_equal(result, expected)
# comparisons with categoricals
cat_rev = pd.Categorical(["a", "b", "c"], categories=["c", "b", "a"],
ordered=True)
cat_rev_base = pd.Categorical(
["b", "b", "b"], categories=["c", "b", "a"], ordered=True)
cat = pd.Categorical(["a", "b", "c"], ordered=True)
cat_base = pd.Categorical(["b", "b", "b"], categories=cat.categories,
ordered=True)
# comparisons need to take categories ordering into account
res_rev = cat_rev > cat_rev_base
exp_rev = np.array([True, False, False])
self.assert_numpy_array_equal(res_rev, exp_rev)
res_rev = cat_rev < cat_rev_base
exp_rev = np.array([False, False, True])
self.assert_numpy_array_equal(res_rev, exp_rev)
res = cat > cat_base
exp = np.array([False, False, True])
self.assert_numpy_array_equal(res, exp)
# Only categories with same categories can be compared
def f():
cat > cat_rev
self.assertRaises(TypeError, f)
cat_rev_base2 = pd.Categorical(
["b", "b", "b"], categories=["c", "b", "a", "d"])
def f():
cat_rev > cat_rev_base2
self.assertRaises(TypeError, f)
# Only categories with same ordering information can be compared
cat_unorderd = cat.set_ordered(False)
self.assertFalse((cat > cat).any())
def f():
cat > cat_unorderd
self.assertRaises(TypeError, f)
# comparison (in both directions) with Series will raise
s = Series(["b", "b", "b"])
self.assertRaises(TypeError, lambda: cat > s)
self.assertRaises(TypeError, lambda: cat_rev > s)
self.assertRaises(TypeError, lambda: s < cat)
self.assertRaises(TypeError, lambda: s < cat_rev)
# comparison with numpy.array will raise in both direction, but only on
# newer numpy versions
a = np.array(["b", "b", "b"])
self.assertRaises(TypeError, lambda: cat > a)
self.assertRaises(TypeError, lambda: cat_rev > a)
# The following work via '__array_priority__ = 1000'
# works only on numpy >= 1.7.1
if LooseVersion(np.__version__) > "1.7.1":
self.assertRaises(TypeError, lambda: a < cat)
self.assertRaises(TypeError, lambda: a < cat_rev)
# Make sure that unequal comparison take the categories order in
# account
cat_rev = pd.Categorical(
list("abc"), categories=list("cba"), ordered=True)
exp = np.array([True, False, False])
res = cat_rev > "b"
self.assert_numpy_array_equal(res, exp)
def test_argsort(self):
c = Categorical([5, 3, 1, 4, 2], ordered=True)
expected = np.array([2, 4, 1, 3, 0])
tm.assert_numpy_array_equal(c.argsort(ascending=True), expected,
check_dtype=False)
expected = expected[::-1]
tm.assert_numpy_array_equal(c.argsort(ascending=False), expected,
check_dtype=False)
def test_numpy_argsort(self):
c = Categorical([5, 3, 1, 4, 2], ordered=True)
expected = np.array([2, 4, 1, 3, 0])
tm.assert_numpy_array_equal(np.argsort(c), expected,
check_dtype=False)
msg = "the 'kind' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, np.argsort,
c, kind='mergesort')
msg = "the 'axis' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, np.argsort,
c, axis=0)
msg = "the 'order' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, np.argsort,
c, order='C')
def test_na_flags_int_categories(self):
# #1457
categories = lrange(10)
labels = np.random.randint(0, 10, 20)
labels[::5] = -1
cat = Categorical(labels, categories, fastpath=True)
repr(cat)
self.assert_numpy_array_equal(isnull(cat), labels == -1)
def test_categories_none(self):
factor = Categorical(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], ordered=True)
tm.assert_categorical_equal(factor, self.factor)
def test_describe(self):
# string type
desc = self.factor.describe()
self.assertTrue(self.factor.ordered)
exp_index = pd.CategoricalIndex(['a', 'b', 'c'], name='categories',
ordered=self.factor.ordered)
expected = DataFrame({'counts': [3, 2, 3],
'freqs': [3 / 8., 2 / 8., 3 / 8.]},
index=exp_index)
tm.assert_frame_equal(desc, expected)
# check unused categories
cat = self.factor.copy()
cat.set_categories(["a", "b", "c", "d"], inplace=True)
desc = cat.describe()
exp_index = pd.CategoricalIndex(['a', 'b', 'c', 'd'],
ordered=self.factor.ordered,
name='categories')
expected = DataFrame({'counts': [3, 2, 3, 0],
'freqs': [3 / 8., 2 / 8., 3 / 8., 0]},
index=exp_index)
tm.assert_frame_equal(desc, expected)
# check an integer one
cat = Categorical([1, 2, 3, 1, 2, 3, 3, 2, 1, 1, 1])
desc = cat.describe()
exp_index = pd.CategoricalIndex([1, 2, 3], ordered=cat.ordered,
name='categories')
expected = DataFrame({'counts': [5, 3, 3],
'freqs': [5 / 11., 3 / 11., 3 / 11.]},
index=exp_index)
tm.assert_frame_equal(desc, expected)
# https://github.com/pandas-dev/pandas/issues/3678
# describe should work with NaN
cat = pd.Categorical([np.nan, 1, 2, 2])
desc = cat.describe()
expected = DataFrame({'counts': [1, 2, 1],
'freqs': [1 / 4., 2 / 4., 1 / 4.]},
index=pd.CategoricalIndex([1, 2, np.nan],
categories=[1, 2],
name='categories'))
tm.assert_frame_equal(desc, expected)
# NA as a category
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical(["a", "c", "c", np.nan],
categories=["b", "a", "c", np.nan])
result = cat.describe()
expected = DataFrame([[0, 0], [1, 0.25], [2, 0.5], [1, 0.25]],
columns=['counts', 'freqs'],
index=pd.CategoricalIndex(['b', 'a', 'c', np.nan],
name='categories'))
tm.assert_frame_equal(result, expected, check_categorical=False)
# NA as an unused category
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical(["a", "c", "c"],
categories=["b", "a", "c", np.nan])
result = cat.describe()
exp_idx = pd.CategoricalIndex(
['b', 'a', 'c', np.nan], name='categories')
expected = DataFrame([[0, 0], [1, 1 / 3.], [2, 2 / 3.], [0, 0]],
columns=['counts', 'freqs'], index=exp_idx)
tm.assert_frame_equal(result, expected, check_categorical=False)
def test_print(self):
expected = ["[a, b, b, a, a, c, c, c]",
"Categories (3, object): [a < b < c]"]
expected = "\n".join(expected)
actual = repr(self.factor)
self.assertEqual(actual, expected)
def test_big_print(self):
factor = Categorical([0, 1, 2, 0, 1, 2] * 100, ['a', 'b', 'c'],
name='cat', fastpath=True)
expected = ["[a, b, c, a, b, ..., b, c, a, b, c]", "Length: 600",
"Categories (3, object): [a, b, c]"]
expected = "\n".join(expected)
actual = repr(factor)
self.assertEqual(actual, expected)
def test_empty_print(self):
factor = Categorical([], ["a", "b", "c"])
expected = ("[], Categories (3, object): [a, b, c]")
# hack because array_repr changed in numpy > 1.6.x
actual = repr(factor)
self.assertEqual(actual, expected)
self.assertEqual(expected, actual)
factor = Categorical([], ["a", "b", "c"], ordered=True)
expected = ("[], Categories (3, object): [a < b < c]")
actual = repr(factor)
self.assertEqual(expected, actual)
factor = Categorical([], [])
expected = ("[], Categories (0, object): []")
self.assertEqual(expected, repr(factor))
def test_print_none_width(self):
# GH10087
a = pd.Series(pd.Categorical([1, 2, 3, 4]))
exp = u("0 1\n1 2\n2 3\n3 4\n" +
"dtype: category\nCategories (4, int64): [1, 2, 3, 4]")
with option_context("display.width", None):
self.assertEqual(exp, repr(a))
def test_unicode_print(self):
if PY3:
_rep = repr
else:
_rep = unicode # noqa
c = pd.Categorical(['aaaaa', 'bb', 'cccc'] * 20)
expected = u"""\
[aaaaa, bb, cccc, aaaaa, bb, ..., bb, cccc, aaaaa, bb, cccc]
Length: 60
Categories (3, object): [aaaaa, bb, cccc]"""
self.assertEqual(_rep(c), expected)
c = pd.Categorical([u'ああああ', u'いいいいい', u'ううううううう']
* 20)
expected = u"""\
[ああああ, いいいいい, ううううううう, ああああ, いいいいい, ..., いいいいい, ううううううう, ああああ, いいいいい, ううううううう]
Length: 60
Categories (3, object): [ああああ, いいいいい, ううううううう]""" # noqa
self.assertEqual(_rep(c), expected)
# unicode option should not affect to Categorical, as it doesn't care
# the repr width
with option_context('display.unicode.east_asian_width', True):
c = pd.Categorical([u'ああああ', u'いいいいい', u'ううううううう']
* 20)
expected = u"""[ああああ, いいいいい, ううううううう, ああああ, いいいいい, ..., いいいいい, ううううううう, ああああ, いいいいい, ううううううう]
Length: 60
Categories (3, object): [ああああ, いいいいい, ううううううう]""" # noqa
self.assertEqual(_rep(c), expected)
def test_periodindex(self):
idx1 = PeriodIndex(['2014-01', '2014-01', '2014-02', '2014-02',
'2014-03', '2014-03'], freq='M')
cat1 = Categorical(idx1)
str(cat1)
exp_arr = np.array([0, 0, 1, 1, 2, 2], dtype=np.int8)
exp_idx = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M')
self.assert_numpy_array_equal(cat1._codes, exp_arr)
self.assert_index_equal(cat1.categories, exp_idx)
idx2 = PeriodIndex(['2014-03', '2014-03', '2014-02', '2014-01',
'2014-03', '2014-01'], freq='M')
cat2 = Categorical(idx2, ordered=True)
str(cat2)
exp_arr = np.array([2, 2, 1, 0, 2, 0], dtype=np.int8)
exp_idx2 = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M')
self.assert_numpy_array_equal(cat2._codes, exp_arr)
self.assert_index_equal(cat2.categories, exp_idx2)
idx3 = PeriodIndex(['2013-12', '2013-11', '2013-10', '2013-09',
'2013-08', '2013-07', '2013-05'], freq='M')
cat3 = Categorical(idx3, ordered=True)
exp_arr = np.array([6, 5, 4, 3, 2, 1, 0], dtype=np.int8)
exp_idx = PeriodIndex(['2013-05', '2013-07', '2013-08', '2013-09',
'2013-10', '2013-11', '2013-12'], freq='M')
self.assert_numpy_array_equal(cat3._codes, exp_arr)
self.assert_index_equal(cat3.categories, exp_idx)
def test_categories_assignments(self):
s = pd.Categorical(["a", "b", "c", "a"])
exp = np.array([1, 2, 3, 1], dtype=np.int64)
s.categories = [1, 2, 3]
self.assert_numpy_array_equal(s.__array__(), exp)
self.assert_index_equal(s.categories, Index([1, 2, 3]))
# lengthen
def f():
s.categories = [1, 2, 3, 4]
self.assertRaises(ValueError, f)
# shorten
def f():
s.categories = [1, 2]
self.assertRaises(ValueError, f)
def test_construction_with_ordered(self):
# GH 9347, 9190
cat = Categorical([0, 1, 2])
self.assertFalse(cat.ordered)
cat = Categorical([0, 1, 2], ordered=False)
self.assertFalse(cat.ordered)
cat = Categorical([0, 1, 2], ordered=True)
self.assertTrue(cat.ordered)
def test_ordered_api(self):
# GH 9347
cat1 = pd.Categorical(["a", "c", "b"], ordered=False)
self.assert_index_equal(cat1.categories, Index(['a', 'b', 'c']))
self.assertFalse(cat1.ordered)
cat2 = pd.Categorical(["a", "c", "b"], categories=['b', 'c', 'a'],
ordered=False)
self.assert_index_equal(cat2.categories, Index(['b', 'c', 'a']))
self.assertFalse(cat2.ordered)
cat3 = pd.Categorical(["a", "c", "b"], ordered=True)
self.assert_index_equal(cat3.categories, Index(['a', 'b', 'c']))
self.assertTrue(cat3.ordered)
cat4 = pd.Categorical(["a", "c", "b"], categories=['b', 'c', 'a'],
ordered=True)
self.assert_index_equal(cat4.categories, Index(['b', 'c', 'a']))
self.assertTrue(cat4.ordered)
def test_set_ordered(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
cat2 = cat.as_unordered()
self.assertFalse(cat2.ordered)
cat2 = cat.as_ordered()
self.assertTrue(cat2.ordered)
cat2.as_unordered(inplace=True)
self.assertFalse(cat2.ordered)
cat2.as_ordered(inplace=True)
self.assertTrue(cat2.ordered)
self.assertTrue(cat2.set_ordered(True).ordered)
self.assertFalse(cat2.set_ordered(False).ordered)
cat2.set_ordered(True, inplace=True)
self.assertTrue(cat2.ordered)
cat2.set_ordered(False, inplace=True)
self.assertFalse(cat2.ordered)
# removed in 0.19.0
msg = "can\'t set attribute"
with tm.assertRaisesRegexp(AttributeError, msg):
cat.ordered = True
with tm.assertRaisesRegexp(AttributeError, msg):
cat.ordered = False
def test_set_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
exp_categories = Index(["c", "b", "a"])
exp_values = np.array(["a", "b", "c", "a"], dtype=np.object_)
res = cat.set_categories(["c", "b", "a"], inplace=True)
self.assert_index_equal(cat.categories, exp_categories)
self.assert_numpy_array_equal(cat.__array__(), exp_values)
self.assertIsNone(res)
res = cat.set_categories(["a", "b", "c"])
# cat must be the same as before
self.assert_index_equal(cat.categories, exp_categories)
self.assert_numpy_array_equal(cat.__array__(), exp_values)
# only res is changed
exp_categories_back = Index(["a", "b", "c"])
self.assert_index_equal(res.categories, exp_categories_back)
self.assert_numpy_array_equal(res.__array__(), exp_values)
# not all "old" included in "new" -> all not included ones are now
# np.nan
cat = Categorical(["a", "b", "c", "a"], ordered=True)
res = cat.set_categories(["a"])
self.assert_numpy_array_equal(res.codes,
np.array([0, -1, -1, 0], dtype=np.int8))
# still not all "old" in "new"
res = cat.set_categories(["a", "b", "d"])
self.assert_numpy_array_equal(res.codes,
np.array([0, 1, -1, 0], dtype=np.int8))
self.assert_index_equal(res.categories, Index(["a", "b", "d"]))
# all "old" included in "new"
cat = cat.set_categories(["a", "b", "c", "d"])
exp_categories = Index(["a", "b", "c", "d"])
self.assert_index_equal(cat.categories, exp_categories)
# internals...
c = Categorical([1, 2, 3, 4, 1], categories=[1, 2, 3, 4], ordered=True)
self.assert_numpy_array_equal(c._codes,
np.array([0, 1, 2, 3, 0], dtype=np.int8))
self.assert_index_equal(c.categories, Index([1, 2, 3, 4]))
exp = np.array([1, 2, 3, 4, 1], dtype=np.int64)
self.assert_numpy_array_equal(c.get_values(), exp)
# all "pointers" to '4' must be changed from 3 to 0,...
c = c.set_categories([4, 3, 2, 1])
# positions are changed
self.assert_numpy_array_equal(c._codes,
np.array([3, 2, 1, 0, 3], dtype=np.int8))
# categories are now in new order
self.assert_index_equal(c.categories, Index([4, 3, 2, 1]))
# output is the same
exp = np.array([1, 2, 3, 4, 1], dtype=np.int64)
self.assert_numpy_array_equal(c.get_values(), exp)
self.assertEqual(c.min(), 4)
self.assertEqual(c.max(), 1)
# set_categories should set the ordering if specified
c2 = c.set_categories([4, 3, 2, 1], ordered=False)
self.assertFalse(c2.ordered)
self.assert_numpy_array_equal(c.get_values(), c2.get_values())
# set_categories should pass the ordering through
c2 = c.set_ordered(False).set_categories([4, 3, 2, 1])
self.assertFalse(c2.ordered)
self.assert_numpy_array_equal(c.get_values(), c2.get_values())
def test_rename_categories(self):
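# rename_categories relabels the categories without touching the codes;
# with inplace=False the original Categorical must stay unchanged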
cat = pd.Categorical(["a", "b", "c", "a"])
# inplace=False: the old one must not be changed
res = cat.rename_categories([1, 2, 3])
self.assert_numpy_array_equal(res.__array__(),
np.array([1, 2, 3, 1], dtype=np.int64))
self.assert_index_equal(res.categories, Index([1, 2, 3]))
exp_cat = np.array(["a", "b", "c", "a"], dtype=np.object_)
self.assert_numpy_array_equal(cat.__array__(), exp_cat)
exp_cat = Index(["a", "b", "c"])
self.assert_index_equal(cat.categories, exp_cat)
res = cat.rename_categories([1, 2, 3], inplace=True)
# and now inplace
self.assertIsNone(res)
self.assert_numpy_array_equal(cat.__array__(),
np.array([1, 2, 3, 1], dtype=np.int64))
self.assert_index_equal(cat.categories, Index([1, 2, 3]))
# lengthen
def f():
cat.rename_categories([1, 2, 3, 4])
self.assertRaises(ValueError, f)
# shorten
def f():
cat.rename_categories([1, 2])
self.assertRaises(ValueError, f)
def test_reorder_categories(self):
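# reorder_categories only accepts a permutation of the existing
# categories; shorter, longer or different sets raise ValueError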
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", "c", "a"], categories=["c", "b", "a"],
ordered=True)
# first inplace == False
res = cat.reorder_categories(["c", "b", "a"])
# cat must be the same as before
self.assert_categorical_equal(cat, old)
# only res is changed
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.reorder_categories(["c", "b", "a"], inplace=True)
self.assertIsNone(res)
self.assert_categorical_equal(cat, new)
# not all "old" included in "new"
cat = Categorical(["a", "b", "c", "a"], ordered=True)
def f():
cat.reorder_categories(["a"])
self.assertRaises(ValueError, f)
# still not all "old" in "new"
def f():
cat.reorder_categories(["a", "b", "d"])
self.assertRaises(ValueError, f)
# all "old" included in "new", but too long
def f():
cat.reorder_categories(["a", "b", "c", "d"])
self.assertRaises(ValueError, f)
def test_add_categories(self):
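# add_categories appends new, unused categories; adding one that
# already exists raises ValueError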
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", "c", "a"],
categories=["a", "b", "c", "d"], ordered=True)
# first inplace == False
res = cat.add_categories("d")
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
res = cat.add_categories(["d"])
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.add_categories("d", inplace=True)
self.assert_categorical_equal(cat, new)
self.assertIsNone(res)
# the new category is already among the existing categories
def f():
cat.add_categories(["d"])
self.assertRaises(ValueError, f)
# GH 9927
cat = Categorical(list("abc"), ordered=True)
expected = Categorical(
list("abc"), categories=list("abcde"), ordered=True)
# test with Series, np.array, index, list
res = cat.add_categories(Series(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(np.array(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(Index(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(["d", "e"])
self.assert_categorical_equal(res, expected)
def test_remove_categories(self):
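# remove_categories drops the given categories and turns matching
# values into NaN; removing a non-existing category raises ValueError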
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", np.nan, "a"], categories=["a", "b"],
ordered=True)
# first inplace == False
res = cat.remove_categories("c")
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
res = cat.remove_categories(["c"])
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.remove_categories("c", inplace=True)
self.assert_categorical_equal(cat, new)
self.assertIsNone(res)
# the category to remove is not in the categories
def f():
cat.remove_categories(["c"])
self.assertRaises(ValueError, f)
def test_remove_unused_categories(self):
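# remove_unused_categories drops categories that never occur in the
# values, while NaN entries keep code -1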
c = Categorical(["a", "b", "c", "d", "a"],
categories=["a", "b", "c", "d", "e"])
exp_categories_all = Index(["a", "b", "c", "d", "e"])
exp_categories_dropped = Index(["a", "b", "c", "d"])
self.assert_index_equal(c.categories, exp_categories_all)
res = c.remove_unused_categories()
self.assert_index_equal(res.categories, exp_categories_dropped)
self.assert_index_equal(c.categories, exp_categories_all)
res = c.remove_unused_categories(inplace=True)
self.assert_index_equal(c.categories, exp_categories_dropped)
self.assertIsNone(res)
# with NaN values (GH11599)
c = Categorical(["a", "b", "c", np.nan],
categories=["a", "b", "c", "d", "e"])
res = c.remove_unused_categories()
self.assert_index_equal(res.categories,
Index(np.array(["a", "b", "c"])))
exp_codes = np.array([0, 1, 2, -1], dtype=np.int8)
self.assert_numpy_array_equal(res.codes, exp_codes)
self.assert_index_equal(c.categories, exp_categories_all)
val = ['F', np.nan, 'D', 'B', 'D', 'F', np.nan]
cat = pd.Categorical(values=val, categories=list('ABCDEFG'))
out = cat.remove_unused_categories()
self.assert_index_equal(out.categories, Index(['B', 'D', 'F']))
exp_codes = np.array([2, -1, 1, 0, 1, 2, -1], dtype=np.int8)
self.assert_numpy_array_equal(out.codes, exp_codes)
self.assertEqual(out.get_values().tolist(), val)
alpha = list('abcdefghijklmnopqrstuvwxyz')
val = np.random.choice(alpha[::2], 10000).astype('object')
val[np.random.choice(len(val), 100)] = np.nan
cat = pd.Categorical(values=val, categories=alpha)
out = cat.remove_unused_categories()
self.assertEqual(out.get_values().tolist(), val.tolist())
def test_nan_handling(self):
# Nans are represented as -1 in codes
c = Categorical(["a", "b", np.nan, "a"])
self.assert_index_equal(c.categories, Index(["a", "b"]))
self.assert_numpy_array_equal(c._codes,
np.array([0, 1, -1, 0], dtype=np.int8))
c[1] = np.nan
self.assert_index_equal(c.categories, Index(["a", "b"]))
self.assert_numpy_array_equal(c._codes,
np.array([0, -1, -1, 0], dtype=np.int8))
# If categories have nan included, the code should point to that
# instead
with tm.assert_produces_warning(FutureWarning):
c = Categorical(["a", "b", np.nan, "a"],
categories=["a", "b", np.nan])
self.assert_index_equal(c.categories, Index(["a", "b", np.nan]))
self.assert_numpy_array_equal(c._codes,
np.array([0, 1, 2, 0], dtype=np.int8))
c[1] = np.nan
self.assert_index_equal(c.categories, Index(["a", "b", np.nan]))
self.assert_numpy_array_equal(c._codes,
np.array([0, 2, 2, 0], dtype=np.int8))
# Changing categories should also make the replaced category np.nan
c = Categorical(["a", "b", "c", "a"])
with tm.assert_produces_warning(FutureWarning):
c.categories = ["a", "b", np.nan] # noqa
self.assert_index_equal(c.categories, Index(["a", "b", np.nan]))
self.assert_numpy_array_equal(c._codes,
np.array([0, 1, 2, 0], dtype=np.int8))
# Adding nan to categories should make assigned nan point to the
# category!
c = Categorical(["a", "b", np.nan, "a"])
self.assert_index_equal(c.categories, Index(["a", "b"]))
self.assert_numpy_array_equal(c._codes,
np.array([0, 1, -1, 0], dtype=np.int8))
with tm.assert_produces_warning(FutureWarning):
c.set_categories(["a", "b", np.nan], rename=True, inplace=True)
self.assert_index_equal(c.categories, Index(["a", "b", np.nan]))
self.assert_numpy_array_equal(c._codes,
np.array([0, 1, -1, 0], dtype=np.int8))
c[1] = np.nan
self.assert_index_equal(c.categories, Index(["a", "b", np.nan]))
self.assert_numpy_array_equal(c._codes,
np.array([0, 2, -1, 0], dtype=np.int8))
# Remove null categories (GH 10156)
cases = [([1.0, 2.0, np.nan], [1.0, 2.0]),
(['a', 'b', None], ['a', 'b']),
([pd.Timestamp('2012-05-01'), pd.NaT],
[pd.Timestamp('2012-05-01')])]
null_values = [np.nan, None, pd.NaT]
for with_null, without in cases:
with tm.assert_produces_warning(FutureWarning):
base = Categorical([], with_null)
expected = Categorical([], without)
for nullval in null_values:
result = base.remove_categories(nullval)
self.assert_categorical_equal(result, expected)
# Different null values are indistinguishable
for i, j in [(0, 1), (0, 2), (1, 2)]:
nulls = [null_values[i], null_values[j]]
def f():
with tm.assert_produces_warning(FutureWarning):
Categorical([], categories=nulls)
self.assertRaises(ValueError, f)
def test_isnull(self):
exp = np.array([False, False, True])
c = Categorical(["a", "b", np.nan])
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
with tm.assert_produces_warning(FutureWarning):
c = Categorical(["a", "b", np.nan], categories=["a", "b", np.nan])
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
# test both nan in categories and as -1
exp = np.array([True, False, True])
c = Categorical(["a", "b", np.nan])
with tm.assert_produces_warning(FutureWarning):
c.set_categories(["a", "b", np.nan], rename=True, inplace=True)
c[0] = np.nan
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
def test_codes_immutable(self):
# Codes should be read only
c = Categorical(["a", "b", "c", "a", np.nan])
exp = np.array([0, 1, 2, 0, -1], dtype='int8')
self.assert_numpy_array_equal(c.codes, exp)
# Assignments to codes should raise
def f():
c.codes = np.array([0, 1, 2, 0, 1], dtype='int8')
self.assertRaises(ValueError, f)
# changes in the codes array should raise
# np 1.6.1 raises RuntimeError rather than ValueError
codes = c.codes
def f():
codes[4] = 1
self.assertRaises(ValueError, f)
# But even after getting the codes, the original array should still be
# writeable!
c[4] = "a"
exp = np.array([0, 1, 2, 0, 0], dtype='int8')
self.assert_numpy_array_equal(c.codes, exp)
c._codes[4] = 2
exp = np.array([0, 1, 2, 0, 2], dtype='int8')
self.assert_numpy_array_equal(c.codes, exp)
def test_min_max(self):
# unordered cats have no min/max
cat = Categorical(["a", "b", "c", "d"], ordered=False)
self.assertRaises(TypeError, lambda: cat.min())
self.assertRaises(TypeError, lambda: cat.max())
cat = Categorical(["a", "b", "c", "d"], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "a")
self.assertEqual(_max, "d")
cat = Categorical(["a", "b", "c", "d"],
categories=['d', 'c', 'b', 'a'], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "d")
self.assertEqual(_max, "a")
cat = Categorical([np.nan, "b", "c", np.nan],
categories=['d', 'c', 'b', 'a'], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, "b")
_min = cat.min(numeric_only=True)
self.assertEqual(_min, "c")
_max = cat.max(numeric_only=True)
self.assertEqual(_max, "b")
cat = Categorical([np.nan, 1, 2, np.nan], categories=[5, 4, 3, 2, 1],
ordered=True)
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, 1)
_min = cat.min(numeric_only=True)
self.assertEqual(_min, 2)
_max = cat.max(numeric_only=True)
self.assertEqual(_max, 1)
def test_unique(self):
# categories are reordered to the order of appearance when ordered=False
cat = Categorical(["a", "b"])
exp = Index(["a", "b"])
res = cat.unique()
self.assert_index_equal(res.categories, exp)
self.assert_categorical_equal(res, cat)
cat = Categorical(["a", "b", "a", "a"], categories=["a", "b", "c"])
res = cat.unique()
self.assert_index_equal(res.categories, exp)
tm.assert_categorical_equal(res, Categorical(exp))
cat = Categorical(["c", "a", "b", "a", "a"],
categories=["a", "b", "c"])
exp = Index(["c", "a", "b"])
res = cat.unique()
self.assert_index_equal(res.categories, exp)
exp_cat = Categorical(exp, categories=['c', 'a', 'b'])
tm.assert_categorical_equal(res, exp_cat)
# nan must be removed
cat = Categorical(["b", np.nan, "b", np.nan, "a"],
categories=["a", "b", "c"])
res = cat.unique()
exp = Index(["b", "a"])
self.assert_index_equal(res.categories, exp)
exp_cat = Categorical(["b", np.nan, "a"], categories=["b", "a"])
tm.assert_categorical_equal(res, exp_cat)
def test_unique_ordered(self):
# keep categories order when ordered=True
cat = Categorical(['b', 'a', 'b'], categories=['a', 'b'], ordered=True)
res = cat.unique()
exp_cat = Categorical(['b', 'a'], categories=['a', 'b'], ordered=True)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['c', 'b', 'a', 'a'], categories=['a', 'b', 'c'],
ordered=True)
res = cat.unique()
exp_cat = Categorical(['c', 'b', 'a'], categories=['a', 'b', 'c'],
ordered=True)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['b', 'a', 'a'], categories=['a', 'b', 'c'],
ordered=True)
res = cat.unique()
exp_cat = Categorical(['b', 'a'], categories=['a', 'b'], ordered=True)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['b', 'b', np.nan, 'a'], categories=['a', 'b', 'c'],
ordered=True)
res = cat.unique()
exp_cat = Categorical(['b', np.nan, 'a'], categories=['a', 'b'],
ordered=True)
tm.assert_categorical_equal(res, exp_cat)
def test_unique_index_series(self):
c = Categorical([3, 1, 2, 2, 1], categories=[3, 2, 1])
# Categorical.unique sorts categories by appearance order
# if ordered=False
exp = Categorical([3, 1, 2], categories=[3, 1, 2])
tm.assert_categorical_equal(c.unique(), exp)
tm.assert_index_equal(Index(c).unique(), Index(exp))
tm.assert_categorical_equal(pd.Series(c).unique(), exp)
c = Categorical([1, 1, 2, 2], categories=[3, 2, 1])
exp = Categorical([1, 2], categories=[1, 2])
tm.assert_categorical_equal(c.unique(), exp)
tm.assert_index_equal(Index(c).unique(), Index(exp))
tm.assert_categorical_equal(pd.Series(c).unique(), exp)
c = Categorical([3, 1, 2, 2, 1], categories=[3, 2, 1], ordered=True)
# Categorical.unique keeps categories order if ordered=True
exp = Categorical([3, 1, 2], categories=[3, 2, 1], ordered=True)
tm.assert_categorical_equal(c.unique(), exp)
tm.assert_index_equal(Index(c).unique(), Index(exp))
tm.assert_categorical_equal(pd.Series(c).unique(), exp)
def test_mode(self):
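# mode returns the most frequent categories (all of them on a tie)
# and never counts NaN as the mode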
s = Categorical([1, 1, 2, 4, 5, 5, 5], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([5], categories=[5, 4, 3, 2, 1], ordered=True)
tm.assert_categorical_equal(res, exp)
s = Categorical([1, 1, 1, 4, 5, 5, 5], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([5, 1], categories=[5, 4, 3, 2, 1], ordered=True)
tm.assert_categorical_equal(res, exp)
s = Categorical([1, 2, 3, 4, 5], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([], categories=[5, 4, 3, 2, 1], ordered=True)
tm.assert_categorical_equal(res, exp)
# NaN should not become the mode!
s = Categorical([np.nan, np.nan, np.nan, 4, 5],
categories=[5, 4, 3, 2, 1], ordered=True)
res = s.mode()
exp = Categorical([], categories=[5, 4, 3, 2, 1], ordered=True)
tm.assert_categorical_equal(res, exp)
s = Categorical([np.nan, np.nan, np.nan, 4, 5, 4],
categories=[5, 4, 3, 2, 1], ordered=True)
res = s.mode()
exp = Categorical([4], categories=[5, 4, 3, 2, 1], ordered=True)
tm.assert_categorical_equal(res, exp)
s = Categorical([np.nan, np.nan, 4, 5, 4], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([4], categories=[5, 4, 3, 2, 1], ordered=True)
tm.assert_categorical_equal(res, exp)
def test_sort_values(self):
# unordered cats are sortable
cat = Categorical(["a", "b", "b", "a"], ordered=False)
cat.sort_values()
cat = Categorical(["a", "c", "b", "d"], ordered=True)
# sort_values
res = cat.sort_values()
exp = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp)
self.assert_index_equal(res.categories, cat.categories)
cat = Categorical(["a", "c", "b", "d"],
categories=["a", "b", "c", "d"], ordered=True)
res = cat.sort_values()
exp = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp)
self.assert_index_equal(res.categories, cat.categories)
res = cat.sort_values(ascending=False)
exp = np.array(["d", "c", "b", "a"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp)
self.assert_index_equal(res.categories, cat.categories)
# sort (inplace order)
cat1 = cat.copy()
cat1.sort_values(inplace=True)
exp = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(cat1.__array__(), exp)
self.assert_index_equal(res.categories, cat.categories)
# reverse
cat = Categorical(["a", "c", "c", "b", "d"], ordered=True)
res = cat.sort_values(ascending=False)
exp_val = np.array(["d", "c", "c", "b", "a"], dtype=object)
exp_categories = Index(["a", "b", "c", "d"])
self.assert_numpy_array_equal(res.__array__(), exp_val)
self.assert_index_equal(res.categories, exp_categories)
def test_sort_values_na_position(self):
# see gh-12882
cat = Categorical([5, 2, np.nan, 2, np.nan], ordered=True)
exp_categories = Index([2, 5])
exp = np.array([2.0, 2.0, 5.0, np.nan, np.nan])
res = cat.sort_values() # default arguments
self.assert_numpy_array_equal(res.__array__(), exp)
self.assert_index_equal(res.categories, exp_categories)
exp = np.array([np.nan, np.nan, 2.0, 2.0, 5.0])
res = cat.sort_values(ascending=True, na_position='first')
self.assert_numpy_array_equal(res.__array__(), exp)
self.assert_index_equal(res.categories, exp_categories)
exp = np.array([np.nan, np.nan, 5.0, 2.0, 2.0])
res = cat.sort_values(ascending=False, na_position='first')
self.assert_numpy_array_equal(res.__array__(), exp)
self.assert_index_equal(res.categories, exp_categories)
exp = np.array([2.0, 2.0, 5.0, np.nan, np.nan])
res = cat.sort_values(ascending=True, na_position='last')
self.assert_numpy_array_equal(res.__array__(), exp)
self.assert_index_equal(res.categories, exp_categories)
exp = np.array([5.0, 2.0, 2.0, np.nan, np.nan])
res = cat.sort_values(ascending=False, na_position='last')
self.assert_numpy_array_equal(res.__array__(), exp)
self.assert_index_equal(res.categories, exp_categories)
cat = Categorical(["a", "c", "b", "d", np.nan], ordered=True)
res = cat.sort_values(ascending=False, na_position='last')
exp_val = np.array(["d", "c", "b", "a", np.nan], dtype=object)
exp_categories = Index(["a", "b", "c", "d"])
self.assert_numpy_array_equal(res.__array__(), exp_val)
self.assert_index_equal(res.categories, exp_categories)
cat = Categorical(["a", "c", "b", "d", np.nan], ordered=True)
res = cat.sort_values(ascending=False, na_position='first')
exp_val = np.array([np.nan, "d", "c", "b", "a"], dtype=object)
exp_categories = Index(["a", "b", "c", "d"])
self.assert_numpy_array_equal(res.__array__(), exp_val)
self.assert_index_equal(res.categories, exp_categories)
def test_slicing_directly(self):
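# scalar indexing returns the value itself, slicing returns a
# Categorical that keeps the full set of categories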
cat = Categorical(["a", "b", "c", "d", "a", "b", "c"])
sliced = cat[3]
self.assertEqual(sliced, "d")
sliced = cat[3:5]
expected = Categorical(["d", "a"], categories=['a', 'b', 'c', 'd'])
self.assert_numpy_array_equal(sliced._codes, expected._codes)
tm.assert_index_equal(sliced.categories, expected.categories)
def test_set_item_nan(self):
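# assigning np.nan sets the code to -1, unless NaN is itself a
# category, in which case the new code points at that category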
cat = pd.Categorical([1, 2, 3])
exp = pd.Categorical([1, np.nan, 3], categories=[1, 2, 3])
cat[1] = np.nan
tm.assert_categorical_equal(cat, exp)
# if nan in categories, the proper code should be set!
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1] = np.nan
exp = np.array([0, 3, 2, -1], dtype=np.int8)
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1:3] = np.nan
exp = np.array([0, 3, 3, -1], dtype=np.int8)
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1:3] = [np.nan, 1]
exp = np.array([0, 3, 0, -1], dtype=np.int8)
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1:3] = [np.nan, np.nan]
exp = np.array([0, 3, 3, -1], dtype=np.int8)
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, np.nan, 3], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[pd.isnull(cat)] = np.nan
exp = np.array([0, 1, 3, 2], dtype=np.int8)
self.assert_numpy_array_equal(cat.codes, exp)
def test_shift(self):
# GH 9416
cat = pd.Categorical(['a', 'b', 'c', 'd', 'a'])
# shift forward
sp1 = cat.shift(1)
xp1 = pd.Categorical([np.nan, 'a', 'b', 'c', 'd'])
self.assert_categorical_equal(sp1, xp1)
self.assert_categorical_equal(cat[:-1], sp1[1:])
# shift back
sn2 = cat.shift(-2)
xp2 = pd.Categorical(['c', 'd', 'a', np.nan, np.nan],
categories=['a', 'b', 'c', 'd'])
self.assert_categorical_equal(sn2, xp2)
self.assert_categorical_equal(cat[2:], sn2[:-2])
# shift by zero
self.assert_categorical_equal(cat, cat.shift(0))
def test_nbytes(self):
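# nbytes is the size of the codes array plus the categories values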
cat = pd.Categorical([1, 2, 3])
exp = cat._codes.nbytes + cat._categories.values.nbytes
self.assertEqual(cat.nbytes, exp)
def test_memory_usage(self):
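# for object categories memory_usage(deep=True) also counts the
# string storage and therefore exceeds nbytes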
cat = pd.Categorical([1, 2, 3])
self.assertEqual(cat.nbytes, cat.memory_usage())
self.assertEqual(cat.nbytes, cat.memory_usage(deep=True))
cat = pd.Categorical(['foo', 'foo', 'bar'])
self.assertEqual(cat.nbytes, cat.memory_usage())
self.assertTrue(cat.memory_usage(deep=True) > cat.nbytes)
# sys.getsizeof will call the .memory_usage with
# deep=True, and add on some GC overhead
diff = cat.memory_usage(deep=True) - sys.getsizeof(cat)
self.assertTrue(abs(diff) < 100)
def test_searchsorted(self):
# https://github.com/pandas-dev/pandas/issues/8420
s1 = pd.Series(['apple', 'bread', 'bread', 'cheese', 'milk'])
s2 = pd.Series(['apple', 'bread', 'bread', 'cheese', 'milk', 'donuts'])
c1 = pd.Categorical(s1, ordered=True)
c2 = pd.Categorical(s2, ordered=True)
# Single item array
res = c1.searchsorted(['bread'])
chk = s1.searchsorted(['bread'])
exp = np.array([1], dtype=np.intp)
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# Scalar version of single item array
# Categorical returns an np.array like pd.Series does, which differs
# from np.array.searchsorted()
res = c1.searchsorted('bread')
chk = s1.searchsorted('bread')
exp = np.array([1], dtype=np.intp)
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# Searching for a value that is not present in the Categorical
res = c1.searchsorted(['bread', 'eggs'])
chk = s1.searchsorted(['bread', 'eggs'])
exp = np.array([1, 4], dtype=np.intp)
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# Searching for a value that is not present, to the right
res = c1.searchsorted(['bread', 'eggs'], side='right')
chk = s1.searchsorted(['bread', 'eggs'], side='right')
exp = np.array([3, 4], dtype=np.intp) # eggs before milk
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# As above, but with a sorter array to reorder an unsorted array
res = c2.searchsorted(['bread', 'eggs'], side='right',
sorter=[0, 1, 2, 3, 5, 4])
chk = s2.searchsorted(['bread', 'eggs'], side='right',
sorter=[0, 1, 2, 3, 5, 4])
# eggs after donuts, after switching milk and donuts
exp = np.array([3, 5], dtype=np.intp)
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
def test_deprecated_labels(self):
# TODO: labels is deprecated and should be removed in 0.18 or 2017,
# whichever is earlier
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
exp = cat.codes
with tm.assert_produces_warning(FutureWarning):
res = cat.labels
self.assert_numpy_array_equal(res, exp)
def test_deprecated_from_array(self):
# GH13854, `.from_array` is deprecated
with tm.assert_produces_warning(FutureWarning):
Categorical.from_array([0, 1])
def test_removed_names_produces_warning(self):
# 10482
with tm.assert_produces_warning(UserWarning):
Categorical([0, 1], name="a")
with tm.assert_produces_warning(UserWarning):
Categorical.from_codes([1, 2], ["a", "b", "c"], name="a")
def test_datetime_categorical_comparison(self):
dt_cat = pd.Categorical(
pd.date_range('2014-01-01', periods=3), ordered=True)
self.assert_numpy_array_equal(dt_cat > dt_cat[0],
np.array([False, True, True]))
self.assert_numpy_array_equal(dt_cat[0] < dt_cat,
np.array([False, True, True]))
def test_reflected_comparison_with_scalars(self):
# GH8658
cat = pd.Categorical([1, 2, 3], ordered=True)
self.assert_numpy_array_equal(cat > cat[0],
np.array([False, True, True]))
self.assert_numpy_array_equal(cat[0] < cat,
np.array([False, True, True]))
def test_comparison_with_unknown_scalars(self):
# https://github.com/pandas-dev/pandas/issues/9836#issuecomment-92123057
# ordering comparisons with scalars not in categories should raise
# TypeError, but equality comparisons (== / !=) should not
cat = pd.Categorical([1, 2, 3], ordered=True)
self.assertRaises(TypeError, lambda: cat < 4)
self.assertRaises(TypeError, lambda: cat > 4)
self.assertRaises(TypeError, lambda: 4 < cat)
self.assertRaises(TypeError, lambda: 4 > cat)
self.assert_numpy_array_equal(cat == 4,
np.array([False, False, False]))
self.assert_numpy_array_equal(cat != 4,
np.array([True, True, True]))
def test_map(self):
c = pd.Categorical(list('ABABC'), categories=list('CBA'),
ordered=True)
result = c.map(lambda x: x.lower())
exp = pd.Categorical(list('ababc'), categories=list('cba'),
ordered=True)
tm.assert_categorical_equal(result, exp)
c = pd.Categorical(list('ABABC'), categories=list('ABC'),
ordered=False)
result = c.map(lambda x: x.lower())
exp = pd.Categorical(list('ababc'), categories=list('abc'),
ordered=False)
tm.assert_categorical_equal(result, exp)
result = c.map(lambda x: 1)
tm.assert_numpy_array_equal(result, np.array([1] * 5, dtype=np.int64))
class TestCategoricalAsBlock(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.factor = Categorical(['a', 'b', 'b', 'a', 'a', 'c', 'c', 'c'])
df = DataFrame({'value': np.random.randint(0, 10000, 100)})
labels = ["{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500)]
df = df.sort_values(by=['value'], ascending=True)
df['value_group'] = pd.cut(df.value, range(0, 10500, 500), right=False,
labels=labels)
self.cat = df
def test_dtypes(self):
# GH8143
index = ['cat', 'obj', 'num']
cat = pd.Categorical(['a', 'b', 'c'])
obj = pd.Series(['a', 'b', 'c'])
num = pd.Series([1, 2, 3])
df = pd.concat([pd.Series(cat), obj, num], axis=1, keys=index)
result = df.dtypes == 'object'
expected = Series([False, True, False], index=index)
tm.assert_series_equal(result, expected)
result = df.dtypes == 'int64'
expected = Series([False, False, True], index=index)
tm.assert_series_equal(result, expected)
result = df.dtypes == 'category'
expected = Series([True, False, False], index=index)
tm.assert_series_equal(result, expected)
def test_codes_dtypes(self):
# GH 8453
result = Categorical(['foo', 'bar', 'baz'])
self.assertTrue(result.codes.dtype == 'int8')
result = Categorical(['foo%05d' % i for i in range(400)])
self.assertTrue(result.codes.dtype == 'int16')
result = Categorical(['foo%05d' % i for i in range(40000)])
self.assertTrue(result.codes.dtype == 'int32')
# adding cats
result = Categorical(['foo', 'bar', 'baz'])
self.assertTrue(result.codes.dtype == 'int8')
result = result.add_categories(['foo%05d' % i for i in range(400)])
self.assertTrue(result.codes.dtype == 'int16')
# removing cats
result = result.remove_categories(['foo%05d' % i for i in range(300)])
self.assertTrue(result.codes.dtype == 'int8')
def test_basic(self):
# test basic creation / coercion of categoricals
s = Series(self.factor, name='A')
self.assertEqual(s.dtype, 'category')
self.assertEqual(len(s), len(self.factor))
str(s.values)
str(s)
# in a frame
df = DataFrame({'A': self.factor})
result = df['A']
tm.assert_series_equal(result, s)
result = df.iloc[:, 0]
tm.assert_series_equal(result, s)
self.assertEqual(len(df), len(self.factor))
str(df.values)
str(df)
df = DataFrame({'A': s})
result = df['A']
tm.assert_series_equal(result, s)
self.assertEqual(len(df), len(self.factor))
str(df.values)
str(df)
# multiples
df = DataFrame({'A': s, 'B': s, 'C': 1})
result1 = df['A']
result2 = df['B']
tm.assert_series_equal(result1, s)
tm.assert_series_equal(result2, s, check_names=False)
self.assertEqual(result2.name, 'B')
self.assertEqual(len(df), len(self.factor))
str(df.values)
str(df)
# GH8623
x = pd.DataFrame([[1, 'John P. Doe'], [2, 'Jane Dove'],
[1, 'John P. Doe']],
columns=['person_id', 'person_name'])
x['person_name'] = pd.Categorical(x.person_name
) # doing this breaks transform
expected = x.iloc[0].person_name
result = x.person_name.iloc[0]
self.assertEqual(result, expected)
result = x.person_name[0]
self.assertEqual(result, expected)
result = x.person_name.loc[0]
self.assertEqual(result, expected)
def test_creation_astype(self):
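# astype('category') on a Series should be equivalent to wrapping
# the same values in a Categorical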
l = ["a", "b", "c", "a"]
s = pd.Series(l)
exp = pd.Series(Categorical(l))
res = s.astype('category')
tm.assert_series_equal(res, exp)
l = [1, 2, 3, 1]
s = pd.Series(l)
exp = pd.Series(Categorical(l))
res = s.astype('category')
tm.assert_series_equal(res, exp)
df = pd.DataFrame({"cats": [1, 2, 3, 4, 5, 6],
"vals": [1, 2, 3, 4, 5, 6]})
cats = Categorical([1, 2, 3, 4, 5, 6])
exp_df = pd.DataFrame({"cats": cats, "vals": [1, 2, 3, 4, 5, 6]})
df["cats"] = df["cats"].astype("category")
tm.assert_frame_equal(exp_df, df)
df = pd.DataFrame({"cats": ['a', 'b', 'b', 'a', 'a', 'd'],
"vals": [1, 2, 3, 4, 5, 6]})
cats = Categorical(['a', 'b', 'b', 'a', 'a', 'd'])
exp_df = pd.DataFrame({"cats": cats, "vals": [1, 2, 3, 4, 5, 6]})
df["cats"] = df["cats"].astype("category")
tm.assert_frame_equal(exp_df, df)
# with keywords
l = ["a", "b", "c", "a"]
s = pd.Series(l)
exp = pd.Series(Categorical(l, ordered=True))
res = s.astype('category', ordered=True)
tm.assert_series_equal(res, exp)
exp = pd.Series(Categorical(
l, categories=list('abcdef'), ordered=True))
res = s.astype('category', categories=list('abcdef'), ordered=True)
tm.assert_series_equal(res, exp)
def test_construction_series(self):
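# Series(..., dtype='category') should match astype('category') on
# the same data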
l = [1, 2, 3, 1]
exp = Series(l).astype('category')
res = Series(l, dtype='category')
tm.assert_series_equal(res, exp)
l = ["a", "b", "c", "a"]
exp = Series(l).astype('category')
res = Series(l, dtype='category')
tm.assert_series_equal(res, exp)
# insert into frame with different index
# GH 8076
index = pd.date_range('20000101', periods=3)
expected = Series(Categorical(values=[np.nan, np.nan, np.nan],
categories=['a', 'b', 'c']))
expected.index = index
expected = DataFrame({'x': expected})
df = DataFrame(
{'x': Series(['a', 'b', 'c'], dtype='category')}, index=index)
tm.assert_frame_equal(df, expected)
def test_construction_frame(self):
# GH8626
# dict creation
df = DataFrame({'A': list('abc')}, dtype='category')
expected = Series(list('abc'), dtype='category', name='A')
tm.assert_series_equal(df['A'], expected)
# to_frame
s = Series(list('abc'), dtype='category')
result = s.to_frame()
expected = Series(list('abc'), dtype='category', name=0)
tm.assert_series_equal(result[0], expected)
result = s.to_frame(name='foo')
expected = Series(list('abc'), dtype='category', name='foo')
tm.assert_series_equal(result['foo'], expected)
# list-like creation
df = DataFrame(list('abc'), dtype='category')
expected = Series(list('abc'), dtype='category', name=0)
tm.assert_series_equal(df[0], expected)
# ndim != 1
df = DataFrame([pd.Categorical(list('abc'))])
expected = DataFrame({0: Series(list('abc'), dtype='category')})
tm.assert_frame_equal(df, expected)
df = DataFrame([pd.Categorical(list('abc')), pd.Categorical(list(
'abd'))])
expected = DataFrame({0: Series(list('abc'), dtype='category'),
1: Series(list('abd'), dtype='category')},
columns=[0, 1])
tm.assert_frame_equal(df, expected)
# mixed
df = DataFrame([pd.Categorical(list('abc')), list('def')])
expected = DataFrame({0: Series(list('abc'), dtype='category'),
1: list('def')}, columns=[0, 1])
tm.assert_frame_equal(df, expected)
# invalid (shape)
self.assertRaises(
ValueError,
lambda: DataFrame([pd.Categorical(list('abc')),
pd.Categorical(list('abdefg'))]))
# ndim > 1
self.assertRaises(NotImplementedError,
lambda: pd.Categorical(np.array([list('abcd')])))
def test_reshaping(self):
p = tm.makePanel()
p['str'] = 'foo'
df = p.to_frame()
df['category'] = df['str'].astype('category')
result = df['category'].unstack()
c = Categorical(['foo'] * len(p.major_axis))
expected = DataFrame({'A': c.copy(),
'B': c.copy(),
'C': c.copy(),
'D': c.copy()},
columns=Index(list('ABCD'), name='minor'),
index=p.major_axis.set_names('major'))
tm.assert_frame_equal(result, expected)
def test_reindex(self):
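# reindexing keeps the original categories; positions missing from
# the original index become NaN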
index = pd.date_range('20000101', periods=3)
# reindexing to an invalid Categorical
s = Series(['a', 'b', 'c'], dtype='category')
result = s.reindex(index)
expected = Series(Categorical(values=[np.nan, np.nan, np.nan],
categories=['a', 'b', 'c']))
expected.index = index
tm.assert_series_equal(result, expected)
# partial reindexing
expected = Series(Categorical(values=['b', 'c'], categories=['a', 'b',
'c']))
expected.index = [1, 2]
result = s.reindex([1, 2])
tm.assert_series_equal(result, expected)
expected = Series(Categorical(
values=['c', np.nan], categories=['a', 'b', 'c']))
expected.index = [2, 3]
result = s.reindex([2, 3])
tm.assert_series_equal(result, expected)
def test_sideeffects_free(self):
# Passing a categorical to a Series and then changing values in either
# the series or the categorical should not change the values in the
# other one, IF you specify copy!
cat = Categorical(["a", "b", "c", "a"])
s = pd.Series(cat, copy=True)
self.assertFalse(s.cat is cat)
s.cat.categories = [1, 2, 3]
exp_s = np.array([1, 2, 3, 1], dtype=np.int64)
exp_cat = np.array(["a", "b", "c", "a"], dtype=np.object_)
self.assert_numpy_array_equal(s.__array__(), exp_s)
self.assert_numpy_array_equal(cat.__array__(), exp_cat)
# setting
s[0] = 2
exp_s2 = np.array([2, 2, 3, 1], dtype=np.int64)
self.assert_numpy_array_equal(s.__array__(), exp_s2)
self.assert_numpy_array_equal(cat.__array__(), exp_cat)
# however, copy is False by default
# so this WILL change values
cat = Categorical(["a", "b", "c", "a"])
s = pd.Series(cat)
self.assertTrue(s.values is cat)
s.cat.categories = [1, 2, 3]
exp_s = np.array([1, 2, 3, 1], dtype=np.int64)
self.assert_numpy_array_equal(s.__array__(), exp_s)
self.assert_numpy_array_equal(cat.__array__(), exp_s)
s[0] = 2
exp_s2 = np.array([2, 2, 3, 1], dtype=np.int64)
self.assert_numpy_array_equal(s.__array__(), exp_s2)
self.assert_numpy_array_equal(cat.__array__(), exp_s2)
def test_nan_handling(self):
# Nans are represented as -1 in the codes
s = Series(Categorical(["a", "b", np.nan, "a"]))
self.assert_index_equal(s.cat.categories, Index(["a", "b"]))
self.assert_numpy_array_equal(s.values.codes,
np.array([0, 1, -1, 0], dtype=np.int8))
# If categories have nan included, the code should point to that
# instead
with tm.assert_produces_warning(FutureWarning):
s2 = Series(Categorical(["a", "b", np.nan, "a"],
categories=["a", "b", np.nan]))
exp_cat = Index(["a", "b", np.nan])
self.assert_index_equal(s2.cat.categories, exp_cat)
self.assert_numpy_array_equal(s2.values.codes,
np.array([0, 1, 2, 0], dtype=np.int8))
# Changing categories should also make the replaced category np.nan
s3 = Series(Categorical(["a", "b", "c", "a"]))
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
s3.cat.categories = ["a", "b", np.nan]
exp_cat = Index(["a", "b", np.nan])
self.assert_index_equal(s3.cat.categories, exp_cat)
self.assert_numpy_array_equal(s3.values.codes,
np.array([0, 1, 2, 0], dtype=np.int8))
def test_cat_accessor(self):
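# the .cat accessor exposes categories/ordered and delegates
# set_categories and remove_unused_categories to the Categorical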
s = Series(Categorical(["a", "b", np.nan, "a"]))
self.assert_index_equal(s.cat.categories, Index(["a", "b"]))
self.assertEqual(s.cat.ordered, False)
exp = Categorical(["a", "b", np.nan, "a"], categories=["b", "a"])
s.cat.set_categories(["b", "a"], inplace=True)
tm.assert_categorical_equal(s.values, exp)
res = s.cat.set_categories(["b", "a"])
tm.assert_categorical_equal(res.values, exp)
exp = Categorical(["a", "b", np.nan, "a"], categories=["b", "a"])
s[:] = "a"
s = s.cat.remove_unused_categories()
self.assert_index_equal(s.cat.categories, Index(["a"]))
def test_sequence_like(self):
# GH 7839
# make sure can iterate
df = DataFrame({"id": [1, 2, 3, 4, 5, 6],
"raw_grade": ['a', 'b', 'b', 'a', 'a', 'e']})
df['grade'] = Categorical(df['raw_grade'])
# basic sequencing testing
result = list(df.grade.values)
expected = np.array(df.grade.values).tolist()
tm.assert_almost_equal(result, expected)
# iteration
for t in df.itertuples(index=False):
str(t)
for row, s in df.iterrows():
str(s)
for c, col in df.iteritems():
str(col)
def test_series_delegations(self):
# invalid accessor
self.assertRaises(AttributeError, lambda: Series([1, 2, 3]).cat)
tm.assertRaisesRegexp(
AttributeError,
r"Can only use .cat accessor with a 'category' dtype",
lambda: Series([1, 2, 3]).cat)
self.assertRaises(AttributeError, lambda: Series(['a', 'b', 'c']).cat)
self.assertRaises(AttributeError, lambda: Series(np.arange(5.)).cat)
self.assertRaises(AttributeError,
lambda: Series([Timestamp('20130101')]).cat)
# Series should delegate calls to '.categories', '.codes', '.ordered'
# and the methods '.set_categories()' 'drop_unused_categories()' to the
# categorical
s = Series(Categorical(["a", "b", "c", "a"], ordered=True))
exp_categories = Index(["a", "b", "c"])
tm.assert_index_equal(s.cat.categories, exp_categories)
s.cat.categories = [1, 2, 3]
exp_categories = Index([1, 2, 3])
self.assert_index_equal(s.cat.categories, exp_categories)
exp_codes = Series([0, 1, 2, 0], dtype='int8')
tm.assert_series_equal(s.cat.codes, exp_codes)
self.assertEqual(s.cat.ordered, True)
s = s.cat.as_unordered()
self.assertEqual(s.cat.ordered, False)
s.cat.as_ordered(inplace=True)
self.assertEqual(s.cat.ordered, True)
# reorder
s = Series(Categorical(["a", "b", "c", "a"], ordered=True))
exp_categories = Index(["c", "b", "a"])
exp_values = np.array(["a", "b", "c", "a"], dtype=np.object_)
s = s.cat.set_categories(["c", "b", "a"])
tm.assert_index_equal(s.cat.categories, exp_categories)
self.assert_numpy_array_equal(s.values.__array__(), exp_values)
self.assert_numpy_array_equal(s.__array__(), exp_values)
# remove unused categories
s = Series(Categorical(["a", "b", "b", "a"], categories=["a", "b", "c"
]))
exp_categories = Index(["a", "b"])
exp_values = np.array(["a", "b", "b", "a"], dtype=np.object_)
s = s.cat.remove_unused_categories()
self.assert_index_equal(s.cat.categories, exp_categories)
self.assert_numpy_array_equal(s.values.__array__(), exp_values)
self.assert_numpy_array_equal(s.__array__(), exp_values)
# Calling set_categories directly on the Series (instead of via .cat)
# is a likely mistake, so test that it raises an error:
def f():
s.set_categories([4, 3, 2, 1])
self.assertRaises(Exception, f)
# right: s.cat.set_categories([4,3,2,1])
def test_series_functions_no_warnings(self):
df = pd.DataFrame({'value': np.random.randint(0, 100, 20)})
labels = ["{0} - {1}".format(i, i + 9) for i in range(0, 100, 10)]
with tm.assert_produces_warning(False):
df['group'] = pd.cut(df.value, range(0, 105, 10), right=False,
labels=labels)
def test_assignment_to_dataframe(self):
# assignment
df = DataFrame({'value': np.array(np.random.randint(0, 10000, 100),
dtype='int32')})
labels = ["{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500)]
df = df.sort_values(by=['value'], ascending=True)
s = pd.cut(df.value, range(0, 10500, 500), right=False, labels=labels)
d = s.values
df['D'] = d
str(df)
result = df.dtypes
expected = Series(
[np.dtype('int32'), CategoricalDtype()], index=['value', 'D'])
tm.assert_series_equal(result, expected)
df['E'] = s
str(df)
result = df.dtypes
expected = Series([np.dtype('int32'), CategoricalDtype(),
CategoricalDtype()],
index=['value', 'D', 'E'])
tm.assert_series_equal(result, expected)
result1 = df['D']
result2 = df['E']
self.assert_categorical_equal(result1._data._block.values, d)
# sorting
s.name = 'E'
self.assert_series_equal(result2.sort_index(), s.sort_index())
cat = pd.Categorical([1, 2, 3, 10], categories=[1, 2, 3, 4, 10])
df = pd.DataFrame(pd.Series(cat))
def test_describe(self):
# Categoricals should not show up together with numerical columns
result = self.cat.describe()
self.assertEqual(len(result.columns), 1)
# In a frame, describe() for the cat should be the same as for string
# arrays (count, unique, top, freq)
cat = Categorical(["a", "b", "b", "b"], categories=['a', 'b', 'c'],
ordered=True)
s = Series(cat)
result = s.describe()
expected = Series([4, 2, "b", 3],
index=['count', 'unique', 'top', 'freq'])
tm.assert_series_equal(result, expected)
cat = pd.Series(pd.Categorical(["a", "b", "c", "c"]))
df3 = pd.DataFrame({"cat": cat, "s": ["a", "b", "c", "c"]})
res = df3.describe()
self.assert_numpy_array_equal(res["cat"].values, res["s"].values)
def test_repr(self):
a = pd.Series(pd.Categorical([1, 2, 3, 4]))
exp = u("0 1\n1 2\n2 3\n3 4\n" +
"dtype: category\nCategories (4, int64): [1, 2, 3, 4]")
self.assertEqual(exp, a.__unicode__())
a = pd.Series(pd.Categorical(["a", "b"] * 25))
exp = u("0 a\n1 b\n" + " ..\n" + "48 a\n49 b\n" +
"dtype: category\nCategories (2, object): [a, b]")
with option_context("display.max_rows", 5):
self.assertEqual(exp, repr(a))
levs = list("abcdefghijklmnopqrstuvwxyz")
a = pd.Series(pd.Categorical(
["a", "b"], categories=levs, ordered=True))
exp = u("0 a\n1 b\n" + "dtype: category\n"
"Categories (26, object): [a < b < c < d ... w < x < y < z]")
self.assertEqual(exp, a.__unicode__())
def test_categorical_repr(self):
c = pd.Categorical([1, 2, 3])
exp = """[1, 2, 3]
Categories (3, int64): [1, 2, 3]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical([1, 2, 3, 1, 2, 3], categories=[1, 2, 3])
exp = """[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1, 2, 3]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical([1, 2, 3, 4, 5] * 10)
exp = """[1, 2, 3, 4, 5, ..., 1, 2, 3, 4, 5]
Length: 50
Categories (5, int64): [1, 2, 3, 4, 5]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(np.arange(20))
exp = """[0, 1, 2, 3, 4, ..., 15, 16, 17, 18, 19]
Length: 20
Categories (20, int64): [0, 1, 2, 3, ..., 16, 17, 18, 19]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_ordered(self):
c = pd.Categorical([1, 2, 3], ordered=True)
exp = """[1, 2, 3]
Categories (3, int64): [1 < 2 < 3]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical([1, 2, 3, 1, 2, 3], categories=[1, 2, 3],
ordered=True)
exp = """[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1 < 2 < 3]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical([1, 2, 3, 4, 5] * 10, ordered=True)
exp = """[1, 2, 3, 4, 5, ..., 1, 2, 3, 4, 5]
Length: 50
Categories (5, int64): [1 < 2 < 3 < 4 < 5]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(np.arange(20), ordered=True)
exp = """[0, 1, 2, 3, 4, ..., 15, 16, 17, 18, 19]
Length: 20
Categories (20, int64): [0 < 1 < 2 < 3 ... 16 < 17 < 18 < 19]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_datetime(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
c = pd.Categorical(idx)
# TODO(wesm): exceeding 80 characters in the console is not good
# behavior
exp = (
"[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, "
"2011-01-01 12:00:00, 2011-01-01 13:00:00]\n"
"Categories (5, datetime64[ns]): [2011-01-01 09:00:00, "
"2011-01-01 10:00:00, 2011-01-01 11:00:00,\n"
" 2011-01-01 12:00:00, "
"2011-01-01 13:00:00]""")
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = (
"[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, "
"2011-01-01 12:00:00, 2011-01-01 13:00:00, 2011-01-01 09:00:00, "
"2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, "
"2011-01-01 13:00:00]\n"
"Categories (5, datetime64[ns]): [2011-01-01 09:00:00, "
"2011-01-01 10:00:00, 2011-01-01 11:00:00,\n"
" 2011-01-01 12:00:00, "
"2011-01-01 13:00:00]")
self.assertEqual(repr(c), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
c = pd.Categorical(idx)
exp = (
"[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, "
"2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, "
"2011-01-01 13:00:00-05:00]\n"
"Categories (5, datetime64[ns, US/Eastern]): "
"[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,\n"
" "
"2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,\n"
" "
"2011-01-01 13:00:00-05:00]")
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = (
"[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, "
"2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, "
"2011-01-01 13:00:00-05:00, 2011-01-01 09:00:00-05:00, "
"2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, "
"2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]\n"
"Categories (5, datetime64[ns, US/Eastern]): "
"[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,\n"
" "
"2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,\n"
" "
"2011-01-01 13:00:00-05:00]")
self.assertEqual(repr(c), exp)
def test_categorical_repr_datetime_ordered(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
c = pd.Categorical(idx, ordered=True)
exp = """[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00]
Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 <
2011-01-01 12:00:00 < 2011-01-01 13:00:00]""" # noqa
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00, 2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00]
Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 <
2011-01-01 12:00:00 < 2011-01-01 13:00:00]""" # noqa
self.assertEqual(repr(c), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
c = pd.Categorical(idx, ordered=True)
exp = """[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 <
2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 <
2011-01-01 13:00:00-05:00]""" # noqa
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00, 2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 <
2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 <
2011-01-01 13:00:00-05:00]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_period(self):
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
c = pd.Categorical(idx)
exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period[H]): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00,
2011-01-01 13:00]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00, 2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period[H]): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00,
2011-01-01 13:00]"""
self.assertEqual(repr(c), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
c = pd.Categorical(idx)
exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
Categories (5, period[M]): [2011-01, 2011-02, 2011-03, 2011-04, 2011-05]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05, 2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
Categories (5, period[M]): [2011-01, 2011-02, 2011-03, 2011-04, 2011-05]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_period_ordered(self):
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
c = pd.Categorical(idx, ordered=True)
exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period[H]): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 <
2011-01-01 13:00]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00, 2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period[H]): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 <
2011-01-01 13:00]"""
self.assertEqual(repr(c), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
c = pd.Categorical(idx, ordered=True)
exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
Categories (5, period[M]): [2011-01 < 2011-02 < 2011-03 < 2011-04 < 2011-05]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05, 2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
Categories (5, period[M]): [2011-01 < 2011-02 < 2011-03 < 2011-04 < 2011-05]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_timedelta(self):
idx = pd.timedelta_range('1 days', periods=5)
c = pd.Categorical(idx)
exp = """[1 days, 2 days, 3 days, 4 days, 5 days]
Categories (5, timedelta64[ns]): [1 days, 2 days, 3 days, 4 days, 5 days]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = """[1 days, 2 days, 3 days, 4 days, 5 days, 1 days, 2 days, 3 days, 4 days, 5 days]
Categories (5, timedelta64[ns]): [1 days, 2 days, 3 days, 4 days, 5 days]"""
self.assertEqual(repr(c), exp)
idx = pd.timedelta_range('1 hours', periods=20)
c = pd.Categorical(idx)
exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
Length: 20
Categories (20, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00,
3 days 01:00:00, ..., 16 days 01:00:00, 17 days 01:00:00,
18 days 01:00:00, 19 days 01:00:00]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
Length: 40
Categories (20, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00,
3 days 01:00:00, ..., 16 days 01:00:00, 17 days 01:00:00,
18 days 01:00:00, 19 days 01:00:00]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_timedelta_ordered(self):
idx = pd.timedelta_range('1 days', periods=5)
c = pd.Categorical(idx, ordered=True)
exp = """[1 days, 2 days, 3 days, 4 days, 5 days]
Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[1 days, 2 days, 3 days, 4 days, 5 days, 1 days, 2 days, 3 days, 4 days, 5 days]
Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]"""
self.assertEqual(repr(c), exp)
idx = pd.timedelta_range('1 hours', periods=20)
c = pd.Categorical(idx, ordered=True)
exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
Length: 20
Categories (20, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 <
3 days 01:00:00 ... 16 days 01:00:00 < 17 days 01:00:00 <
18 days 01:00:00 < 19 days 01:00:00]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
Length: 40
Categories (20, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 <
3 days 01:00:00 ... 16 days 01:00:00 < 17 days 01:00:00 <
18 days 01:00:00 < 19 days 01:00:00]"""
self.assertEqual(repr(c), exp)
def test_categorical_series_repr(self):
s = pd.Series(pd.Categorical([1, 2, 3]))
exp = """0 1
1 2
2 3
dtype: category
Categories (3, int64): [1, 2, 3]"""
self.assertEqual(repr(s), exp)
s = pd.Series(pd.Categorical(np.arange(10)))
exp = """0 0
1 1
2 2
3 3
4 4
5 5
6 6
7 7
8 8
9 9
dtype: category
Categories (10, int64): [0, 1, 2, 3, ..., 6, 7, 8, 9]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_ordered(self):
s = pd.Series(pd.Categorical([1, 2, 3], ordered=True))
exp = """0 1
1 2
2 3
dtype: category
Categories (3, int64): [1 < 2 < 3]"""
self.assertEqual(repr(s), exp)
s = pd.Series(pd.Categorical(np.arange(10), ordered=True))
exp = """0 0
1 1
2 2
3 3
4 4
5 5
6 6
7 7
8 8
9 9
dtype: category
Categories (10, int64): [0 < 1 < 2 < 3 ... 6 < 7 < 8 < 9]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_datetime(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
s = pd.Series(pd.Categorical(idx))
exp = """0 2011-01-01 09:00:00
1 2011-01-01 10:00:00
2 2011-01-01 11:00:00
3 2011-01-01 12:00:00
4 2011-01-01 13:00:00
dtype: category
Categories (5, datetime64[ns]): [2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00,
2011-01-01 12:00:00, 2011-01-01 13:00:00]"""
self.assertEqual(repr(s), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
s = pd.Series(pd.Categorical(idx))
exp = """0 2011-01-01 09:00:00-05:00
1 2011-01-01 10:00:00-05:00
2 2011-01-01 11:00:00-05:00
3 2011-01-01 12:00:00-05:00
4 2011-01-01 13:00:00-05:00
dtype: category
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,
2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,
2011-01-01 13:00:00-05:00]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_datetime_ordered(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 2011-01-01 09:00:00
1 2011-01-01 10:00:00
2 2011-01-01 11:00:00
3 2011-01-01 12:00:00
4 2011-01-01 13:00:00
dtype: category
Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 <
2011-01-01 12:00:00 < 2011-01-01 13:00:00]"""
self.assertEqual(repr(s), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 2011-01-01 09:00:00-05:00
1 2011-01-01 10:00:00-05:00
2 2011-01-01 11:00:00-05:00
3 2011-01-01 12:00:00-05:00
4 2011-01-01 13:00:00-05:00
dtype: category
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 <
2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 <
2011-01-01 13:00:00-05:00]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_period(self):
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
s = pd.Series(pd.Categorical(idx))
exp = """0 2011-01-01 09:00
1 2011-01-01 10:00
2 2011-01-01 11:00
3 2011-01-01 12:00
4 2011-01-01 13:00
dtype: category
Categories (5, period[H]): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00,
2011-01-01 13:00]"""
self.assertEqual(repr(s), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
s = pd.Series(pd.Categorical(idx))
exp = """0 2011-01
1 2011-02
2 2011-03
3 2011-04
4 2011-05
dtype: category
Categories (5, period[M]): [2011-01, 2011-02, 2011-03, 2011-04, 2011-05]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_period_ordered(self):
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 2011-01-01 09:00
1 2011-01-01 10:00
2 2011-01-01 11:00
3 2011-01-01 12:00
4 2011-01-01 13:00
dtype: category
Categories (5, period[H]): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 <
2011-01-01 13:00]"""
self.assertEqual(repr(s), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 2011-01
1 2011-02
2 2011-03
3 2011-04
4 2011-05
dtype: category
Categories (5, period[M]): [2011-01 < 2011-02 < 2011-03 < 2011-04 < 2011-05]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_timedelta(self):
idx = pd.timedelta_range('1 days', periods=5)
s = pd.Series(pd.Categorical(idx))
exp = """0 1 days
1 2 days
2 3 days
3 4 days
4 5 days
dtype: category
Categories (5, timedelta64[ns]): [1 days, 2 days, 3 days, 4 days, 5 days]"""
self.assertEqual(repr(s), exp)
idx = pd.timedelta_range('1 hours', periods=10)
s = pd.Series(pd.Categorical(idx))
exp = """0 0 days 01:00:00
1 1 days 01:00:00
2 2 days 01:00:00
3 3 days 01:00:00
4 4 days 01:00:00
5 5 days 01:00:00
6 6 days 01:00:00
7 7 days 01:00:00
8 8 days 01:00:00
9 9 days 01:00:00
dtype: category
Categories (10, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00,
3 days 01:00:00, ..., 6 days 01:00:00, 7 days 01:00:00,
8 days 01:00:00, 9 days 01:00:00]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_timedelta_ordered(self):
idx = pd.timedelta_range('1 days', periods=5)
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 1 days
1 2 days
2 3 days
3 4 days
4 5 days
dtype: category
Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]"""
self.assertEqual(repr(s), exp)
idx = pd.timedelta_range('1 hours', periods=10)
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 0 days 01:00:00
1 1 days 01:00:00
2 2 days 01:00:00
3 3 days 01:00:00
4 4 days 01:00:00
5 5 days 01:00:00
6 6 days 01:00:00
7 7 days 01:00:00
8 8 days 01:00:00
9 9 days 01:00:00
dtype: category
Categories (10, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 <
3 days 01:00:00 ... 6 days 01:00:00 < 7 days 01:00:00 <
8 days 01:00:00 < 9 days 01:00:00]"""
self.assertEqual(repr(s), exp)
def test_categorical_index_repr(self):
idx = pd.CategoricalIndex(pd.Categorical([1, 2, 3]))
exp = """CategoricalIndex([1, 2, 3], categories=[1, 2, 3], ordered=False, dtype='category')"""
self.assertEqual(repr(idx), exp)
i = pd.CategoricalIndex(pd.Categorical(np.arange(10)))
exp = """CategoricalIndex([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], categories=[0, 1, 2, 3, 4, 5, 6, 7, ...], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_ordered(self):
i = pd.CategoricalIndex(pd.Categorical([1, 2, 3], ordered=True))
exp = """CategoricalIndex([1, 2, 3], categories=[1, 2, 3], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
i = pd.CategoricalIndex(pd.Categorical(np.arange(10), ordered=True))
exp = """CategoricalIndex([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], categories=[0, 1, 2, 3, 4, 5, 6, 7, ...], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_datetime(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00:00', '2011-01-01 10:00:00',
'2011-01-01 11:00:00', '2011-01-01 12:00:00',
'2011-01-01 13:00:00'],
categories=[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00',
'2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00',
'2011-01-01 13:00:00-05:00'],
categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_datetime_ordered(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['2011-01-01 09:00:00', '2011-01-01 10:00:00',
'2011-01-01 11:00:00', '2011-01-01 12:00:00',
'2011-01-01 13:00:00'],
categories=[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00',
'2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00',
'2011-01-01 13:00:00-05:00'],
categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
i = pd.CategoricalIndex(pd.Categorical(idx.append(idx), ordered=True))
exp = """CategoricalIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00',
'2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00',
'2011-01-01 13:00:00-05:00', '2011-01-01 09:00:00-05:00',
'2011-01-01 10:00:00-05:00', '2011-01-01 11:00:00-05:00',
'2011-01-01 12:00:00-05:00', '2011-01-01 13:00:00-05:00'],
categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_period(self):
# test all lengths
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=1)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00'], categories=[2011-01-01 09:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=2)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00'], categories=[2011-01-01 09:00, 2011-01-01 10:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=3)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'], categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00',
'2011-01-01 12:00', '2011-01-01 13:00'],
categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
i = pd.CategoricalIndex(pd.Categorical(idx.append(idx)))
exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00',
'2011-01-01 12:00', '2011-01-01 13:00', '2011-01-01 09:00',
'2011-01-01 10:00', '2011-01-01 11:00', '2011-01-01 12:00',
'2011-01-01 13:00'],
categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01', '2011-02', '2011-03', '2011-04', '2011-05'], categories=[2011-01, 2011-02, 2011-03, 2011-04, 2011-05], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_period_ordered(self):
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00',
'2011-01-01 12:00', '2011-01-01 13:00'],
categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['2011-01', '2011-02', '2011-03', '2011-04', '2011-05'], categories=[2011-01, 2011-02, 2011-03, 2011-04, 2011-05], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_timedelta(self):
idx = pd.timedelta_range('1 days', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['1 days', '2 days', '3 days', '4 days', '5 days'], categories=[1 days 00:00:00, 2 days 00:00:00, 3 days 00:00:00, 4 days 00:00:00, 5 days 00:00:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.timedelta_range('1 hours', periods=10)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['0 days 01:00:00', '1 days 01:00:00', '2 days 01:00:00',
'3 days 01:00:00', '4 days 01:00:00', '5 days 01:00:00',
'6 days 01:00:00', '7 days 01:00:00', '8 days 01:00:00',
'9 days 01:00:00'],
categories=[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, 5 days 01:00:00, 6 days 01:00:00, 7 days 01:00:00, ...], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_timedelta_ordered(self):
idx = pd.timedelta_range('1 days', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['1 days', '2 days', '3 days', '4 days', '5 days'], categories=[1 days 00:00:00, 2 days 00:00:00, 3 days 00:00:00, 4 days 00:00:00, 5 days 00:00:00], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.timedelta_range('1 hours', periods=10)
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['0 days 01:00:00', '1 days 01:00:00', '2 days 01:00:00',
'3 days 01:00:00', '4 days 01:00:00', '5 days 01:00:00',
'6 days 01:00:00', '7 days 01:00:00', '8 days 01:00:00',
'9 days 01:00:00'],
categories=[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, 5 days 01:00:00, 6 days 01:00:00, 7 days 01:00:00, ...], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_frame(self):
# normal DataFrame
dt = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
p = pd.period_range('2011-01', freq='M', periods=5)
df = pd.DataFrame({'dt': dt, 'p': p})
exp = """ dt p
0 2011-01-01 09:00:00-05:00 2011-01
1 2011-01-01 10:00:00-05:00 2011-02
2 2011-01-01 11:00:00-05:00 2011-03
3 2011-01-01 12:00:00-05:00 2011-04
4 2011-01-01 13:00:00-05:00 2011-05"""
df = pd.DataFrame({'dt': pd.Categorical(dt), 'p': pd.Categorical(p)})
self.assertEqual(repr(df), exp)
def test_info(self):
# make sure isnull/info don't raise on a frame with a categorical column
n = 2500
df = DataFrame({'int64': np.random.randint(100, size=n)})
df['category'] = Series(np.array(list('abcdefghij')).take(
np.random.randint(0, 10, size=n))).astype('category')
df.isnull()
df.info()
df2 = df[df['category'] == 'd']
df2.info()
def test_groupby_sort(self):
# http://stackoverflow.com/questions/23814368/sorting-pandas-categorical-labels-after-groupby
# This should result in a properly sorted Series so that the plot
# has a sorted x axis
# self.cat.groupby(['value_group'])['value_group'].count().plot(kind='bar')
res = self.cat.groupby(['value_group'])['value_group'].count()
exp = res[sorted(res.index, key=lambda x: float(x.split()[0]))]
exp.index = pd.CategoricalIndex(exp.index, name=exp.index.name)
tm.assert_series_equal(res, exp)
def test_min_max(self):
# unordered cats have no min/max
cat = Series(Categorical(["a", "b", "c", "d"], ordered=False))
self.assertRaises(TypeError, lambda: cat.min())
self.assertRaises(TypeError, lambda: cat.max())
cat = Series(Categorical(["a", "b", "c", "d"], ordered=True))
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "a")
self.assertEqual(_max, "d")
cat = Series(Categorical(["a", "b", "c", "d"], categories=[
'd', 'c', 'b', 'a'], ordered=True))
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "d")
self.assertEqual(_max, "a")
cat = Series(Categorical(
[np.nan, "b", "c", np.nan], categories=['d', 'c', 'b', 'a'
], ordered=True))
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, "b")
cat = Series(Categorical(
[np.nan, 1, 2, np.nan], categories=[5, 4, 3, 2, 1], ordered=True))
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, 1)
def test_mode(self):
s = Series(Categorical([1, 1, 2, 4, 5, 5, 5],
categories=[5, 4, 3, 2, 1], ordered=True))
res = s.mode()
exp = Series(Categorical([5], categories=[
5, 4, 3, 2, 1], ordered=True))
tm.assert_series_equal(res, exp)
s = Series(Categorical([1, 1, 1, 4, 5, 5, 5],
categories=[5, 4, 3, 2, 1], ordered=True))
res = s.mode()
exp = Series(Categorical([5, 1], categories=[
5, 4, 3, 2, 1], ordered=True))
tm.assert_series_equal(res, exp)
s = Series(Categorical([1, 2, 3, 4, 5], categories=[5, 4, 3, 2, 1],
ordered=True))
res = s.mode()
exp = Series(Categorical([], categories=[5, 4, 3, 2, 1], ordered=True))
tm.assert_series_equal(res, exp)
def test_value_counts(self):
# GH 12835
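# value_counts on a categorical keeps unobserved categories ("d") in the
# result index with a count of 0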
cats = pd.Categorical(["a", "b", "c", "c", "c", "b"],
categories=["c", "a", "b", "d"])
s = pd.Series(cats, name='xxx')
res = s.value_counts(sort=False)
exp_index = pd.CategoricalIndex(["c", "a", "b", "d"],
categories=cats.categories)
exp = Series([3, 1, 2, 0], name='xxx', index=exp_index)
tm.assert_series_equal(res, exp)
res = s.value_counts(sort=True)
exp_index = pd.CategoricalIndex(["c", "b", "a", "d"],
categories=cats.categories)
exp = Series([3, 2, 1, 0], name='xxx', index=exp_index)
tm.assert_series_equal(res, exp)
# check that object dtype handles Series.name the same way
# (tested in test_base.py)
s = pd.Series(["a", "b", "c", "c", "c", "b"], name='xxx')
res = s.value_counts()
exp = Series([3, 2, 1], name='xxx', index=["c", "b", "a"])
tm.assert_series_equal(res, exp)
def test_value_counts_with_nan(self):
# see gh-9443
# sanity check
s = pd.Series(["a", "b", "a"], dtype="category")
exp = pd.Series([2, 1], index=pd.CategoricalIndex(["a", "b"]))
res = s.value_counts(dropna=True)
tm.assert_series_equal(res, exp)
res = s.value_counts(dropna=True)
tm.assert_series_equal(res, exp)
# same Series via two different constructions --> same behaviour
series = [
pd.Series(["a", "b", None, "a", None, None], dtype="category"),
pd.Series(pd.Categorical(["a", "b", None, "a", None, None],
categories=["a", "b"]))
]
for s in series:
# None is a NaN value, so we exclude its count here
exp = pd.Series([2, 1], index=pd.CategoricalIndex(["a", "b"]))
res = s.value_counts(dropna=True)
tm.assert_series_equal(res, exp)
# we don't exclude the count of None and sort by counts
exp = pd.Series([3, 2, 1], index=pd.CategoricalIndex([np.nan, "a", "b"]))
res = s.value_counts(dropna=False)
tm.assert_series_equal(res, exp)
# When we aren't sorting by counts, and np.nan isn't a
# category, it should be last.
exp = pd.Series([2, 1, 3], index=pd.CategoricalIndex(["a", "b", np.nan]))
res = s.value_counts(dropna=False, sort=False)
tm.assert_series_equal(res, exp)
def test_groupby(self):
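# grouping on a categorical column keeps every category in the result
# index; unobserved categories ("d", "z", "y") show up as NaN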
cats = Categorical(["a", "a", "a", "b", "b", "b", "c", "c", "c"],
categories=["a", "b", "c", "d"], ordered=True)
data = DataFrame({"a": [1, 1, 1, 2, 2, 2, 3, 4, 5], "b": cats})
exp_index = pd.CategoricalIndex(['a', 'b', 'c', 'd'], name='b',
ordered=True)
expected = DataFrame({'a': [1, 2, 4, np.nan]}, index=exp_index)
result = data.groupby("b").mean()
tm.assert_frame_equal(result, expected)
raw_cat1 = Categorical(["a", "a", "b", "b"],
categories=["a", "b", "z"], ordered=True)
raw_cat2 = Categorical(["c", "d", "c", "d"],
categories=["c", "d", "y"], ordered=True)
df = DataFrame({"A": raw_cat1, "B": raw_cat2, "values": [1, 2, 3, 4]})
# single grouper
gb = df.groupby("A")
exp_idx = pd.CategoricalIndex(['a', 'b', 'z'], name='A', ordered=True)
expected = DataFrame({'values': Series([3, 7, np.nan], index=exp_idx)})
result = gb.sum()
tm.assert_frame_equal(result, expected)
# multiple groupers
gb = df.groupby(['A', 'B'])
exp_index = pd.MultiIndex.from_product(
[Categorical(["a", "b", "z"], ordered=True),
Categorical(["c", "d", "y"], ordered=True)],
names=['A', 'B'])
expected = DataFrame({'values': [1, 2, np.nan, 3, 4, np.nan,
np.nan, np.nan, np.nan]},
index=exp_index)
result = gb.sum()
tm.assert_frame_equal(result, expected)
# multiple groupers with a non-cat
df = df.copy()
df['C'] = ['foo', 'bar'] * 2
gb = df.groupby(['A', 'B', 'C'])
exp_index = pd.MultiIndex.from_product(
[Categorical(["a", "b", "z"], ordered=True),
Categorical(["c", "d", "y"], ordered=True),
['foo', 'bar']],
names=['A', 'B', 'C'])
expected = DataFrame({'values': Series(
np.nan, index=exp_index)}).sortlevel()
expected.iloc[[1, 2, 7, 8], 0] = [1, 2, 3, 4]
result = gb.sum()
tm.assert_frame_equal(result, expected)
# GH 8623
x = pd.DataFrame([[1, 'John P. Doe'], [2, 'Jane Dove'],
[1, 'John P. Doe']],
columns=['person_id', 'person_name'])
x['person_name'] = pd.Categorical(x.person_name)
g = x.groupby(['person_id'])
result = g.transform(lambda x: x)
tm.assert_frame_equal(result, x[['person_name']])
result = x.drop_duplicates('person_name')
expected = x.iloc[[0, 1]]
tm.assert_frame_equal(result, expected)
def f(x):
return x.drop_duplicates('person_name').iloc[0]
result = g.apply(f)
expected = x.iloc[[0, 1]].copy()
expected.index = Index([1, 2], name='person_id')
expected['person_name'] = expected['person_name'].astype('object')
tm.assert_frame_equal(result, expected)
# GH 9921
# Monotonic
df = DataFrame({"a": [5, 15, 25]})
c = pd.cut(df.a, bins=[0, 10, 20, 30, 40])
result = df.a.groupby(c).transform(sum)
tm.assert_series_equal(result, df['a'])
tm.assert_series_equal(
df.a.groupby(c).transform(lambda xs: np.sum(xs)), df['a'])
tm.assert_frame_equal(df.groupby(c).transform(sum), df[['a']])
tm.assert_frame_equal(
df.groupby(c).transform(lambda xs: np.max(xs)), df[['a']])
# Filter
tm.assert_series_equal(df.a.groupby(c).filter(np.all), df['a'])
tm.assert_frame_equal(df.groupby(c).filter(np.all), df)
# Non-monotonic
df = DataFrame({"a": [5, 15, 25, -5]})
c = pd.cut(df.a, bins=[-10, 0, 10, 20, 30, 40])
result = df.a.groupby(c).transform(sum)
tm.assert_series_equal(result, df['a'])
tm.assert_series_equal(
df.a.groupby(c).transform(lambda xs: np.sum(xs)), df['a'])
tm.assert_frame_equal(df.groupby(c).transform(sum), df[['a']])
tm.assert_frame_equal(
df.groupby(c).transform(lambda xs: np.sum(xs)), df[['a']])
# GH 9603
df = pd.DataFrame({'a': [1, 0, 0, 0]})
c = pd.cut(df.a, [0, 1, 2, 3, 4])
result = df.groupby(c).apply(len)
exp_index = pd.CategoricalIndex(c.values.categories,
ordered=c.values.ordered)
expected = pd.Series([1, 0, 0, 0], index=exp_index)
expected.index.name = 'a'
tm.assert_series_equal(result, expected)
def test_pivot_table(self):
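# pivot_table with categorical keys builds the full cartesian product of
# categories in the MultiIndex; unobserved combinations are NaN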
raw_cat1 = Categorical(["a", "a", "b", "b"],
categories=["a", "b", "z"], ordered=True)
raw_cat2 = Categorical(["c", "d", "c", "d"],
categories=["c", "d", "y"], ordered=True)
df = DataFrame({"A": raw_cat1, "B": raw_cat2, "values": [1, 2, 3, 4]})
result = pd.pivot_table(df, values='values', index=['A', 'B'])
exp_index = pd.MultiIndex.from_product(
[Categorical(["a", "b", "z"], ordered=True),
Categorical(["c", "d", "y"], ordered=True)],
names=['A', 'B'])
expected = Series([1, 2, np.nan, 3, 4, np.nan, np.nan, np.nan, np.nan],
index=exp_index, name='values')
tm.assert_series_equal(result, expected)
def test_count(self):
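# Series.count should ignore the NaN entries of a categorical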
s = Series(Categorical([np.nan, 1, 2, np.nan],
categories=[5, 4, 3, 2, 1], ordered=True))
result = s.count()
self.assertEqual(result, 2)
def test_sort_values(self):
c = Categorical(["a", "b", "b", "a"], ordered=False)
cat = Series(c.copy())
# 'order' was deprecated in gh-10726
# 'sort' was deprecated in gh-12882
for func in ('order', 'sort'):
with tm.assert_produces_warning(FutureWarning):
getattr(c, func)()
# sort in the categories order
expected = Series(
Categorical(["a", "a", "b", "b"],
ordered=False), index=[0, 3, 1, 2])
result = cat.sort_values()
tm.assert_series_equal(result, expected)
cat = Series(Categorical(["a", "c", "b", "d"], ordered=True))
res = cat.sort_values()
exp = np.array(["a", "b", "c", "d"], dtype=np.object_)
self.assert_numpy_array_equal(res.__array__(), exp)
cat = Series(Categorical(["a", "c", "b", "d"], categories=[
"a", "b", "c", "d"], ordered=True))
res = cat.sort_values()
exp = np.array(["a", "b", "c", "d"], dtype=np.object_)
self.assert_numpy_array_equal(res.__array__(), exp)
res = cat.sort_values(ascending=False)
exp = np.array(["d", "c", "b", "a"], dtype=np.object_)
self.assert_numpy_array_equal(res.__array__(), exp)
raw_cat1 = Categorical(["a", "b", "c", "d"],
categories=["a", "b", "c", "d"], ordered=False)
raw_cat2 = Categorical(["a", "b", "c", "d"],
categories=["d", "c", "b", "a"], ordered=True)
s = ["a", "b", "c", "d"]
df = DataFrame({"unsort": raw_cat1,
"sort": raw_cat2,
"string": s,
"values": [1, 2, 3, 4]})
# Cats must be sorted in a dataframe
res = df.sort_values(by=["string"], ascending=False)
exp = np.array(["d", "c", "b", "a"], dtype=np.object_)
self.assert_numpy_array_equal(res["sort"].values.__array__(), exp)
self.assertEqual(res["sort"].dtype, "category")
res = df.sort_values(by=["sort"], ascending=False)
exp = df.sort_values(by=["string"], ascending=True)
self.assert_series_equal(res["values"], exp["values"])
self.assertEqual(res["sort"].dtype, "category")
self.assertEqual(res["unsort"].dtype, "category")
# unordered cat, but we allow this
df.sort_values(by=["unsort"], ascending=False)
# multi-columns sort
# GH 7848
df = DataFrame({"id": [6, 5, 4, 3, 2, 1],
"raw_grade": ['a', 'b', 'b', 'a', 'a', 'e']})
df["grade"] = pd.Categorical(df["raw_grade"], ordered=True)
df['grade'] = df['grade'].cat.set_categories(['b', 'e', 'a'])
# sorts 'grade' according to the order of the categories
result = df.sort_values(by=['grade'])
expected = df.iloc[[1, 2, 5, 0, 3, 4]]
tm.assert_frame_equal(result, expected)
# multi
result = df.sort_values(by=['grade', 'id'])
expected = df.iloc[[2, 1, 5, 4, 3, 0]]
tm.assert_frame_equal(result, expected)
def test_slicing(self):
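# basic slicing checks on a categorical Series and on a pd.cut() column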
cat = Series(Categorical([1, 2, 3, 4]))
reversed = cat[::-1]
exp = np.array([4, 3, 2, 1], dtype=np.int64)
self.assert_numpy_array_equal(reversed.__array__(), exp)
df = DataFrame({'value': (np.arange(100) + 1).astype('int64')})
df['D'] = pd.cut(df.value, bins=[0, 25, 50, 75, 100])
expected = Series([11, '(0, 25]'], index=['value', 'D'], name=10)
result = df.iloc[10]
tm.assert_series_equal(result, expected)
expected = DataFrame({'value': np.arange(11, 21).astype('int64')},
index=np.arange(10, 20).astype('int64'))
expected['D'] = pd.cut(expected.value, bins=[0, 25, 50, 75, 100])
result = df.iloc[10:20]
tm.assert_frame_equal(result, expected)
expected = Series([9, '(0, 25]'], index=['value', 'D'], name=8)
result = df.loc[8]
tm.assert_series_equal(result, expected)
def test_slicing_and_getting_ops(self):
# systematically test the slicing operations:
# for all slicing ops:
# - returning a dataframe
# - returning a column
# - returning a row
# - returning a single value
cats = pd.Categorical(
["a", "c", "b", "c", "c", "c", "c"], categories=["a", "b", "c"])
idx = pd.Index(["h", "i", "j", "k", "l", "m", "n"])
values = [1, 2, 3, 4, 5, 6, 7]
df = pd.DataFrame({"cats": cats, "values": values}, index=idx)
# the expected values
cats2 = pd.Categorical(["b", "c"], categories=["a", "b", "c"])
idx2 = pd.Index(["j", "k"])
values2 = [3, 4]
# 2:4,: | "j":"k",:
exp_df = pd.DataFrame({"cats": cats2, "values": values2}, index=idx2)
# :,"cats" | :,0
exp_col = pd.Series(cats, index=idx, name='cats')
# "j",: | 2,:
exp_row = pd.Series(["b", 3], index=["cats", "values"], dtype="object",
name="j")
# "j","cats | 2,0
exp_val = "b"
# iloc
# frame
res_df = df.iloc[2:4, :]
tm.assert_frame_equal(res_df, exp_df)
self.assertTrue(is_categorical_dtype(res_df["cats"]))
# row
res_row = df.iloc[2, :]
tm.assert_series_equal(res_row, exp_row)
tm.assertIsInstance(res_row["cats"], compat.string_types)
# col
res_col = df.iloc[:, 0]
tm.assert_series_equal(res_col, exp_col)
self.assertTrue(is_categorical_dtype(res_col))
# single value
res_val = df.iloc[2, 0]
self.assertEqual(res_val, exp_val)
# loc
# frame
res_df = df.loc["j":"k", :]
tm.assert_frame_equal(res_df, exp_df)
self.assertTrue(is_categorical_dtype(res_df["cats"]))
# row
res_row = df.loc["j", :]
tm.assert_series_equal(res_row, exp_row)
tm.assertIsInstance(res_row["cats"], compat.string_types)
# col
res_col = df.loc[:, "cats"]
tm.assert_series_equal(res_col, exp_col)
self.assertTrue(is_categorical_dtype(res_col))
# single value
res_val = df.loc["j", "cats"]
self.assertEqual(res_val, exp_val)
# ix
# frame
# res_df = df.ix["j":"k",[0,1]] # doesn't work?
res_df = df.ix["j":"k", :]
tm.assert_frame_equal(res_df, exp_df)
self.assertTrue(is_categorical_dtype(res_df["cats"]))
# row
res_row = df.ix["j", :]
tm.assert_series_equal(res_row, exp_row)
tm.assertIsInstance(res_row["cats"], compat.string_types)
# col
res_col = df.ix[:, "cats"]
tm.assert_series_equal(res_col, exp_col)
self.assertTrue(is_categorical_dtype(res_col))
# single value
res_val = df.ix["j", 0]
self.assertEqual(res_val, exp_val)
# iat
res_val = df.iat[2, 0]
self.assertEqual(res_val, exp_val)
# at
res_val = df.at["j", "cats"]
self.assertEqual(res_val, exp_val)
# fancy indexing
exp_fancy = df.iloc[[2]]
res_fancy = df[df["cats"] == "b"]
tm.assert_frame_equal(res_fancy, exp_fancy)
res_fancy = df[df["values"] == 3]
tm.assert_frame_equal(res_fancy, exp_fancy)
# get_value
res_val = df.get_value("j", "cats")
self.assertEqual(res_val, exp_val)
# i : int, slice, or sequence of integers
res_row = df.iloc[2]
tm.assert_series_equal(res_row, exp_row)
tm.assertIsInstance(res_row["cats"], compat.string_types)
res_df = df.iloc[slice(2, 4)]
tm.assert_frame_equal(res_df, exp_df)
self.assertTrue(is_categorical_dtype(res_df["cats"]))
res_df = df.iloc[[2, 3]]
tm.assert_frame_equal(res_df, exp_df)
self.assertTrue(is_categorical_dtype(res_df["cats"]))
res_col = df.iloc[:, 0]
tm.assert_series_equal(res_col, exp_col)
self.assertTrue(is_categorical_dtype(res_col))
res_df = df.iloc[:, slice(0, 2)]
tm.assert_frame_equal(res_df, df)
self.assertTrue(is_categorical_dtype(res_df["cats"]))
res_df = df.iloc[:, [0, 1]]
tm.assert_frame_equal(res_df, df)
self.assertTrue(is_categorical_dtype(res_df["cats"]))
def test_slicing_doc_examples(self):
# GH 7918
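# these follow the slicing examples used in the categorical docs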
cats = Categorical(["a", "b", "b", "b", "c", "c", "c"],
categories=["a", "b", "c"])
idx = Index(["h", "i", "j", "k", "l", "m", "n", ])
values = [1, 2, 2, 2, 3, 4, 5]
df = DataFrame({"cats": cats, "values": values}, index=idx)
result = df.iloc[2:4, :]
expected = DataFrame(
{"cats": Categorical(['b', 'b'], categories=['a', 'b', 'c']),
"values": [2, 2]}, index=['j', 'k'])
tm.assert_frame_equal(result, expected)
result = df.iloc[2:4, :].dtypes
expected = Series(['category', 'int64'], ['cats', 'values'])
tm.assert_series_equal(result, expected)
result = df.loc["h":"j", "cats"]
expected = Series(Categorical(['a', 'b', 'b'],
categories=['a', 'b', 'c']),
index=['h', 'i', 'j'], name='cats')
tm.assert_series_equal(result, expected)
result = df.ix["h":"j", 0:1]
expected = DataFrame({'cats': Categorical(['a', 'b', 'b'],
categories=['a', 'b', 'c'])},
index=['h', 'i', 'j'])
tm.assert_frame_equal(result, expected)
def test_assigning_ops(self):
# systematically test the assigning operations:
# for all slicing ops:
# for value in categories and value not in categories:
# - assign a single value -> exp_single_cats_value
# - assign a complete row (mixed values) -> exp_single_row
# assign multiple rows (mixed values) (-> array) -> exp_multi_row
# assign a part of a column with dtype == categorical ->
# exp_parts_cats_col
# assign a part of a column with dtype != categorical ->
# exp_parts_cats_col
cats = pd.Categorical(["a", "a", "a", "a", "a", "a", "a"],
categories=["a", "b"])
idx = pd.Index(["h", "i", "j", "k", "l", "m", "n"])
values = [1, 1, 1, 1, 1, 1, 1]
orig = pd.DataFrame({"cats": cats, "values": values}, index=idx)
# the expected values
# changed single row
cats1 = pd.Categorical(["a", "a", "b", "a", "a", "a", "a"],
categories=["a", "b"])
idx1 = pd.Index(["h", "i", "j", "k", "l", "m", "n"])
values1 = [1, 1, 2, 1, 1, 1, 1]
exp_single_row = pd.DataFrame({"cats": cats1,
"values": values1}, index=idx1)
# changed multiple rows
cats2 = pd.Categorical(["a", "a", "b", "b", "a", "a", "a"],
categories=["a", "b"])
idx2 = pd.Index(["h", "i", "j", "k", "l", "m", "n"])
values2 = [1, 1, 2, 2, 1, 1, 1]
exp_multi_row = pd.DataFrame({"cats": cats2,
"values": values2}, index=idx2)
# changed part of the cats column
cats3 = pd.Categorical(
["a", "a", "b", "b", "a", "a", "a"], categories=["a", "b"])
idx3 = pd.Index(["h", "i", "j", "k", "l", "m", "n"])
values3 = [1, 1, 1, 1, 1, 1, 1]
exp_parts_cats_col = pd.DataFrame(
{"cats": cats3,
"values": values3}, index=idx3)
# changed single value in cats col
cats4 = pd.Categorical(
["a", "a", "b", "a", "a", "a", "a"], categories=["a", "b"])
idx4 = pd.Index(["h", "i", "j", "k", "l", "m", "n"])
values4 = [1, 1, 1, 1, 1, 1, 1]
exp_single_cats_value = pd.DataFrame(
{"cats": cats4,
"values": values4}, index=idx4)
# iloc
# ###############
# - assign a single value -> exp_single_cats_value
df = orig.copy()
df.iloc[2, 0] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
df = orig.copy()
df.iloc[df.index == "j", 0] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
# - assign a single value not in the current categories set
def f():
df = orig.copy()
df.iloc[2, 0] = "c"
self.assertRaises(ValueError, f)
# - assign a complete row (mixed values) -> exp_single_row
df = orig.copy()
df.iloc[2, :] = ["b", 2]
tm.assert_frame_equal(df, exp_single_row)
# - assign a complete row (mixed values) not in categories set
def f():
df = orig.copy()
df.iloc[2, :] = ["c", 2]
self.assertRaises(ValueError, f)
# - assign multiple rows (mixed values) -> exp_multi_row
df = orig.copy()
df.iloc[2:4, :] = [["b", 2], ["b", 2]]
tm.assert_frame_equal(df, exp_multi_row)
def f():
df = orig.copy()
df.iloc[2:4, :] = [["c", 2], ["c", 2]]
self.assertRaises(ValueError, f)
# assign a part of a column with dtype == categorical ->
# exp_parts_cats_col
df = orig.copy()
df.iloc[2:4, 0] = pd.Categorical(["b", "b"], categories=["a", "b"])
tm.assert_frame_equal(df, exp_parts_cats_col)
with tm.assertRaises(ValueError):
# different categories -> not sure if this should fail or pass
df = orig.copy()
df.iloc[2:4, 0] = pd.Categorical(
["b", "b"], categories=["a", "b", "c"])
with tm.assertRaises(ValueError):
# different values
df = orig.copy()
df.iloc[2:4, 0] = pd.Categorical(
["c", "c"], categories=["a", "b", "c"])
# assign a part of a column with dtype != categorical ->
# exp_parts_cats_col
df = orig.copy()
df.iloc[2:4, 0] = ["b", "b"]
tm.assert_frame_equal(df, exp_parts_cats_col)
with tm.assertRaises(ValueError):
df.iloc[2:4, 0] = ["c", "c"]
# loc
# ##############
# - assign a single value -> exp_single_cats_value
df = orig.copy()
df.loc["j", "cats"] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
df = orig.copy()
df.loc[df.index == "j", "cats"] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
# - assign a single value not in the current categories set
def f():
df = orig.copy()
df.loc["j", "cats"] = "c"
self.assertRaises(ValueError, f)
# - assign a complete row (mixed values) -> exp_single_row
df = orig.copy()
df.loc["j", :] = ["b", 2]
tm.assert_frame_equal(df, exp_single_row)
# - assign a complete row (mixed values) not in categories set
def f():
df = orig.copy()
df.loc["j", :] = ["c", 2]
self.assertRaises(ValueError, f)
# - assign multiple rows (mixed values) -> exp_multi_row
df = orig.copy()
df.loc["j":"k", :] = [["b", 2], ["b", 2]]
tm.assert_frame_equal(df, exp_multi_row)
def f():
df = orig.copy()
df.loc["j":"k", :] = [["c", 2], ["c", 2]]
self.assertRaises(ValueError, f)
# assign a part of a column with dtype == categorical ->
# exp_parts_cats_col
df = orig.copy()
df.loc["j":"k", "cats"] = pd.Categorical(
["b", "b"], categories=["a", "b"])
tm.assert_frame_equal(df, exp_parts_cats_col)
with tm.assertRaises(ValueError):
# different categories -> not sure if this should fail or pass
df = orig.copy()
df.loc["j":"k", "cats"] = pd.Categorical(
["b", "b"], categories=["a", "b", "c"])
with tm.assertRaises(ValueError):
# different values
df = orig.copy()
df.loc["j":"k", "cats"] = pd.Categorical(
["c", "c"], categories=["a", "b", "c"])
# assign a part of a column with dtype != categorical ->
# exp_parts_cats_col
df = orig.copy()
df.loc["j":"k", "cats"] = ["b", "b"]
tm.assert_frame_equal(df, exp_parts_cats_col)
with tm.assertRaises(ValueError):
df.loc["j":"k", "cats"] = ["c", "c"]
# ix
# ##############
# - assign a single value -> exp_single_cats_value
df = orig.copy()
df.ix["j", 0] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
df = orig.copy()
df.ix[df.index == "j", 0] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
# - assign a single value not in the current categories set
def f():
df = orig.copy()
df.ix["j", 0] = "c"
self.assertRaises(ValueError, f)
# - assign a complete row (mixed values) -> exp_single_row
df = orig.copy()
df.ix["j", :] = ["b", 2]
tm.assert_frame_equal(df, exp_single_row)
# - assign a complete row (mixed values) not in categories set
def f():
df = orig.copy()
df.ix["j", :] = ["c", 2]
self.assertRaises(ValueError, f)
# - assign multiple rows (mixed values) -> exp_multi_row
df = orig.copy()
df.ix["j":"k", :] = [["b", 2], ["b", 2]]
tm.assert_frame_equal(df, exp_multi_row)
def f():
df = orig.copy()
df.ix["j":"k", :] = [["c", 2], ["c", 2]]
self.assertRaises(ValueError, f)
# assign a part of a column with dtype == categorical ->
# exp_parts_cats_col
df = orig.copy()
df.ix["j":"k", 0] = pd.Categorical(["b", "b"], categories=["a", "b"])
tm.assert_frame_equal(df, exp_parts_cats_col)
with tm.assertRaises(ValueError):
# different categories -> not sure if this should fail or pass
df = orig.copy()
df.ix["j":"k", 0] = pd.Categorical(
["b", "b"], categories=["a", "b", "c"])
with tm.assertRaises(ValueError):
# different values
df = orig.copy()
df.ix["j":"k", 0] = pd.Categorical(["c", "c"],
categories=["a", "b", "c"])
# assign a part of a column with dtype != categorical ->
# exp_parts_cats_col
df = orig.copy()
df.ix["j":"k", 0] = ["b", "b"]
tm.assert_frame_equal(df, exp_parts_cats_col)
with tm.assertRaises(ValueError):
df.ix["j":"k", 0] = ["c", "c"]
# iat
df = orig.copy()
df.iat[2, 0] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
# - assign a single value not in the current categories set
def f():
df = orig.copy()
df.iat[2, 0] = "c"
self.assertRaises(ValueError, f)
# at
# - assign a single value -> exp_single_cats_value
df = orig.copy()
df.at["j", "cats"] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
# - assign a single value not in the current categories set
def f():
df = orig.copy()
df.at["j", "cats"] = "c"
self.assertRaises(ValueError, f)
# fancy indexing
catsf = pd.Categorical(["a", "a", "c", "c", "a", "a", "a"],
categories=["a", "b", "c"])
idxf = pd.Index(["h", "i", "j", "k", "l", "m", "n"])
valuesf = [1, 1, 3, 3, 1, 1, 1]
df = pd.DataFrame({"cats": catsf, "values": valuesf}, index=idxf)
exp_fancy = exp_multi_row.copy()
exp_fancy["cats"].cat.set_categories(["a", "b", "c"], inplace=True)
df[df["cats"] == "c"] = ["b", 2]
# category c is kept in .categories
tm.assert_frame_equal(df, exp_fancy)
# set_value
df = orig.copy()
df.set_value("j", "cats", "b")
tm.assert_frame_equal(df, exp_single_cats_value)
def f():
df = orig.copy()
df.set_value("j", "cats", "c")
self.assertRaises(ValueError, f)
# Assigning a Categorical to parts of an int/... column uses the values of
# the Categorical
df = pd.DataFrame({"a": [1, 1, 1, 1, 1],
"b": ["a", "a", "a", "a", "a"]})
exp = pd.DataFrame({"a": [1, "b", "b", 1, 1],
"b": ["a", "a", "b", "b", "a"]})
df.loc[1:2, "a"] = pd.Categorical(["b", "b"], categories=["a", "b"])
df.loc[2:3, "b"] = pd.Categorical(["b", "b"], categories=["a", "b"])
tm.assert_frame_equal(df, exp)
# Series
orig = Series(pd.Categorical(["b", "b"], categories=["a", "b"]))
s = orig.copy()
s[:] = "a"
exp = Series(pd.Categorical(["a", "a"], categories=["a", "b"]))
tm.assert_series_equal(s, exp)
s = orig.copy()
s[1] = "a"
exp = Series(pd.Categorical(["b", "a"], categories=["a", "b"]))
tm.assert_series_equal(s, exp)
s = orig.copy()
s[s.index > 0] = "a"
exp = Series(pd.Categorical(["b", "a"], categories=["a", "b"]))
tm.assert_series_equal(s, exp)
s = orig.copy()
s[[False, True]] = "a"
exp = Series(pd.Categorical(["b", "a"], categories=["a", "b"]))
tm.assert_series_equal(s, exp)
s = orig.copy()
s.index = ["x", "y"]
s["y"] = "a"
exp = Series(pd.Categorical(["b", "a"], categories=["a", "b"]),
index=["x", "y"])
tm.assert_series_equal(s, exp)
# ensure that one can set something to np.nan
s = Series(Categorical([1, 2, 3]))
exp = Series(Categorical([1, np.nan, 3], categories=[1, 2, 3]))
s[1] = np.nan
tm.assert_series_equal(s, exp)
def test_comparisons(self):
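# run the same ordered-comparison checks for string and integer categories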
tests_data = [(list("abc"), list("cba"), list("bbb")),
([1, 2, 3], [3, 2, 1], [2, 2, 2])]
for data, reverse, base in tests_data:
cat_rev = pd.Series(pd.Categorical(data, categories=reverse,
ordered=True))
cat_rev_base = pd.Series(pd.Categorical(base, categories=reverse,
ordered=True))
cat = pd.Series(pd.Categorical(data, ordered=True))
cat_base = pd.Series(pd.Categorical(
base, categories=cat.cat.categories, ordered=True))
s = Series(base)
a = np.array(base)
# comparisons need to take categories ordering into account
res_rev = cat_rev > cat_rev_base
exp_rev = Series([True, False, False])
tm.assert_series_equal(res_rev, exp_rev)
res_rev = cat_rev < cat_rev_base
exp_rev = Series([False, False, True])
tm.assert_series_equal(res_rev, exp_rev)
res = cat > cat_base
exp = Series([False, False, True])
tm.assert_series_equal(res, exp)
scalar = base[1]
res = cat > scalar
exp = Series([False, False, True])
exp2 = cat.values > scalar
tm.assert_series_equal(res, exp)
tm.assert_numpy_array_equal(res.values, exp2)
res_rev = cat_rev > scalar
exp_rev = Series([True, False, False])
exp_rev2 = cat_rev.values > scalar
tm.assert_series_equal(res_rev, exp_rev)
tm.assert_numpy_array_equal(res_rev.values, exp_rev2)
# Only categoricals with the same categories can be compared
def f():
cat > cat_rev
self.assertRaises(TypeError, f)
# a categorical cannot be compared to a Series or numpy array, and
# vice versa
self.assertRaises(TypeError, lambda: cat > s)
self.assertRaises(TypeError, lambda: cat_rev > s)
self.assertRaises(TypeError, lambda: cat > a)
self.assertRaises(TypeError, lambda: cat_rev > a)
self.assertRaises(TypeError, lambda: s < cat)
self.assertRaises(TypeError, lambda: s < cat_rev)
self.assertRaises(TypeError, lambda: a < cat)
self.assertRaises(TypeError, lambda: a < cat_rev)
# unequal comparison should raise for unordered cats
cat = Series(Categorical(list("abc")))
def f():
cat > "b"
self.assertRaises(TypeError, f)
cat = Series(Categorical(list("abc"), ordered=False))
def f():
cat > "b"
self.assertRaises(TypeError, f)
# https://github.com/pandas-dev/pandas/issues/9836#issuecomment-92123057
# and the following discussion: comparisons with scalars not in categories
# should raise for unequal comps, but not for equal/not equal
cat = Series(Categorical(list("abc"), ordered=True))
self.assertRaises(TypeError, lambda: cat < "d")
self.assertRaises(TypeError, lambda: cat > "d")
self.assertRaises(TypeError, lambda: "d" < cat)
self.assertRaises(TypeError, lambda: "d" > cat)
self.assert_series_equal(cat == "d", Series([False, False, False]))
self.assert_series_equal(cat != "d", Series([True, True, True]))
# And test NaN handling...
cat = Series(Categorical(["a", "b", "c", np.nan]))
exp = Series([True, True, True, False])
res = (cat == cat)
tm.assert_series_equal(res, exp)
def test_cat_equality(self):
# GH 8938
# allow equality comparisons
a = Series(list('abc'), dtype="category")
b = Series(list('abc'), dtype="object")
c = Series(['a', 'b', 'cc'], dtype="object")
d = Series(list('acb'), dtype="object")
e = Categorical(list('abc'))
f = Categorical(list('acb'))
# vs scalar
self.assertFalse((a == 'a').all())
self.assertTrue(((a != 'a') == ~(a == 'a')).all())
self.assertFalse(('a' == a).all())
self.assertTrue((a == 'a')[0])
self.assertTrue(('a' == a)[0])
self.assertFalse(('a' != a)[0])
# vs list-like
self.assertTrue((a == a).all())
self.assertFalse((a != a).all())
self.assertTrue((a == list(a)).all())
self.assertTrue((a == b).all())
self.assertTrue((b == a).all())
self.assertTrue(((~(a == b)) == (a != b)).all())
self.assertTrue(((~(b == a)) == (b != a)).all())
self.assertFalse((a == c).all())
self.assertFalse((c == a).all())
self.assertFalse((a == d).all())
self.assertFalse((d == a).all())
# vs a cat-like
self.assertTrue((a == e).all())
self.assertTrue((e == a).all())
self.assertFalse((a == f).all())
self.assertFalse((f == a).all())
self.assertTrue(((~(a == e) == (a != e)).all()))
self.assertTrue(((~(e == a) == (e != a)).all()))
self.assertTrue(((~(a == f) == (a != f)).all()))
self.assertTrue(((~(f == a) == (f != a)).all()))
# non-equality is not comparable
self.assertRaises(TypeError, lambda: a < b)
self.assertRaises(TypeError, lambda: b < a)
self.assertRaises(TypeError, lambda: a > b)
self.assertRaises(TypeError, lambda: b > a)
def test_concat_append(self):
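# concat/append of frames with identical categorical dtypes preserves the
# dtype; different category sets fall back to object (GH 13524 below)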
cat = pd.Categorical(["a", "b"], categories=["a", "b"])
vals = [1, 2]
df = pd.DataFrame({"cats": cat, "vals": vals})
cat2 = pd.Categorical(["a", "b", "a", "b"], categories=["a", "b"])
vals2 = [1, 2, 1, 2]
exp = pd.DataFrame({"cats": cat2,
"vals": vals2}, index=pd.Index([0, 1, 0, 1]))
tm.assert_frame_equal(pd.concat([df, df]), exp)
tm.assert_frame_equal(df.append(df), exp)
# GH 13524 can concat different categories
cat3 = pd.Categorical(["a", "b"], categories=["a", "b", "c"])
vals3 = [1, 2]
df_different_categories = pd.DataFrame({"cats": cat3, "vals": vals3})
res = pd.concat([df, df_different_categories], ignore_index=True)
exp = pd.DataFrame({"cats": list('abab'), "vals": [1, 2, 1, 2]})
tm.assert_frame_equal(res, exp)
res = df.append(df_different_categories, ignore_index=True)
tm.assert_frame_equal(res, exp)
def test_concat_append_gh7864(self):
# GH 7864
# make sure ordering is preserved
df = pd.DataFrame({"id": [1, 2, 3, 4, 5, 6],
"raw_grade": ['a', 'b', 'b', 'a', 'a', 'e']})
df["grade"] = pd.Categorical(df["raw_grade"])
df['grade'].cat.set_categories(['e', 'a', 'b'])
df1 = df[0:3]
df2 = df[3:]
self.assert_index_equal(df['grade'].cat.categories,
df1['grade'].cat.categories)
self.assert_index_equal(df['grade'].cat.categories,
df2['grade'].cat.categories)
dfx = pd.concat([df1, df2])
self.assert_index_equal(df['grade'].cat.categories,
dfx['grade'].cat.categories)
dfa = df1.append(df2)
self.assert_index_equal(df['grade'].cat.categories,
dfa['grade'].cat.categories)
def test_concat_preserve(self):
# GH 8641 series concat not preserving category dtype
# GH 13524 can concat different categories
s = Series(list('abc'), dtype='category')
s2 = Series(list('abd'), dtype='category')
exp = Series(list('abcabd'))
res = pd.concat([s, s2], ignore_index=True)
tm.assert_series_equal(res, exp)
exp = Series(list('abcabc'), dtype='category')
res = pd.concat([s, s], ignore_index=True)
tm.assert_series_equal(res, exp)
exp = Series(list('abcabc'), index=[0, 1, 2, 0, 1, 2],
dtype='category')
res = pd.concat([s, s])
tm.assert_series_equal(res, exp)
a = Series(np.arange(6, dtype='int64'))
b = Series(list('aabbca'))
df2 = DataFrame({'A': a,
'B': b.astype('category', categories=list('cab'))})
res = pd.concat([df2, df2])
exp = DataFrame({'A': pd.concat([a, a]),
'B': pd.concat([b, b]).astype(
'category', categories=list('cab'))})
tm.assert_frame_equal(res, exp)
def test_categorical_index_preserver(self):
a = Series(np.arange(6, dtype='int64'))
b = Series(list('aabbca'))
df2 = DataFrame({'A': a,
'B': b.astype('category', categories=list('cab'))
}).set_index('B')
result = pd.concat([df2, df2])
expected = DataFrame({'A': pd.concat([a, a]),
'B': pd.concat([b, b]).astype(
'category', categories=list('cab'))
}).set_index('B')
tm.assert_frame_equal(result, expected)
# wrong categories
df3 = DataFrame({'A': a,
'B': pd.Categorical(b, categories=list('abc'))
}).set_index('B')
self.assertRaises(TypeError, lambda: pd.concat([df2, df3]))
def test_merge(self):
# GH 9426
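# merging on/with categorical columns should give the same result as the
# plain object-object merge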
right = DataFrame({'c': {0: 'a',
1: 'b',
2: 'c',
3: 'd',
4: 'e'},
'd': {0: 'null',
1: 'null',
2: 'null',
3: 'null',
4: 'null'}})
left = DataFrame({'a': {0: 'f',
1: 'f',
2: 'f',
3: 'f',
4: 'f'},
'b': {0: 'g',
1: 'g',
2: 'g',
3: 'g',
4: 'g'}})
df = pd.merge(left, right, how='left', left_on='b', right_on='c')
# object-object
expected = df.copy()
# object-cat
cright = right.copy()
cright['d'] = cright['d'].astype('category')
result = pd.merge(left, cright, how='left', left_on='b', right_on='c')
tm.assert_frame_equal(result, expected)
# cat-object
cleft = left.copy()
cleft['b'] = cleft['b'].astype('category')
result = pd.merge(cleft, cright, how='left', left_on='b', right_on='c')
tm.assert_frame_equal(result, expected)
# cat-cat
cright = right.copy()
cright['d'] = cright['d'].astype('category')
cleft = left.copy()
cleft['b'] = cleft['b'].astype('category')
result = pd.merge(cleft, cright, how='left', left_on='b', right_on='c')
tm.assert_frame_equal(result, expected)
def test_repeat(self):
# GH10183
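# repeat repeats each element while keeping the original categories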
cat = pd.Categorical(["a", "b"], categories=["a", "b"])
exp = pd.Categorical(["a", "a", "b", "b"], categories=["a", "b"])
res = cat.repeat(2)
self.assert_categorical_equal(res, exp)
def test_numpy_repeat(self):
cat = pd.Categorical(["a", "b"], categories=["a", "b"])
exp = pd.Categorical(["a", "a", "b", "b"], categories=["a", "b"])
self.assert_categorical_equal(np.repeat(cat, 2), exp)
msg = "the 'axis' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, np.repeat, cat, 2, axis=1)
def test_reshape(self):
cat = pd.Categorical([], categories=["a", "b"])
tm.assert_produces_warning(FutureWarning, cat.reshape, 0)
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([], categories=["a", "b"])
self.assert_categorical_equal(cat.reshape(0), cat)
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([], categories=["a", "b"])
self.assert_categorical_equal(cat.reshape((5, -1)), cat)
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical(["a", "b"], categories=["a", "b"])
self.assert_categorical_equal(cat.reshape(cat.shape), cat)
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical(["a", "b"], categories=["a", "b"])
self.assert_categorical_equal(cat.reshape(cat.size), cat)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
msg = "can only specify one unknown dimension"
cat = pd.Categorical(["a", "b"], categories=["a", "b"])
tm.assertRaisesRegexp(ValueError, msg, cat.reshape, (-2, -1))
def test_numpy_reshape(self):
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
cat = pd.Categorical(["a", "b"], categories=["a", "b"])
self.assert_categorical_equal(np.reshape(cat, cat.shape), cat)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
msg = "the 'order' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, np.reshape,
cat, cat.shape, order='F')
def test_na_actions(self):
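# fillna with a value that is a valid category works; filling with a value
# outside the categories raises ValueError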
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
vals = ["a", "b", np.nan, "d"]
df = pd.DataFrame({"cats": cat, "vals": vals})
cat2 = pd.Categorical([1, 2, 3, 3], categories=[1, 2, 3])
vals2 = ["a", "b", "b", "d"]
df_exp_fill = pd.DataFrame({"cats": cat2, "vals": vals2})
cat3 = pd.Categorical([1, 2, 3], categories=[1, 2, 3])
vals3 = ["a", "b", np.nan]
df_exp_drop_cats = pd.DataFrame({"cats": cat3, "vals": vals3})
cat4 = pd.Categorical([1, 2], categories=[1, 2, 3])
vals4 = ["a", "b"]
df_exp_drop_all = pd.DataFrame({"cats": cat4, "vals": vals4})
# fillna
res = df.fillna(value={"cats": 3, "vals": "b"})
tm.assert_frame_equal(res, df_exp_fill)
def f():
df.fillna(value={"cats": 4, "vals": "c"})
self.assertRaises(ValueError, f)
res = df.fillna(method='pad')
tm.assert_frame_equal(res, df_exp_fill)
res = df.dropna(subset=["cats"])
tm.assert_frame_equal(res, df_exp_drop_cats)
res = df.dropna()
tm.assert_frame_equal(res, df_exp_drop_all)
# make sure that fillna takes missing values into account
c = Categorical([np.nan, "b", np.nan], categories=["a", "b"])
df = pd.DataFrame({"cats": c, "vals": [1, 2, 3]})
cat_exp = Categorical(["a", "b", "a"], categories=["a", "b"])
df_exp = pd.DataFrame({"cats": cat_exp, "vals": [1, 2, 3]})
res = df.fillna("a")
tm.assert_frame_equal(res, df_exp)
# GH 14021
# np.nan should always be a valid filler
cat = Categorical([np.nan, 2, np.nan])
val = Categorical([np.nan, np.nan, np.nan])
df = DataFrame({"cats": cat, "vals": val})
res = df.fillna(df.median())
v_exp = [np.nan, np.nan, np.nan]
df_exp = pd.DataFrame({"cats": [2, 2, 2], "vals": v_exp},
dtype='category')
tm.assert_frame_equal(res, df_exp)
result = df.cats.fillna(np.nan)
tm.assert_series_equal(result, df.cats)
result = df.vals.fillna(np.nan)
tm.assert_series_equal(result, df.vals)
idx = pd.DatetimeIndex(['2011-01-01 09:00', '2016-01-01 23:45',
'2011-01-01 09:00', pd.NaT, pd.NaT])
df = DataFrame({'a': pd.Categorical(idx)})
tm.assert_frame_equal(df.fillna(value=pd.NaT), df)
idx = pd.PeriodIndex(['2011-01', '2011-01', '2011-01',
pd.NaT, pd.NaT], freq='M')
df = DataFrame({'a': pd.Categorical(idx)})
tm.assert_frame_equal(df.fillna(value=pd.NaT), df)
idx = pd.TimedeltaIndex(['1 days', '2 days',
'1 days', pd.NaT, pd.NaT])
df = pd.DataFrame({'a': pd.Categorical(idx)})
tm.assert_frame_equal(df.fillna(value=pd.NaT), df)
def test_astype_to_other(self):
s = self.cat['value_group']
expected = s
tm.assert_series_equal(s.astype('category'), expected)
tm.assert_series_equal(s.astype(CategoricalDtype()), expected)
self.assertRaises(ValueError, lambda: s.astype('float64'))
cat = Series(Categorical(['a', 'b', 'b', 'a', 'a', 'c', 'c', 'c']))
exp = Series(['a', 'b', 'b', 'a', 'a', 'c', 'c', 'c'])
tm.assert_series_equal(cat.astype('str'), exp)
s2 = Series(Categorical(['1', '2', '3', '4']))
exp2 = Series([1, 2, 3, 4]).astype(int)
tm.assert_series_equal(s2.astype('int'), exp2)
        # objects don't sort correctly, so just compare that we have the same
# values
def cmp(a, b):
tm.assert_almost_equal(
np.sort(np.unique(a)), np.sort(np.unique(b)))
expected = Series(np.array(s.values), name='value_group')
cmp(s.astype('object'), expected)
cmp(s.astype(np.object_), expected)
# array conversion
tm.assert_almost_equal(np.array(s), np.array(s.values))
# valid conversion
for valid in [lambda x: x.astype('category'),
lambda x: x.astype(CategoricalDtype()),
lambda x: x.astype('object').astype('category'),
lambda x: x.astype('object').astype(
CategoricalDtype())
]:
result = valid(s)
# compare series values
# internal .categories can't be compared because it is sorted
tm.assert_series_equal(result, s, check_categorical=False)
# invalid conversion (these are NOT a dtype)
for invalid in [lambda x: x.astype(pd.Categorical),
lambda x: x.astype('object').astype(pd.Categorical)]:
self.assertRaises(TypeError, lambda: invalid(s))
def test_astype_categorical(self):
cat = Categorical(['a', 'b', 'b', 'a', 'a', 'c', 'c', 'c'])
tm.assert_categorical_equal(cat, cat.astype('category'))
tm.assert_almost_equal(np.array(cat), cat.astype('object'))
self.assertRaises(ValueError, lambda: cat.astype(float))
def test_to_records(self):
# GH8626
# dict creation
df = DataFrame({'A': list('abc')}, dtype='category')
expected = Series(list('abc'), dtype='category', name='A')
tm.assert_series_equal(df['A'], expected)
# list-like creation
df = DataFrame(list('abc'), dtype='category')
expected = Series(list('abc'), dtype='category', name=0)
tm.assert_series_equal(df[0], expected)
# to record array
# this coerces
result = df.to_records()
expected = np.rec.array([(0, 'a'), (1, 'b'), (2, 'c')],
dtype=[('index', '=i8'), ('0', 'O')])
tm.assert_almost_equal(result, expected)
def test_numeric_like_ops(self):
# numeric ops should not succeed
for op in ['__add__', '__sub__', '__mul__', '__truediv__']:
self.assertRaises(TypeError,
lambda: getattr(self.cat, op)(self.cat))
# reduction ops should not succeed (unless specifically defined, e.g.
# min/max)
s = self.cat['value_group']
for op in ['kurt', 'skew', 'var', 'std', 'mean', 'sum', 'median']:
self.assertRaises(TypeError,
lambda: getattr(s, op)(numeric_only=False))
        # mad technically works because it always takes the numeric data
# numpy ops
s = pd.Series(pd.Categorical([1, 2, 3, 4]))
self.assertRaises(TypeError, lambda: np.sum(s))
# numeric ops on a Series
for op in ['__add__', '__sub__', '__mul__', '__truediv__']:
self.assertRaises(TypeError, lambda: getattr(s, op)(2))
# invalid ufunc
self.assertRaises(TypeError, lambda: np.log(s))
def test_cat_tab_completition(self):
# test the tab completion display
ok_for_cat = ['categories', 'codes', 'ordered', 'set_categories',
'add_categories', 'remove_categories',
'rename_categories', 'reorder_categories',
'remove_unused_categories', 'as_ordered', 'as_unordered']
def get_dir(s):
results = [r for r in s.cat.__dir__() if not r.startswith('_')]
return list(sorted(set(results)))
s = Series(list('aabbcde')).astype('category')
results = get_dir(s)
tm.assert_almost_equal(results, list(sorted(set(ok_for_cat))))
def test_cat_accessor_api(self):
# GH 9322
from pandas.core.categorical import CategoricalAccessor
self.assertIs(Series.cat, CategoricalAccessor)
s = Series(list('aabbcde')).astype('category')
self.assertIsInstance(s.cat, CategoricalAccessor)
invalid = Series([1])
with tm.assertRaisesRegexp(AttributeError, "only use .cat accessor"):
invalid.cat
self.assertFalse(hasattr(invalid, 'cat'))
def test_cat_accessor_no_new_attributes(self):
# https://github.com/pandas-dev/pandas/issues/10673
c = Series(list('aabbcde')).astype('category')
with tm.assertRaisesRegexp(AttributeError,
"You cannot add any new attribute"):
c.cat.xlabel = "a"
def test_str_accessor_api_for_categorical(self):
# https://github.com/pandas-dev/pandas/issues/10661
from pandas.core.strings import StringMethods
s = Series(list('aabb'))
s = s + " " + s
c = s.astype('category')
self.assertIsInstance(c.str, StringMethods)
# str functions, which need special arguments
special_func_defs = [
('cat', (list("zyxw"),), {"sep": ","}),
('center', (10,), {}),
('contains', ("a",), {}),
('count', ("a",), {}),
('decode', ("UTF-8",), {}),
('encode', ("UTF-8",), {}),
('endswith', ("a",), {}),
('extract', ("([a-z]*) ",), {"expand":False}),
('extract', ("([a-z]*) ",), {"expand":True}),
('extractall', ("([a-z]*) ",), {}),
('find', ("a",), {}),
('findall', ("a",), {}),
('index', (" ",), {}),
('ljust', (10,), {}),
            ('match', ("a",), {}),  # deprecated...
('normalize', ("NFC",), {}),
('pad', (10,), {}),
('partition', (" ",), {"expand": False}), # not default
('partition', (" ",), {"expand": True}), # default
('repeat', (3,), {}),
('replace', ("a", "z"), {}),
('rfind', ("a",), {}),
('rindex', (" ",), {}),
('rjust', (10,), {}),
('rpartition', (" ",), {"expand": False}), # not default
('rpartition', (" ",), {"expand": True}), # default
('slice', (0, 1), {}),
('slice_replace', (0, 1, "z"), {}),
('split', (" ",), {"expand": False}), # default
('split', (" ",), {"expand": True}), # not default
('startswith', ("a",), {}),
('wrap', (2,), {}),
('zfill', (10,), {})
]
_special_func_names = [f[0] for f in special_func_defs]
        # * get, join: they need individual elements of type list, but
# we can't make a categorical with lists as individual categories.
# -> `s.str.split(" ").astype("category")` will error!
# * `translate` has different interfaces for py2 vs. py3
_ignore_names = ["get", "join", "translate"]
str_func_names = [f
for f in dir(s.str)
if not (f.startswith("_") or f in _special_func_names
or f in _ignore_names)]
func_defs = [(f, (), {}) for f in str_func_names]
func_defs.extend(special_func_defs)
for func, args, kwargs in func_defs:
res = getattr(c.str, func)(*args, **kwargs)
exp = getattr(s.str, func)(*args, **kwargs)
if isinstance(res, pd.DataFrame):
tm.assert_frame_equal(res, exp)
else:
tm.assert_series_equal(res, exp)
invalid = Series([1, 2, 3]).astype('category')
with tm.assertRaisesRegexp(AttributeError,
"Can only use .str accessor with string"):
invalid.str
self.assertFalse(hasattr(invalid, 'str'))
def test_dt_accessor_api_for_categorical(self):
# https://github.com/pandas-dev/pandas/issues/10661
from pandas.tseries.common import Properties
from pandas.tseries.index import date_range, DatetimeIndex
from pandas.tseries.period import period_range, PeriodIndex
from pandas.tseries.tdi import timedelta_range, TimedeltaIndex
s_dr = Series(date_range('1/1/2015', periods=5, tz="MET"))
c_dr = s_dr.astype("category")
s_pr = Series(period_range('1/1/2015', freq='D', periods=5))
c_pr = s_pr.astype("category")
s_tdr = Series(timedelta_range('1 days', '10 days'))
c_tdr = s_tdr.astype("category")
test_data = [
("Datetime", DatetimeIndex._datetimelike_ops, s_dr, c_dr),
("Period", PeriodIndex._datetimelike_ops, s_pr, c_pr),
("Timedelta", TimedeltaIndex._datetimelike_ops, s_tdr, c_tdr)]
self.assertIsInstance(c_dr.dt, Properties)
special_func_defs = [
('strftime', ("%Y-%m-%d",), {}),
('tz_convert', ("EST",), {}),
('round', ("D",), {}),
('floor', ("D",), {}),
('ceil', ("D",), {}),
# ('tz_localize', ("UTC",), {}),
]
_special_func_names = [f[0] for f in special_func_defs]
# the series is already localized
_ignore_names = ['tz_localize']
for name, attr_names, s, c in test_data:
func_names = [f
for f in dir(s.dt)
if not (f.startswith("_") or f in attr_names or f in
_special_func_names or f in _ignore_names)]
func_defs = [(f, (), {}) for f in func_names]
for f_def in special_func_defs:
if f_def[0] in dir(s.dt):
func_defs.append(f_def)
for func, args, kwargs in func_defs:
res = getattr(c.dt, func)(*args, **kwargs)
exp = getattr(s.dt, func)(*args, **kwargs)
if isinstance(res, pd.DataFrame):
tm.assert_frame_equal(res, exp)
elif isinstance(res, pd.Series):
tm.assert_series_equal(res, exp)
else:
tm.assert_numpy_array_equal(res, exp)
for attr in attr_names:
try:
res = getattr(c.dt, attr)
exp = getattr(s.dt, attr)
except Exception as e:
print(name, attr)
raise e
if isinstance(res, pd.DataFrame):
tm.assert_frame_equal(res, exp)
elif isinstance(res, pd.Series):
tm.assert_series_equal(res, exp)
else:
tm.assert_numpy_array_equal(res, exp)
invalid = Series([1, 2, 3]).astype('category')
with tm.assertRaisesRegexp(
AttributeError, "Can only use .dt accessor with datetimelike"):
invalid.dt
        self.assertFalse(hasattr(invalid, 'dt'))
def test_concat_categorical(self):
# See GH 10177
df1 = pd.DataFrame(np.arange(18, dtype='int64').reshape(6, 3),
columns=["a", "b", "c"])
df2 = pd.DataFrame(np.arange(14, dtype='int64').reshape(7, 2),
columns=["a", "c"])
cat_values = ["one", "one", "two", "one", "two", "two", "one"]
df2['h'] = pd.Series(pd.Categorical(cat_values))
res = pd.concat((df1, df2), axis=0, ignore_index=True)
exp = pd.DataFrame({'a': [0, 3, 6, 9, 12, 15, 0, 2, 4, 6, 8, 10, 12],
'b': [1, 4, 7, 10, 13, 16, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan],
'c': [2, 5, 8, 11, 14, 17, 1, 3, 5, 7, 9, 11, 13],
'h': [None] * 6 + cat_values})
tm.assert_frame_equal(res, exp)
class TestCategoricalSubclassing(tm.TestCase):
_multiprocess_can_split_ = True
def test_constructor(self):
sc = tm.SubclassedCategorical(['a', 'b', 'c'])
self.assertIsInstance(sc, tm.SubclassedCategorical)
tm.assert_categorical_equal(sc, Categorical(['a', 'b', 'c']))
def test_from_array(self):
sc = tm.SubclassedCategorical.from_codes([1, 0, 2], ['a', 'b', 'c'])
self.assertIsInstance(sc, tm.SubclassedCategorical)
exp = Categorical.from_codes([1, 0, 2], ['a', 'b', 'c'])
tm.assert_categorical_equal(sc, exp)
def test_map(self):
sc = tm.SubclassedCategorical(['a', 'b', 'c'])
res = sc.map(lambda x: x.upper())
self.assertIsInstance(res, tm.SubclassedCategorical)
exp = Categorical(['A', 'B', 'C'])
tm.assert_categorical_equal(res, exp)
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
# '--with-coverage', '--cover-package=pandas.core']
exit=False)
| mit |
bgris/ODL_bgris | lib/python3.5/site-packages/mpl_toolkits/axes_grid1/inset_locator.py | 10 | 18698 | """
A collection of functions and objects for creating or placing inset axes.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib import docstring
import six
from matplotlib.offsetbox import AnchoredOffsetbox
from matplotlib.patches import Patch, Rectangle
from matplotlib.path import Path
from matplotlib.transforms import Bbox, BboxTransformTo
from matplotlib.transforms import IdentityTransform, TransformedBbox
from . import axes_size as Size
from .parasite_axes import HostAxes
class InsetPosition(object):
@docstring.dedent_interpd
def __init__(self, parent, lbwh):
"""
An object for positioning an inset axes.
This is created by specifying the normalized coordinates in the axes,
instead of the figure.
Parameters
----------
parent : `matplotlib.axes.Axes`
Axes to use for normalizing coordinates.
lbwh : iterable of four floats
The left edge, bottom edge, width, and height of the inset axes, in
units of the normalized coordinate of the *parent* axes.
See Also
--------
:meth:`matplotlib.axes.Axes.set_axes_locator`
Examples
--------
The following bounds the inset axes to a box with 20%% of the parent
axes's height and 40%% of the width. The size of the axes specified
([0, 0, 1, 1]) ensures that the axes completely fills the bounding box:
>>> parent_axes = plt.gca()
>>> ax_ins = plt.axes([0, 0, 1, 1])
        >>> ip = InsetPosition(parent_axes, [0.5, 0.1, 0.4, 0.2])
>>> ax_ins.set_axes_locator(ip)
"""
self.parent = parent
self.lbwh = lbwh
def __call__(self, ax, renderer):
bbox_parent = self.parent.get_position(original=False)
trans = BboxTransformTo(bbox_parent)
bbox_inset = Bbox.from_bounds(*self.lbwh)
bb = TransformedBbox(bbox_inset, trans)
return bb
class AnchoredLocatorBase(AnchoredOffsetbox):
def __init__(self, bbox_to_anchor, offsetbox, loc,
borderpad=0.5, bbox_transform=None):
super(AnchoredLocatorBase, self).__init__(
loc, pad=0., child=None, borderpad=borderpad,
bbox_to_anchor=bbox_to_anchor, bbox_transform=bbox_transform
)
def draw(self, renderer):
raise RuntimeError("No draw method should be called")
def __call__(self, ax, renderer):
self.axes = ax
fontsize = renderer.points_to_pixels(self.prop.get_size_in_points())
self._update_offset_func(renderer, fontsize)
width, height, xdescent, ydescent = self.get_extent(renderer)
px, py = self.get_offset(width, height, 0, 0, renderer)
bbox_canvas = Bbox.from_bounds(px, py, width, height)
tr = ax.figure.transFigure.inverted()
bb = TransformedBbox(bbox_canvas, tr)
return bb
class AnchoredSizeLocator(AnchoredLocatorBase):
def __init__(self, bbox_to_anchor, x_size, y_size, loc,
borderpad=0.5, bbox_transform=None):
super(AnchoredSizeLocator, self).__init__(
bbox_to_anchor, None, loc,
borderpad=borderpad, bbox_transform=bbox_transform
)
self.x_size = Size.from_any(x_size)
self.y_size = Size.from_any(y_size)
def get_extent(self, renderer):
x, y, w, h = self.get_bbox_to_anchor().bounds
dpi = renderer.points_to_pixels(72.)
r, a = self.x_size.get_size(renderer)
width = w*r + a*dpi
r, a = self.y_size.get_size(renderer)
height = h*r + a*dpi
xd, yd = 0, 0
fontsize = renderer.points_to_pixels(self.prop.get_size_in_points())
pad = self.pad * fontsize
return width+2*pad, height+2*pad, xd+pad, yd+pad
class AnchoredZoomLocator(AnchoredLocatorBase):
def __init__(self, parent_axes, zoom, loc,
borderpad=0.5,
bbox_to_anchor=None,
bbox_transform=None):
self.parent_axes = parent_axes
self.zoom = zoom
if bbox_to_anchor is None:
bbox_to_anchor = parent_axes.bbox
super(AnchoredZoomLocator, self).__init__(
bbox_to_anchor, None, loc, borderpad=borderpad,
bbox_transform=bbox_transform)
def get_extent(self, renderer):
bb = TransformedBbox(self.axes.viewLim,
self.parent_axes.transData)
x, y, w, h = bb.bounds
xd, yd = 0, 0
fontsize = renderer.points_to_pixels(self.prop.get_size_in_points())
pad = self.pad * fontsize
return w*self.zoom+2*pad, h*self.zoom+2*pad, xd+pad, yd+pad
class BboxPatch(Patch):
@docstring.dedent_interpd
def __init__(self, bbox, **kwargs):
"""
Patch showing the shape bounded by a Bbox.
Parameters
----------
bbox : `matplotlib.transforms.Bbox`
Bbox to use for the extents of this patch.
**kwargs
Patch properties. Valid arguments include:
%(Patch)s
"""
if "transform" in kwargs:
raise ValueError("transform should not be set")
kwargs["transform"] = IdentityTransform()
Patch.__init__(self, **kwargs)
self.bbox = bbox
def get_path(self):
x0, y0, x1, y1 = self.bbox.extents
verts = [(x0, y0),
(x1, y0),
(x1, y1),
(x0, y1),
(x0, y0),
(0, 0)]
codes = [Path.MOVETO,
Path.LINETO,
Path.LINETO,
Path.LINETO,
Path.LINETO,
Path.CLOSEPOLY]
return Path(verts, codes)
get_path.__doc__ = Patch.get_path.__doc__
class BboxConnector(Patch):
@staticmethod
def get_bbox_edge_pos(bbox, loc):
"""
Helper function to obtain the location of a corner of a bbox
Parameters
----------
bbox : `matplotlib.transforms.Bbox`
loc : {1, 2, 3, 4}
Corner of *bbox*. Valid values are::
'upper right' : 1,
'upper left' : 2,
'lower left' : 3,
'lower right' : 4
Returns
-------
x, y : float
Coordinates of the corner specified by *loc*.
"""
x0, y0, x1, y1 = bbox.extents
if loc == 1:
return x1, y1
elif loc == 2:
return x0, y1
elif loc == 3:
return x0, y0
elif loc == 4:
return x1, y0
@staticmethod
def connect_bbox(bbox1, bbox2, loc1, loc2=None):
"""
Helper function to obtain a Path from one bbox to another.
Parameters
----------
bbox1, bbox2 : `matplotlib.transforms.Bbox`
Bounding boxes to connect.
loc1 : {1, 2, 3, 4}
Corner of *bbox1* to use. Valid values are::
'upper right' : 1,
'upper left' : 2,
'lower left' : 3,
'lower right' : 4
loc2 : {1, 2, 3, 4}, optional
Corner of *bbox2* to use. If None, defaults to *loc1*.
Valid values are::
'upper right' : 1,
'upper left' : 2,
'lower left' : 3,
'lower right' : 4
Returns
-------
path : `matplotlib.path.Path`
A line segment from the *loc1* corner of *bbox1* to the *loc2*
corner of *bbox2*.
"""
if isinstance(bbox1, Rectangle):
            transform = bbox1.get_transform()
bbox1 = Bbox.from_bounds(0, 0, 1, 1)
bbox1 = TransformedBbox(bbox1, transform)
if isinstance(bbox2, Rectangle):
transform = bbox2.get_transform()
bbox2 = Bbox.from_bounds(0, 0, 1, 1)
bbox2 = TransformedBbox(bbox2, transform)
if loc2 is None:
loc2 = loc1
x1, y1 = BboxConnector.get_bbox_edge_pos(bbox1, loc1)
x2, y2 = BboxConnector.get_bbox_edge_pos(bbox2, loc2)
verts = [[x1, y1], [x2, y2]]
codes = [Path.MOVETO, Path.LINETO]
return Path(verts, codes)
@docstring.dedent_interpd
def __init__(self, bbox1, bbox2, loc1, loc2=None, **kwargs):
"""
Connect two bboxes with a straight line.
Parameters
----------
bbox1, bbox2 : `matplotlib.transforms.Bbox`
Bounding boxes to connect.
loc1 : {1, 2, 3, 4}
Corner of *bbox1* to draw the line. Valid values are::
'upper right' : 1,
'upper left' : 2,
'lower left' : 3,
'lower right' : 4
loc2 : {1, 2, 3, 4}, optional
Corner of *bbox2* to draw the line. If None, defaults to *loc1*.
Valid values are::
'upper right' : 1,
'upper left' : 2,
'lower left' : 3,
'lower right' : 4
**kwargs
Patch properties for the line drawn. Valid arguments include:
%(Patch)s
"""
if "transform" in kwargs:
raise ValueError("transform should not be set")
kwargs["transform"] = IdentityTransform()
Patch.__init__(self, fill=False, **kwargs)
self.bbox1 = bbox1
self.bbox2 = bbox2
self.loc1 = loc1
self.loc2 = loc2
def get_path(self):
return self.connect_bbox(self.bbox1, self.bbox2,
self.loc1, self.loc2)
get_path.__doc__ = Patch.get_path.__doc__
class BboxConnectorPatch(BboxConnector):
@docstring.dedent_interpd
def __init__(self, bbox1, bbox2, loc1a, loc2a, loc1b, loc2b, **kwargs):
"""
Connect two bboxes with a quadrilateral.
The quadrilateral is specified by two lines that start and end at corners
of the bboxes. The four sides of the quadrilateral are defined by the two
lines given, the line between the two corners specified in *bbox1* and the
line between the two corners specified in *bbox2*.
Parameters
----------
bbox1, bbox2 : `matplotlib.transforms.Bbox`
Bounding boxes to connect.
loc1a, loc2a : {1, 2, 3, 4}
Corners of *bbox1* and *bbox2* to draw the first line.
Valid values are::
'upper right' : 1,
'upper left' : 2,
'lower left' : 3,
'lower right' : 4
loc1b, loc2b : {1, 2, 3, 4}
Corners of *bbox1* and *bbox2* to draw the second line.
Valid values are::
'upper right' : 1,
'upper left' : 2,
'lower left' : 3,
'lower right' : 4
**kwargs
Patch properties for the line drawn:
%(Patch)s
"""
if "transform" in kwargs:
raise ValueError("transform should not be set")
BboxConnector.__init__(self, bbox1, bbox2, loc1a, loc2a, **kwargs)
self.loc1b = loc1b
self.loc2b = loc2b
def get_path(self):
path1 = self.connect_bbox(self.bbox1, self.bbox2, self.loc1, self.loc2)
path2 = self.connect_bbox(self.bbox2, self.bbox1,
self.loc2b, self.loc1b)
path_merged = (list(path1.vertices) +
list(path2.vertices) +
[path1.vertices[0]])
return Path(path_merged)
get_path.__doc__ = BboxConnector.get_path.__doc__
def _add_inset_axes(parent_axes, inset_axes):
"""Helper function to add an inset axes and disable navigation in it"""
parent_axes.figure.add_axes(inset_axes)
inset_axes.set_navigate(False)
@docstring.dedent_interpd
def inset_axes(parent_axes, width, height, loc=1,
bbox_to_anchor=None, bbox_transform=None,
axes_class=None,
axes_kwargs=None,
borderpad=0.5):
"""
Create an inset axes with a given width and height.
Both sizes used can be specified either in inches or percentage of the
parent axes.
Parameters
----------
parent_axes : `matplotlib.axes.Axes`
Axes to place the inset axes.
width, height : float or str
Size of the inset axes to create.
loc : int or string, optional, default to 1
Location to place the inset axes. The valid locations are::
'upper right' : 1,
'upper left' : 2,
'lower left' : 3,
'lower right' : 4,
'right' : 5,
'center left' : 6,
'center right' : 7,
'lower center' : 8,
'upper center' : 9,
'center' : 10
bbox_to_anchor : tuple or `matplotlib.transforms.BboxBase`, optional
        Bbox that the inset axes will be anchored to. Can be a tuple of
[left, bottom, width, height], or a tuple of [left, bottom].
bbox_transform : `matplotlib.transforms.Transform`, optional
        Transformation for the bbox. If None, `parent_axes.transAxes` is used.
axes_class : `matplotlib.axes.Axes` type, optional
        If specified, the inset axes will be created with this class's
constructor.
axes_kwargs : dict, optional
        Keyword arguments to pass to the constructor of the inset axes.
Valid arguments include:
%(Axes)s
borderpad : float, optional
Padding between inset axes and the bbox_to_anchor. Defaults to 0.5.
Returns
-------
inset_axes : `axes_class`
Inset axes object created.
"""
if axes_class is None:
axes_class = HostAxes
if axes_kwargs is None:
inset_axes = axes_class(parent_axes.figure, parent_axes.get_position())
else:
inset_axes = axes_class(parent_axes.figure, parent_axes.get_position(),
**axes_kwargs)
if bbox_to_anchor is None:
bbox_to_anchor = parent_axes.bbox
axes_locator = AnchoredSizeLocator(bbox_to_anchor,
width, height,
loc=loc,
bbox_transform=bbox_transform,
borderpad=borderpad)
inset_axes.set_axes_locator(axes_locator)
_add_inset_axes(parent_axes, inset_axes)
return inset_axes
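# --- Illustrative sketch, not part of this module's public code -------------
# A minimal usage example for `inset_axes` above, assuming only Matplotlib is
# available; the 30% width/height strings, loc=4 ("lower right") and the
# helper name are arbitrary choices made for illustration.
def _example_inset_axes():
    import matplotlib.pyplot as plt

    fig, parent = plt.subplots()
    parent.plot([0, 1, 2], [0, 1, 4])
    # Inset occupying 30% of the parent's width and height in the lower right.
    ax_in = inset_axes(parent, width="30%", height="30%", loc=4)
    ax_in.plot([0, 1, 2], [4, 1, 0])
    return fig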
@docstring.dedent_interpd
def zoomed_inset_axes(parent_axes, zoom, loc=1,
bbox_to_anchor=None, bbox_transform=None,
axes_class=None,
axes_kwargs=None,
borderpad=0.5):
"""
Create an anchored inset axes by scaling a parent axes.
Parameters
----------
parent_axes : `matplotlib.axes.Axes`
Axes to place the inset axes.
zoom : float
        Scaling factor of the data axes. *zoom* > 1 will enlarge the
coordinates (i.e., "zoomed in"), while *zoom* < 1 will shrink the
coordinates (i.e., "zoomed out").
loc : int or string, optional, default to 1
Location to place the inset axes. The valid locations are::
'upper right' : 1,
'upper left' : 2,
'lower left' : 3,
'lower right' : 4,
'right' : 5,
'center left' : 6,
'center right' : 7,
'lower center' : 8,
'upper center' : 9,
'center' : 10
bbox_to_anchor : tuple or `matplotlib.transforms.BboxBase`, optional
        Bbox that the inset axes will be anchored to. Can be a tuple of
[left, bottom, width, height], or a tuple of [left, bottom].
bbox_transform : `matplotlib.transforms.Transform`, optional
        Transformation for the bbox. If None, `parent_axes.transAxes` is used.
axes_class : `matplotlib.axes.Axes` type, optional
        If specified, the inset axes will be created with this class's
constructor.
axes_kwargs : dict, optional
        Keyword arguments to pass to the constructor of the inset axes.
Valid arguments include:
%(Axes)s
borderpad : float, optional
Padding between inset axes and the bbox_to_anchor. Defaults to 0.5.
Returns
-------
inset_axes : `axes_class`
Inset axes object created.
"""
if axes_class is None:
axes_class = HostAxes
if axes_kwargs is None:
inset_axes = axes_class(parent_axes.figure, parent_axes.get_position())
else:
inset_axes = axes_class(parent_axes.figure, parent_axes.get_position(),
**axes_kwargs)
axes_locator = AnchoredZoomLocator(parent_axes, zoom=zoom, loc=loc,
bbox_to_anchor=bbox_to_anchor,
bbox_transform=bbox_transform,
borderpad=borderpad)
inset_axes.set_axes_locator(axes_locator)
_add_inset_axes(parent_axes, inset_axes)
return inset_axes
@docstring.dedent_interpd
def mark_inset(parent_axes, inset_axes, loc1, loc2, **kwargs):
"""
Draw a box to mark the location of an area represented by an inset axes.
This function draws a box in *parent_axes* at the bounding box of
*inset_axes*, and shows a connection with the inset axes by drawing lines
at the corners, giving a "zoomed in" effect.
Parameters
----------
parent_axes : `matplotlib.axes.Axes`
Axes which contains the area of the inset axes.
inset_axes : `matplotlib.axes.Axes`
The inset axes.
loc1, loc2 : {1, 2, 3, 4}
Corners to use for connecting the inset axes and the area in the
parent axes.
**kwargs
Patch properties for the lines and box drawn:
%(Patch)s
Returns
-------
pp : `matplotlib.patches.Patch`
The patch drawn to represent the area of the inset axes.
p1, p2 : `matplotlib.patches.Patch`
The patches connecting two corners of the inset axes and its area.
"""
rect = TransformedBbox(inset_axes.viewLim, parent_axes.transData)
pp = BboxPatch(rect, fill=False, **kwargs)
parent_axes.add_patch(pp)
p1 = BboxConnector(inset_axes.bbox, rect, loc1=loc1, **kwargs)
inset_axes.add_patch(p1)
p1.set_clip_on(False)
p2 = BboxConnector(inset_axes.bbox, rect, loc1=loc2, **kwargs)
inset_axes.add_patch(p2)
p2.set_clip_on(False)
return pp, p1, p2
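# --- Illustrative sketch, not part of this module's public code -------------
# A minimal sketch combining `zoomed_inset_axes` and `mark_inset`, assuming
# only NumPy and Matplotlib; the zoom factor, axis limits and corner choices
# are arbitrary illustration values.
def _example_zoom_and_mark():
    import numpy as np
    import matplotlib.pyplot as plt

    fig, parent = plt.subplots()
    x = np.linspace(0, 10, 500)
    parent.plot(x, np.sin(x))
    # Inset that magnifies the parent data 4x, anchored in the upper right.
    ax_zoom = zoomed_inset_axes(parent, zoom=4, loc=1)
    ax_zoom.plot(x, np.sin(x))
    ax_zoom.set_xlim(2.0, 2.5)
    ax_zoom.set_ylim(0.55, 0.95)
    # Box the zoomed region on the parent and connect two corners to the inset.
    mark_inset(parent, ax_zoom, loc1=2, loc2=4, fc="none", ec="0.5")
    return fig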
| gpl-3.0 |
nhejazi/scikit-learn | sklearn/neighbors/nearest_centroid.py | 8 | 7451 | # -*- coding: utf-8 -*-
"""
Nearest Centroid Classification
"""
# Author: Robert Layton <[email protected]>
# Olivier Grisel <[email protected]>
#
# License: BSD 3 clause
import warnings
import numpy as np
from scipy import sparse as sp
from ..base import BaseEstimator, ClassifierMixin
from ..metrics.pairwise import pairwise_distances
from ..preprocessing import LabelEncoder
from ..utils.validation import check_array, check_X_y, check_is_fitted
from ..utils.sparsefuncs import csc_median_axis_0
from ..utils.multiclass import check_classification_targets
class NearestCentroid(BaseEstimator, ClassifierMixin):
"""Nearest centroid classifier.
Each class is represented by its centroid, with test samples classified to
the class with the nearest centroid.
Read more in the :ref:`User Guide <nearest_centroid_classifier>`.
Parameters
----------
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string or callable, it must be one of
the options allowed by metrics.pairwise.pairwise_distances for its
metric parameter.
    The centroid for the samples corresponding to each class is the point
    from which the sum of the distances (according to the metric) of all
    samples that belong to that particular class is minimized.
If the "manhattan" metric is provided, this centroid is the median and
for all other metrics, the centroid is now set to be the mean.
shrink_threshold : float, optional (default = None)
Threshold for shrinking centroids to remove features.
Attributes
----------
centroids_ : array-like, shape = [n_classes, n_features]
Centroid of each class
Examples
--------
>>> from sklearn.neighbors.nearest_centroid import NearestCentroid
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> clf = NearestCentroid()
>>> clf.fit(X, y)
NearestCentroid(metric='euclidean', shrink_threshold=None)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
sklearn.neighbors.KNeighborsClassifier: nearest neighbors classifier
Notes
-----
When used for text classification with tf-idf vectors, this classifier is
also known as the Rocchio classifier.
References
----------
Tibshirani, R., Hastie, T., Narasimhan, B., & Chu, G. (2002). Diagnosis of
multiple cancer types by shrunken centroids of gene expression. Proceedings
of the National Academy of Sciences of the United States of America,
99(10), 6567-6572. The National Academy of Sciences.
"""
def __init__(self, metric='euclidean', shrink_threshold=None):
self.metric = metric
self.shrink_threshold = shrink_threshold
def fit(self, X, y):
"""
Fit the NearestCentroid model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
Note that centroid shrinking cannot be used with sparse matrices.
y : array, shape = [n_samples]
Target values (integers)
"""
if self.metric == 'precomputed':
raise ValueError("Precomputed is not supported.")
# If X is sparse and the metric is "manhattan", store it in a csc
        # format, which makes it easier to calculate the median.
if self.metric == 'manhattan':
X, y = check_X_y(X, y, ['csc'])
else:
X, y = check_X_y(X, y, ['csr', 'csc'])
is_X_sparse = sp.issparse(X)
if is_X_sparse and self.shrink_threshold:
raise ValueError("threshold shrinking not supported"
" for sparse input")
check_classification_targets(y)
n_samples, n_features = X.shape
le = LabelEncoder()
y_ind = le.fit_transform(y)
self.classes_ = classes = le.classes_
n_classes = classes.size
if n_classes < 2:
raise ValueError('y has less than 2 classes')
# Mask mapping each class to its members.
self.centroids_ = np.empty((n_classes, n_features), dtype=np.float64)
        # Number of samples in each class.
nk = np.zeros(n_classes)
for cur_class in range(n_classes):
center_mask = y_ind == cur_class
nk[cur_class] = np.sum(center_mask)
if is_X_sparse:
center_mask = np.where(center_mask)[0]
# XXX: Update other averaging methods according to the metrics.
if self.metric == "manhattan":
# NumPy does not calculate median of sparse matrices.
if not is_X_sparse:
self.centroids_[cur_class] = np.median(X[center_mask], axis=0)
else:
self.centroids_[cur_class] = csc_median_axis_0(X[center_mask])
else:
if self.metric != 'euclidean':
warnings.warn("Averaging for metrics other than "
"euclidean and manhattan not supported. "
"The average is set to be the mean."
)
self.centroids_[cur_class] = X[center_mask].mean(axis=0)
if self.shrink_threshold:
dataset_centroid_ = np.mean(X, axis=0)
# m parameter for determining deviation
m = np.sqrt((1. / nk) - (1. / n_samples))
# Calculate deviation using the standard deviation of centroids.
variance = (X - self.centroids_[y_ind]) ** 2
variance = variance.sum(axis=0)
s = np.sqrt(variance / (n_samples - n_classes))
s += np.median(s) # To deter outliers from affecting the results.
mm = m.reshape(len(m), 1) # Reshape to allow broadcasting.
ms = mm * s
deviation = ((self.centroids_ - dataset_centroid_) / ms)
# Soft thresholding: if the deviation crosses 0 during shrinking,
# it becomes zero.
signs = np.sign(deviation)
deviation = (np.abs(deviation) - self.shrink_threshold)
deviation[deviation < 0] = 0
deviation *= signs
# Now adjust the centroids using the deviation
msd = ms * deviation
self.centroids_ = dataset_centroid_[np.newaxis, :] + msd
return self
def predict(self, X):
"""Perform classification on an array of test vectors X.
The predicted class C for each sample in X is returned.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples]
Notes
-----
If the metric constructor parameter is "precomputed", X is assumed to
be the distance matrix between the data to be predicted and
``self.centroids_``.
"""
check_is_fitted(self, 'centroids_')
X = check_array(X, accept_sparse='csr')
return self.classes_[pairwise_distances(
X, self.centroids_, metric=self.metric).argmin(axis=1)]
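# --- Illustrative sketch, not part of the scikit-learn API ------------------
# A small sketch of the shrunken-centroid behaviour documented above, assuming
# only NumPy and this module; the synthetic data and the 0.2 threshold are
# arbitrary illustration values.
def _example_shrunken_centroids():
    rng = np.random.RandomState(0)
    X = np.vstack([rng.randn(20, 5) + 2, rng.randn(20, 5) - 2])
    y = np.array([0] * 20 + [1] * 20)
    clf = NearestCentroid(shrink_threshold=0.2)
    clf.fit(X, y)
    # Shrinking pulls each class centroid towards the overall data centroid
    # and can zero out uninformative features (soft thresholding above).
    return clf.centroids_, clf.predict(X[:5])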
| bsd-3-clause |
abimannans/scikit-learn | sklearn/tree/tests/test_export.py | 130 | 9950 | """
Testing for export functions of decision trees (sklearn.tree.export).
"""
from re import finditer
from numpy.testing import assert_equal
from nose.tools import assert_raises
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.tree import export_graphviz
from sklearn.externals.six import StringIO
from sklearn.utils.testing import assert_in
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
y2 = [[-1, 1], [-1, 2], [-1, 3], [1, 1], [1, 2], [1, 3]]
w = [1, 1, 1, .5, .5, .5]
def test_graphviz_toy():
# Check correctness of export_graphviz
clf = DecisionTreeClassifier(max_depth=3,
min_samples_split=1,
criterion="gini",
random_state=2)
clf.fit(X, y)
# Test export code
out = StringIO()
export_graphviz(clf, out_file=out)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box] ;\n' \
'0 [label="X[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
'value = [3, 3]"] ;\n' \
'1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'}'
assert_equal(contents1, contents2)
# Test with feature_names
out = StringIO()
export_graphviz(clf, out_file=out, feature_names=["feature0", "feature1"])
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box] ;\n' \
'0 [label="feature0 <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
'value = [3, 3]"] ;\n' \
'1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'}'
assert_equal(contents1, contents2)
# Test with class_names
out = StringIO()
export_graphviz(clf, out_file=out, class_names=["yes", "no"])
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box] ;\n' \
'0 [label="X[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
'value = [3, 3]\\nclass = yes"] ;\n' \
'1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]\\n' \
'class = yes"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]\\n' \
'class = no"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'}'
assert_equal(contents1, contents2)
# Test plot_options
out = StringIO()
export_graphviz(clf, out_file=out, filled=True, impurity=False,
proportion=True, special_characters=True, rounded=True)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box, style="filled, rounded", color="black", ' \
'fontname=helvetica] ;\n' \
'edge [fontname=helvetica] ;\n' \
'0 [label=<X<SUB>0</SUB> ≤ 0.0<br/>samples = 100.0%<br/>' \
'value = [0.5, 0.5]>, fillcolor="#e5813900"] ;\n' \
'1 [label=<samples = 50.0%<br/>value = [1.0, 0.0]>, ' \
'fillcolor="#e58139ff"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label=<samples = 50.0%<br/>value = [0.0, 1.0]>, ' \
'fillcolor="#399de5ff"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'}'
assert_equal(contents1, contents2)
# Test max_depth
out = StringIO()
export_graphviz(clf, out_file=out, max_depth=0, class_names=True)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box] ;\n' \
'0 [label="X[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
'value = [3, 3]\\nclass = y[0]"] ;\n' \
'1 [label="(...)"] ;\n' \
'0 -> 1 ;\n' \
'2 [label="(...)"] ;\n' \
'0 -> 2 ;\n' \
'}'
assert_equal(contents1, contents2)
# Test max_depth with plot_options
out = StringIO()
export_graphviz(clf, out_file=out, max_depth=0, filled=True,
node_ids=True)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box, style="filled", color="black"] ;\n' \
'0 [label="node #0\\nX[0] <= 0.0\\ngini = 0.5\\n' \
'samples = 6\\nvalue = [3, 3]", fillcolor="#e5813900"] ;\n' \
'1 [label="(...)", fillcolor="#C0C0C0"] ;\n' \
'0 -> 1 ;\n' \
'2 [label="(...)", fillcolor="#C0C0C0"] ;\n' \
'0 -> 2 ;\n' \
'}'
assert_equal(contents1, contents2)
# Test multi-output with weighted samples
clf = DecisionTreeClassifier(max_depth=2,
min_samples_split=1,
criterion="gini",
random_state=2)
clf = clf.fit(X, y2, sample_weight=w)
out = StringIO()
export_graphviz(clf, out_file=out, filled=True, impurity=False)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box, style="filled", color="black"] ;\n' \
'0 [label="X[0] <= 0.0\\nsamples = 6\\n' \
'value = [[3.0, 1.5, 0.0]\\n' \
'[1.5, 1.5, 1.5]]", fillcolor="#e5813900"] ;\n' \
'1 [label="X[1] <= -1.5\\nsamples = 3\\n' \
'value = [[3, 0, 0]\\n[1, 1, 1]]", ' \
'fillcolor="#e5813965"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label="samples = 1\\nvalue = [[1, 0, 0]\\n' \
'[0, 0, 1]]", fillcolor="#e58139ff"] ;\n' \
'1 -> 2 ;\n' \
'3 [label="samples = 2\\nvalue = [[2, 0, 0]\\n' \
'[1, 1, 0]]", fillcolor="#e581398c"] ;\n' \
'1 -> 3 ;\n' \
'4 [label="X[0] <= 1.5\\nsamples = 3\\n' \
'value = [[0.0, 1.5, 0.0]\\n[0.5, 0.5, 0.5]]", ' \
'fillcolor="#e5813965"] ;\n' \
'0 -> 4 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'5 [label="samples = 2\\nvalue = [[0.0, 1.0, 0.0]\\n' \
'[0.5, 0.5, 0.0]]", fillcolor="#e581398c"] ;\n' \
'4 -> 5 ;\n' \
'6 [label="samples = 1\\nvalue = [[0.0, 0.5, 0.0]\\n' \
'[0.0, 0.0, 0.5]]", fillcolor="#e58139ff"] ;\n' \
'4 -> 6 ;\n' \
'}'
assert_equal(contents1, contents2)
# Test regression output with plot_options
clf = DecisionTreeRegressor(max_depth=3,
min_samples_split=1,
criterion="mse",
random_state=2)
clf.fit(X, y)
out = StringIO()
export_graphviz(clf, out_file=out, filled=True, leaves_parallel=True,
rotate=True, rounded=True)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box, style="filled, rounded", color="black", ' \
'fontname=helvetica] ;\n' \
'graph [ranksep=equally, splines=polyline] ;\n' \
'edge [fontname=helvetica] ;\n' \
'rankdir=LR ;\n' \
'0 [label="X[0] <= 0.0\\nmse = 1.0\\nsamples = 6\\n' \
'value = 0.0", fillcolor="#e581397f"] ;\n' \
'1 [label="mse = 0.0\\nsamples = 3\\nvalue = -1.0", ' \
'fillcolor="#e5813900"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="True"] ;\n' \
'2 [label="mse = 0.0\\nsamples = 3\\nvalue = 1.0", ' \
'fillcolor="#e58139ff"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=45, ' \
'headlabel="False"] ;\n' \
'{rank=same ; 0} ;\n' \
'{rank=same ; 1; 2} ;\n' \
'}'
assert_equal(contents1, contents2)
def test_graphviz_errors():
# Check for errors of export_graphviz
clf = DecisionTreeClassifier(max_depth=3, min_samples_split=1)
clf.fit(X, y)
# Check feature_names error
out = StringIO()
assert_raises(IndexError, export_graphviz, clf, out, feature_names=[])
# Check class_names error
out = StringIO()
assert_raises(IndexError, export_graphviz, clf, out, class_names=[])
def test_friedman_mse_in_graphviz():
clf = DecisionTreeRegressor(criterion="friedman_mse", random_state=0)
clf.fit(X, y)
dot_data = StringIO()
export_graphviz(clf, out_file=dot_data)
clf = GradientBoostingClassifier(n_estimators=2, random_state=0)
clf.fit(X, y)
for estimator in clf.estimators_:
export_graphviz(estimator[0], out_file=dot_data)
for finding in finditer("\[.*?samples.*?\]", dot_data.getvalue()):
assert_in("friedman_mse", finding.group())
| bsd-3-clause |
mganeva/mantid | qt/python/mantidqt/project/plotssaver.py | 1 | 10049 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source
# & Institut Laue - Langevin
# SPDX - License - Identifier: GPL - 3.0 +
# This file is part of the mantidqt package
#
from __future__ import (absolute_import, division, print_function, unicode_literals)
from matplotlib import ticker
import matplotlib.axis
from matplotlib.image import AxesImage
from mantid import logger
try:
from matplotlib.colors import to_hex
except ImportError:
from matplotlib.colors import colorConverter, rgb2hex
def to_hex(color):
return rgb2hex(colorConverter.to_rgb(color))
class PlotsSaver(object):
def __init__(self):
self.figure_creation_args = {}
def save_plots(self, plot_dict):
        # If the argument is None, return an empty list
if plot_dict is None:
return []
plot_list = []
for index in plot_dict:
try:
plot_list.append(self.get_dict_from_fig(plot_dict[index].canvas.figure))
except BaseException as e:
                # Catch all errors in here so it can fail silently-ish; if this is
                # happening on all plots, make sure you have built your project.
if isinstance(e, KeyboardInterrupt):
raise KeyboardInterrupt
logger.warning("A plot was unable to be saved")
return plot_list
def get_dict_from_fig(self, fig):
axes_list = []
create_list = []
for ax in fig.axes:
try:
create_list.append(ax.creation_args)
self.figure_creation_args = ax.creation_args
except AttributeError:
                logger.debug("Found an axes without creation_args - common with colorfill")
continue
axes_list.append(self.get_dict_for_axes(ax))
fig_dict = {"creationArguments": create_list,
"axes": axes_list,
"label": fig._label,
"properties": self.get_dict_from_fig_properties(fig)}
return fig_dict
@staticmethod
def get_dict_for_axes_colorbar(ax):
image = None
cb_dict = {}
# If an image is present (from imshow)
if len(ax.images) > 0 and isinstance(ax.images[0], AxesImage):
image = ax.images[0]
# If an image is present from pcolor/pcolormesh
elif len(ax.collections) > 0 and isinstance(ax.collections[0], AxesImage):
image = ax.collections[0]
else:
cb_dict["exists"] = False
return cb_dict
cb_dict["exists"] = True
cb_dict["max"] = image.norm.vmax
cb_dict["min"] = image.norm.vmin
cb_dict["interpolation"] = image._interpolation
cb_dict["cmap"] = image.cmap.name
cb_dict["label"] = image._label
return cb_dict
def get_dict_for_axes(self, ax):
ax_dict = {"properties": self.get_dict_from_axes_properties(ax),
"title": ax.get_title(),
"xAxisTitle": ax.get_xlabel(),
"yAxisTitle": ax.get_ylabel(),
"colorbar": self.get_dict_for_axes_colorbar(ax)}
# Get lines from the axes and store it's data
lines_list = []
for index, line in enumerate(ax.lines):
lines_list.append(self.get_dict_from_line(line, index))
ax_dict["lines"] = lines_list
texts_list = []
for text in ax.texts:
texts_list.append(self.get_dict_from_text(text))
ax_dict["texts"] = texts_list
# Potentially need to handle artists that are Text
artist_text_dict = {}
for artist in ax.artists:
if isinstance(artist, matplotlib.text.Text):
artist_text_dict = self.get_dict_from_text(artist)
ax_dict["textFromArtists"] = artist_text_dict
legend_dict = {}
legend = ax.get_legend()
if legend is not None:
legend_dict["visible"] = legend.get_visible()
legend_dict["exists"] = True
else:
legend_dict["exists"] = False
ax_dict["legend"] = legend_dict
return ax_dict
def get_dict_from_axes_properties(self, ax):
return {"bounds": ax.get_position().bounds,
"dynamic": ax.get_navigate(),
"axisOn": ax.axison,
"frameOn": ax.get_frame_on(),
"visible": ax.get_visible(),
"xAxisProperties": self.get_dict_from_axis_properties(ax.xaxis),
"yAxisProperties": self.get_dict_from_axis_properties(ax.yaxis),
"xAxisScale": ax.xaxis.get_scale(),
"xLim": ax.get_xlim(),
"yAxisScale": ax.yaxis.get_scale(),
"yLim": ax.get_ylim()}
def get_dict_from_axis_properties(self, ax):
prop_dict = {"majorTickLocator": type(ax.get_major_locator()).__name__,
"minorTickLocator": type(ax.get_minor_locator()).__name__,
"majorTickFormatter": type(ax.get_major_formatter()).__name__,
"minorTickFormatter": type(ax.get_minor_formatter()).__name__,
"gridStyle": self.get_dict_for_grid_style(ax),
"visible": ax.get_visible()}
label1On = ax._major_tick_kw.get('label1On', True)
if isinstance(ax, matplotlib.axis.XAxis):
if label1On:
prop_dict["position"] = "Bottom"
else:
prop_dict["position"] = "Top"
elif isinstance(ax, matplotlib.axis.YAxis):
if label1On:
prop_dict["position"] = "Left"
else:
prop_dict["position"] = "Right"
else:
raise ValueError("Value passed is not a valid axis")
if isinstance(ax.get_major_locator(), ticker.FixedLocator):
prop_dict["majorTickLocatorValues"] = list(ax.get_major_locator())
else:
prop_dict["majorTickLocatorValues"] = None
if isinstance(ax.get_minor_locator(), ticker.FixedLocator):
prop_dict["minorTickLocatorValues"] = list(ax.get_minor_locator())
else:
prop_dict["minorTickLocatorValues"] = None
formatter = ax.get_major_formatter()
if isinstance(formatter, ticker.FixedFormatter):
prop_dict["majorTickFormat"] = list(formatter.seq)
else:
prop_dict["majorTickFormat"] = None
formatter = ax.get_minor_formatter()
if isinstance(formatter, ticker.FixedFormatter):
prop_dict["minorTickFormat"] = list(formatter.seq)
else:
prop_dict["minorTickFormat"] = None
labels = ax.get_ticklabels()
if labels:
prop_dict["fontSize"] = labels[0].get_fontsize()
else:
prop_dict["fontSize"] = ""
return prop_dict
@staticmethod
def get_dict_for_grid_style(ax):
grid_style = {}
gridlines = ax.get_gridlines()
if ax._gridOnMajor and len(gridlines) > 0:
grid_style["color"] = to_hex(gridlines[0].get_color())
grid_style["alpha"] = gridlines[0].get_alpha()
grid_style["gridOn"] = True
else:
grid_style["gridOn"] = False
return grid_style
def get_dict_from_line(self, line, index=0):
line_dict = {"lineIndex": index,
"label": line.get_label(),
"alpha": line.get_alpha(),
"color": to_hex(line.get_color()),
"lineWidth": line.get_linewidth(),
"lineStyle": line.get_linestyle(),
"markerStyle": self.get_dict_from_marker_style(line),
"errorbars": self.get_dict_for_errorbars(line)}
if line_dict["alpha"] is None:
line_dict["alpha"] = 1
return line_dict
def get_dict_for_errorbars(self, line):
if self.figure_creation_args[0]["function"] == "errorbar":
return {"exists": True,
"dashCapStyle": line.get_dash_capstyle(),
"dashJoinStyle": line.get_dash_joinstyle(),
"solidCapStyle": line.get_solid_capstyle(),
"solidJoinStyle": line.get_solid_joinstyle()}
else:
return {"exists": False}
@staticmethod
def get_dict_from_marker_style(line):
style_dict = {"faceColor": to_hex(line.get_markerfacecolor()),
"edgeColor": to_hex(line.get_markeredgecolor()),
"edgeWidth": line.get_markeredgewidth(),
"markerType": line.get_marker(),
"markerSize": line.get_markersize(),
"zOrder": line.get_zorder()}
return style_dict
def get_dict_from_text(self, text):
text_dict = {"text": text.get_text()}
if text_dict["text"]:
# text_dict["transform"] = text.get_transform()
text_dict["position"] = text.get_position()
text_dict["useTeX"] = text.get_usetex()
text_dict["style"] = self.get_dict_from_text_style(text)
return text_dict
@staticmethod
def get_dict_from_text_style(text):
style_dict = {"alpha": text.get_alpha(),
"textSize": text.get_size(),
"color": to_hex(text.get_color()),
"hAlign": text.get_horizontalalignment(),
"vAlign": text.get_verticalalignment(),
"rotation": text.get_rotation(),
"zOrder": text.get_zorder()}
if style_dict["alpha"] is None:
style_dict["alpha"] = 1
return style_dict
@staticmethod
def get_dict_from_fig_properties(fig):
return {"figWidth": fig.get_figwidth(), "figHeight": fig.get_figheight(), "dpi": fig.dpi}
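# --- Illustrative sketch, not part of the mantidqt API ----------------------
# A minimal sketch of serialising a plain Matplotlib figure with the class
# above; real callers pass workbench figure managers to save_plots, so calling
# get_dict_from_fig directly here is an assumption made for illustration.
def _example_serialise_figure():
    from matplotlib.figure import Figure

    fig = Figure()
    ax = fig.add_subplot(111)
    ax.plot([1, 2, 3], [1, 4, 9], label="squares")
    ax.legend()
    # Returns the nested dict of figure/axes/line properties built above; axes
    # without creation_args (as here) are skipped with a debug message.
    return PlotsSaver().get_dict_from_fig(fig)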
| gpl-3.0 |
robin-lai/scikit-learn | examples/cluster/plot_kmeans_silhouette_analysis.py | 242 | 5885 | """
===============================================================================
Selecting the number of clusters with silhouette analysis on KMeans clustering
===============================================================================
Silhouette analysis can be used to study the separation distance between the
resulting clusters. The silhouette plot displays a measure of how close each
point in one cluster is to points in the neighboring clusters and thus provides
a way to assess parameters like number of clusters visually. This measure has a
range of [-1, 1].
Silhouette coefficients (as these values are referred to) near +1 indicate
that the sample is far away from the neighboring clusters. A value of 0
indicates that the sample is on or very close to the decision boundary between
two neighboring clusters and negative values indicate that those samples might
have been assigned to the wrong cluster.
In this example the silhouette analysis is used to choose an optimal value for
``n_clusters``. The silhouette plot shows that the ``n_clusters`` values of 3, 5
and 6 are bad picks for the given data due to the presence of clusters with
below average silhouette scores and also due to wide fluctuations in the size
of the silhouette plots. Silhouette analysis is more ambivalent in deciding
between 2 and 4.
Also from the thickness of the silhouette plot the cluster size can be
visualized. The silhouette plot for cluster 0 when ``n_clusters`` is equal to
2, is bigger in size owing to the grouping of the 3 sub clusters into one big
cluster. However when the ``n_clusters`` is equal to 4, all the plots are more
or less of similar thickness and hence are of similar sizes as can be also
verified from the labelled scatter plot on the right.
"""
from __future__ import print_function
from sklearn.datasets import make_blobs
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_samples, silhouette_score
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import numpy as np
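# For reference, the per-sample measure plotted below is the silhouette
# coefficient s(i) = (b(i) - a(i)) / max(a(i), b(i)), where a(i) is the mean
# intra-cluster distance of sample i and b(i) is its mean distance to the
# nearest other cluster; silhouette_samples computes this quantity.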
print(__doc__)
# Generating the sample data from make_blobs
# This particular setting has one distinct cluster and 3 clusters placed close
# together.
X, y = make_blobs(n_samples=500,
n_features=2,
centers=4,
cluster_std=1,
center_box=(-10.0, 10.0),
shuffle=True,
random_state=1) # For reproducibility
range_n_clusters = [2, 3, 4, 5, 6]
for n_clusters in range_n_clusters:
# Create a subplot with 1 row and 2 columns
fig, (ax1, ax2) = plt.subplots(1, 2)
fig.set_size_inches(18, 7)
# The 1st subplot is the silhouette plot
# The silhouette coefficient can range from -1, 1 but in this example all
# lie within [-0.1, 1]
ax1.set_xlim([-0.1, 1])
# The (n_clusters+1)*10 is for inserting blank space between silhouette
# plots of individual clusters, to demarcate them clearly.
ax1.set_ylim([0, len(X) + (n_clusters + 1) * 10])
# Initialize the clusterer with n_clusters value and a random generator
# seed of 10 for reproducibility.
clusterer = KMeans(n_clusters=n_clusters, random_state=10)
cluster_labels = clusterer.fit_predict(X)
# The silhouette_score gives the average value for all the samples.
# This gives a perspective into the density and separation of the formed
# clusters
silhouette_avg = silhouette_score(X, cluster_labels)
print("For n_clusters =", n_clusters,
"The average silhouette_score is :", silhouette_avg)
# Compute the silhouette scores for each sample
sample_silhouette_values = silhouette_samples(X, cluster_labels)
y_lower = 10
for i in range(n_clusters):
# Aggregate the silhouette scores for samples belonging to
# cluster i, and sort them
ith_cluster_silhouette_values = \
sample_silhouette_values[cluster_labels == i]
ith_cluster_silhouette_values.sort()
size_cluster_i = ith_cluster_silhouette_values.shape[0]
y_upper = y_lower + size_cluster_i
color = cm.spectral(float(i) / n_clusters)
ax1.fill_betweenx(np.arange(y_lower, y_upper),
0, ith_cluster_silhouette_values,
facecolor=color, edgecolor=color, alpha=0.7)
# Label the silhouette plots with their cluster numbers at the middle
ax1.text(-0.05, y_lower + 0.5 * size_cluster_i, str(i))
# Compute the new y_lower for next plot
y_lower = y_upper + 10 # 10 for the 0 samples
ax1.set_title("The silhouette plot for the various clusters.")
ax1.set_xlabel("The silhouette coefficient values")
ax1.set_ylabel("Cluster label")
    # The vertical line for average silhouette score of all the values
ax1.axvline(x=silhouette_avg, color="red", linestyle="--")
ax1.set_yticks([]) # Clear the yaxis labels / ticks
ax1.set_xticks([-0.1, 0, 0.2, 0.4, 0.6, 0.8, 1])
# 2nd Plot showing the actual clusters formed
colors = cm.spectral(cluster_labels.astype(float) / n_clusters)
ax2.scatter(X[:, 0], X[:, 1], marker='.', s=30, lw=0, alpha=0.7,
c=colors)
# Labeling the clusters
centers = clusterer.cluster_centers_
# Draw white circles at cluster centers
ax2.scatter(centers[:, 0], centers[:, 1],
marker='o', c="white", alpha=1, s=200)
for i, c in enumerate(centers):
ax2.scatter(c[0], c[1], marker='$%d$' % i, alpha=1, s=50)
ax2.set_title("The visualization of the clustered data.")
ax2.set_xlabel("Feature space for the 1st feature")
ax2.set_ylabel("Feature space for the 2nd feature")
plt.suptitle(("Silhouette analysis for KMeans clustering on sample data "
"with n_clusters = %d" % n_clusters),
fontsize=14, fontweight='bold')
plt.show()
| bsd-3-clause |
AIML/scikit-learn | sklearn/neighbors/tests/test_approximate.py | 142 | 18692 | """
Testing for the approximate neighbor search using
Locality Sensitive Hashing Forest module
(sklearn.neighbors.LSHForest).
"""
# Author: Maheshakya Wijewardena, Joel Nothman
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_array_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.neighbors import LSHForest
from sklearn.neighbors import NearestNeighbors
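# --- Illustrative sketch, not part of the test suite ------------------------
# A minimal sketch of the estimator exercised by the tests below, assuming
# only NumPy and scikit-learn; the data sizes and forest parameters are
# arbitrary illustration values.
def _example_lshforest_usage():
    rng = np.random.RandomState(0)
    X = rng.rand(100, 10)
    lshf = LSHForest(n_estimators=10, n_candidates=50, random_state=0)
    lshf.fit(X)
    # Approximate 3-nearest-neighbour query for the first two samples.
    distances, indices = lshf.kneighbors(X[:2], n_neighbors=3)
    return distances, indices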
def test_neighbors_accuracy_with_n_candidates():
# Checks whether accuracy increases as `n_candidates` increases.
n_candidates_values = np.array([.1, 50, 500])
n_samples = 100
n_features = 10
n_iter = 10
n_points = 5
rng = np.random.RandomState(42)
accuracies = np.zeros(n_candidates_values.shape[0], dtype=float)
X = rng.rand(n_samples, n_features)
for i, n_candidates in enumerate(n_candidates_values):
lshf = LSHForest(n_candidates=n_candidates)
lshf.fit(X)
for j in range(n_iter):
query = X[rng.randint(0, n_samples)]
neighbors = lshf.kneighbors(query, n_neighbors=n_points,
return_distance=False)
distances = pairwise_distances(query, X, metric='cosine')
ranks = np.argsort(distances)[0, :n_points]
intersection = np.intersect1d(ranks, neighbors).shape[0]
ratio = intersection / float(n_points)
accuracies[i] = accuracies[i] + ratio
accuracies[i] = accuracies[i] / float(n_iter)
# Sorted accuracies should be equal to original accuracies
assert_true(np.all(np.diff(accuracies) >= 0),
msg="Accuracies are not non-decreasing.")
# Highest accuracy should be strictly greater than the lowest
assert_true(np.ptp(accuracies) > 0,
msg="Highest accuracy is not strictly greater than lowest.")
def test_neighbors_accuracy_with_n_estimators():
# Checks whether accuracy increases as `n_estimators` increases.
n_estimators = np.array([1, 10, 100])
n_samples = 100
n_features = 10
n_iter = 10
n_points = 5
rng = np.random.RandomState(42)
accuracies = np.zeros(n_estimators.shape[0], dtype=float)
X = rng.rand(n_samples, n_features)
for i, t in enumerate(n_estimators):
lshf = LSHForest(n_candidates=500, n_estimators=t)
lshf.fit(X)
for j in range(n_iter):
query = X[rng.randint(0, n_samples)]
neighbors = lshf.kneighbors(query, n_neighbors=n_points,
return_distance=False)
distances = pairwise_distances(query, X, metric='cosine')
ranks = np.argsort(distances)[0, :n_points]
intersection = np.intersect1d(ranks, neighbors).shape[0]
ratio = intersection / float(n_points)
accuracies[i] = accuracies[i] + ratio
accuracies[i] = accuracies[i] / float(n_iter)
# Sorted accuracies should be equal to original accuracies
assert_true(np.all(np.diff(accuracies) >= 0),
msg="Accuracies are not non-decreasing.")
# Highest accuracy should be strictly greater than the lowest
assert_true(np.ptp(accuracies) > 0,
msg="Highest accuracy is not strictly greater than lowest.")
@ignore_warnings
def test_kneighbors():
# Checks whether desired number of neighbors are returned.
# It is guaranteed to return the requested number of neighbors
# if `min_hash_match` is set to 0. Returned distances should be
# in ascending order.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest(min_hash_match=0)
# Test unfitted estimator
assert_raises(ValueError, lshf.kneighbors, X[0])
lshf.fit(X)
for i in range(n_iter):
n_neighbors = rng.randint(0, n_samples)
query = X[rng.randint(0, n_samples)]
neighbors = lshf.kneighbors(query, n_neighbors=n_neighbors,
return_distance=False)
# Desired number of neighbors should be returned.
assert_equal(neighbors.shape[1], n_neighbors)
# Multiple points
n_queries = 5
queries = X[rng.randint(0, n_samples, n_queries)]
distances, neighbors = lshf.kneighbors(queries,
n_neighbors=1,
return_distance=True)
assert_equal(neighbors.shape[0], n_queries)
assert_equal(distances.shape[0], n_queries)
# Test only neighbors
neighbors = lshf.kneighbors(queries, n_neighbors=1,
return_distance=False)
assert_equal(neighbors.shape[0], n_queries)
# Test random point(not in the data set)
query = rng.randn(n_features)
lshf.kneighbors(query, n_neighbors=1,
return_distance=False)
# Test n_neighbors at initialization
neighbors = lshf.kneighbors(query, return_distance=False)
assert_equal(neighbors.shape[1], 5)
# Test `neighbors` has an integer dtype
assert_true(neighbors.dtype.kind == 'i',
msg="neighbors are not in integer dtype.")
def test_radius_neighbors():
# Checks whether returned distances are less than `radius`
# At least one point should be returned when the `radius` is set
# to mean distance from the considering point to other points in
# the database.
# Moreover, this test compares the radius neighbors of LSHForest
# with the `sklearn.neighbors.NearestNeighbors`.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest()
# Test unfitted estimator
assert_raises(ValueError, lshf.radius_neighbors, X[0])
lshf.fit(X)
for i in range(n_iter):
# Select a random point in the dataset as the query
query = X[rng.randint(0, n_samples)]
# At least one neighbor should be returned when the radius is the
# mean distance from the query to the points of the dataset.
mean_dist = np.mean(pairwise_distances(query, X, metric='cosine'))
neighbors = lshf.radius_neighbors(query, radius=mean_dist,
return_distance=False)
assert_equal(neighbors.shape, (1,))
assert_equal(neighbors.dtype, object)
assert_greater(neighbors[0].shape[0], 0)
# All distances to points in the results of the radius query should
# be less than mean_dist
distances, neighbors = lshf.radius_neighbors(query,
radius=mean_dist,
return_distance=True)
assert_array_less(distances[0], mean_dist)
# Multiple points
n_queries = 5
queries = X[rng.randint(0, n_samples, n_queries)]
distances, neighbors = lshf.radius_neighbors(queries,
return_distance=True)
# Each query may return a different number of neighbors, so dists and inds
# are 1D object arrays of (variable-length) arrays rather than 2D arrays.
assert_equal(distances.shape, (n_queries,))
assert_equal(distances.dtype, object)
assert_equal(neighbors.shape, (n_queries,))
assert_equal(neighbors.dtype, object)
# Compare with exact neighbor search
query = X[rng.randint(0, n_samples)]
mean_dist = np.mean(pairwise_distances(query, X, metric='cosine'))
nbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
distances_exact, _ = nbrs.radius_neighbors(query, radius=mean_dist)
distances_approx, _ = lshf.radius_neighbors(query, radius=mean_dist)
# Radius-based queries do not sort the result points and the order
# depends on the method, the random_state and the dataset order. Therefore
# we need to sort the results ourselves before performing any comparison.
sorted_dists_exact = np.sort(distances_exact[0])
sorted_dists_approx = np.sort(distances_approx[0])
# Distances to exact neighbors are less than or equal to approximate
# counterparts as the approximate radius query might have missed some
# closer neighbors.
assert_true(np.all(np.less_equal(sorted_dists_exact,
sorted_dists_approx)))
def test_radius_neighbors_boundary_handling():
X = [[0.999, 0.001], [0.5, 0.5], [0, 1.], [-1., 0.001]]
n_points = len(X)
# Build an exact nearest neighbors model as reference model to ensure
# consistency between exact and approximate methods
nnbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
# Build a LSHForest model with hyperparameter values that always guarantee
# exact results on this toy dataset.
lsfh = LSHForest(min_hash_match=0, n_candidates=n_points).fit(X)
# define a query aligned with the first axis
query = [1., 0.]
# Compute the exact cosine distances of the query to the four points of
# the dataset
dists = pairwise_distances(query, X, metric='cosine').ravel()
# The first point is almost aligned with the query (very small angle),
# so the cosine distance should be almost zero:
assert_almost_equal(dists[0], 0, decimal=5)
# The second point forms an angle of 45 degrees with the query vector
assert_almost_equal(dists[1], 1 - np.cos(np.pi / 4))
# The third point is orthogonal to the query vector, hence at a distance of
# exactly one:
assert_almost_equal(dists[2], 1)
# The last point is almost collinear with the query but with opposite sign,
# therefore it has a cosine 'distance' very close to the maximum possible
# value of 2.
assert_almost_equal(dists[3], 2, decimal=5)
# If we query with a radius of one, all the samples except the last sample
# should be included in the results. This means that the third sample
# is lying on the boundary of the radius query:
exact_dists, exact_idx = nnbrs.radius_neighbors(query, radius=1)
approx_dists, approx_idx = lsfh.radius_neighbors(query, radius=1)
assert_array_equal(np.sort(exact_idx[0]), [0, 1, 2])
assert_array_equal(np.sort(approx_idx[0]), [0, 1, 2])
assert_array_almost_equal(np.sort(exact_dists[0]), dists[:-1])
assert_array_almost_equal(np.sort(approx_dists[0]), dists[:-1])
# If we perform the same query with a slightly lower radius, the third
# point of the dataset that lay on the boundary of the previous query
# is now rejected:
eps = np.finfo(np.float64).eps
exact_dists, exact_idx = nnbrs.radius_neighbors(query, radius=1 - eps)
approx_dists, approx_idx = lsfh.radius_neighbors(query, radius=1 - eps)
assert_array_equal(np.sort(exact_idx[0]), [0, 1])
assert_array_equal(np.sort(approx_idx[0]), [0, 1])
assert_array_almost_equal(np.sort(exact_dists[0]), dists[:-2])
assert_array_almost_equal(np.sort(approx_dists[0]), dists[:-2])
def test_distances():
# Checks whether returned neighbors are from closest to farthest.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest()
lshf.fit(X)
for i in range(n_iter):
n_neighbors = rng.randint(0, n_samples)
query = X[rng.randint(0, n_samples)]
distances, neighbors = lshf.kneighbors(query,
n_neighbors=n_neighbors,
return_distance=True)
# Returned neighbors should be from closest to farthest, that is
# increasing distance values.
assert_true(np.all(np.diff(distances[0]) >= 0))
# Note: the radius_neighbors method does not guarantee the order of
# the results.
def test_fit():
# Checks whether `fit` method sets all attribute values correctly.
n_samples = 12
n_features = 2
n_estimators = 5
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest(n_estimators=n_estimators)
lshf.fit(X)
# _input_array = X
assert_array_equal(X, lshf._fit_X)
# A hash function g(p) for each tree
assert_equal(n_estimators, len(lshf.hash_functions_))
# Hash length = 32
assert_equal(32, lshf.hash_functions_[0].components_.shape[0])
# Number of trees_ in the forest
assert_equal(n_estimators, len(lshf.trees_))
# Each tree has entries for every data point
assert_equal(n_samples, len(lshf.trees_[0]))
# Original indices after sorting the hashes
assert_equal(n_estimators, len(lshf.original_indices_))
# Each set of original indices in a tree has entries for every data point
assert_equal(n_samples, len(lshf.original_indices_[0]))
def test_partial_fit():
# Checks whether the inserted array is consistent with the fitted data.
# `partial_fit` method should set all attribute values correctly.
n_samples = 12
n_samples_partial_fit = 3
n_features = 2
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
X_partial_fit = rng.rand(n_samples_partial_fit, n_features)
lshf = LSHForest()
# Test unfitted estimator
lshf.partial_fit(X)
assert_array_equal(X, lshf._fit_X)
lshf.fit(X)
# Insert wrong dimension
assert_raises(ValueError, lshf.partial_fit,
np.random.randn(n_samples_partial_fit, n_features - 1))
lshf.partial_fit(X_partial_fit)
# _fit_X should contain n_samples + n_samples_partial_fit rows after insertion
assert_equal(lshf._fit_X.shape[0],
n_samples + n_samples_partial_fit)
# original_indices_[0] should cover all inserted samples
assert_equal(len(lshf.original_indices_[0]),
n_samples + n_samples_partial_fit)
# trees_[1] should cover all inserted samples
assert_equal(len(lshf.trees_[1]),
n_samples + n_samples_partial_fit)
def test_hash_functions():
# Checks randomness of hash functions.
# Variance and mean of each hash function (projection vector)
# should be different from flattened array of hash functions.
# If hash functions are not randomly built (seeded with
# same value), variances and means of all functions are equal.
n_samples = 12
n_features = 2
n_estimators = 5
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest(n_estimators=n_estimators,
random_state=rng.randint(0, np.iinfo(np.int32).max))
lshf.fit(X)
hash_functions = []
for i in range(n_estimators):
hash_functions.append(lshf.hash_functions_[i].components_)
for i in range(n_estimators):
assert_not_equal(np.var(hash_functions),
np.var(lshf.hash_functions_[i].components_))
for i in range(n_estimators):
assert_not_equal(np.mean(hash_functions),
np.mean(lshf.hash_functions_[i].components_))
def test_candidates():
# Checks whether candidates are sufficient.
# This should handle the cases when number of candidates is 0.
# User should be warned when number of candidates is less than
# requested number of neighbors.
X_train = np.array([[5, 5, 2], [21, 5, 5], [1, 1, 1], [8, 9, 1],
[6, 10, 2]], dtype=np.float32)
X_test = np.array([7, 10, 3], dtype=np.float32)
# For zero candidates
lshf = LSHForest(min_hash_match=32)
lshf.fit(X_train)
message = ("Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (3, 32))
assert_warns_message(UserWarning, message, lshf.kneighbors,
X_test, n_neighbors=3)
distances, neighbors = lshf.kneighbors(X_test, n_neighbors=3)
assert_equal(distances.shape[1], 3)
# For candidates less than n_neighbors
lshf = LSHForest(min_hash_match=31)
lshf.fit(X_train)
message = ("Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (5, 31))
assert_warns_message(UserWarning, message, lshf.kneighbors,
X_test, n_neighbors=5)
distances, neighbors = lshf.kneighbors(X_test, n_neighbors=5)
assert_equal(distances.shape[1], 5)
def test_graphs():
# Smoke tests for graph methods.
n_samples_sizes = [5, 10, 20]
n_features = 3
rng = np.random.RandomState(42)
for n_samples in n_samples_sizes:
X = rng.rand(n_samples, n_features)
lshf = LSHForest(min_hash_match=0)
lshf.fit(X)
kneighbors_graph = lshf.kneighbors_graph(X)
radius_neighbors_graph = lshf.radius_neighbors_graph(X)
assert_equal(kneighbors_graph.shape[0], n_samples)
assert_equal(kneighbors_graph.shape[1], n_samples)
assert_equal(radius_neighbors_graph.shape[0], n_samples)
assert_equal(radius_neighbors_graph.shape[1], n_samples)
def test_sparse_input():
# note: Fixed random state in sp.rand is not supported in older scipy.
# The test should succeed regardless.
X1 = sp.rand(50, 100)
X2 = sp.rand(10, 100)
forest_sparse = LSHForest(radius=1, random_state=0).fit(X1)
forest_dense = LSHForest(radius=1, random_state=0).fit(X1.A)
d_sparse, i_sparse = forest_sparse.kneighbors(X2, return_distance=True)
d_dense, i_dense = forest_dense.kneighbors(X2.A, return_distance=True)
assert_almost_equal(d_sparse, d_dense)
assert_almost_equal(i_sparse, i_dense)
d_sparse, i_sparse = forest_sparse.radius_neighbors(X2,
return_distance=True)
d_dense, i_dense = forest_dense.radius_neighbors(X2.A,
return_distance=True)
assert_equal(d_sparse.shape, d_dense.shape)
for a, b in zip(d_sparse, d_dense):
assert_almost_equal(a, b)
for a, b in zip(i_sparse, i_dense):
assert_almost_equal(a, b)
| bsd-3-clause |
lamastex/scalable-data-science | dbcArchives/2021/000_7-sds-3-x-ddl/exjobbsOfCombientMix2021_04_pspnet_tuning_parallel.py | 1 | 12625 | # Databricks notebook source
# MAGIC %md
# MAGIC # Implementation of [PSPNet](https://arxiv.org/pdf/1612.01105.pdf) with Hyperparameter Tuning in Parallel
# MAGIC
# MAGIC William Anzén ([Linkedin](https://www.linkedin.com/in/william-anz%C3%A9n-b52003199/)), Christian von Koch ([Linkedin](https://www.linkedin.com/in/christianvonkoch/))
# MAGIC
# MAGIC 2021, Stockholm, Sweden
# MAGIC
# MAGIC This project was supported by Combient Mix AB under the industrial supervision of Raazesh Sainudiin and Max Fischer.
# MAGIC
# MAGIC In this notebook, an implementation of [PSPNet](https://arxiv.org/pdf/1612.01105.pdf) is presented, an architecture that uses scene parsing, evaluates the images at different scales, and combines the different results into a final prediction. The architecture is evaluated on the [Oxford-IIIT Pet Dataset](https://www.robots.ox.ac.uk/~vgg/data/pets/) while some hyperparameters of the model are tuned in parallel through Spark Trials.
# COMMAND ----------
# MAGIC %md
# MAGIC Importing the required packages.
# COMMAND ----------
import tensorflow as tf
import tensorflow_datasets as tfds
import matplotlib.pyplot as plt
from tensorflow.keras.layers import AveragePooling2D, Conv2D, BatchNormalization, Activation, Concatenate, UpSampling2D, Reshape, TimeDistributed, ConvLSTM2D
from tensorflow.keras import Model
import numpy as np
from tensorflow.keras.applications.resnet50 import ResNet50
from hyperopt import fmin, tpe, hp, Trials, STATUS_OK, SparkTrials
# COMMAND ----------
# MAGIC %md
# MAGIC Defining functions for normalizing and transforming the images.
# COMMAND ----------
# Function for normalizing image_size so that pixel intensity is between 0 and 1
def normalize(input_image, input_mask):
input_image = tf.cast(input_image, tf.float32) / 255.0
input_mask -= 1
return input_image, input_mask
# Function for resizing the train images to the desired input shape of 128x128 as well as augmenting the training images
def load_image_train(datapoint):
input_image = tf.image.resize(datapoint['image'], (128, 128))
input_mask = tf.image.resize(datapoint['segmentation_mask'], (128, 128))
if tf.random.uniform(()) > 0.5:
input_image = tf.image.flip_left_right(input_image)
input_mask = tf.image.flip_left_right(input_mask)
input_image, input_mask = normalize(input_image, input_mask)
input_mask = tf.math.round(input_mask)
return input_image, input_mask
# Function for resizing the test images to the desired output shape (no augmenation)
def load_image_test(datapoint):
input_image = tf.image.resize(datapoint['image'], (128, 128))
input_mask = tf.image.resize(datapoint['segmentation_mask'], (128, 128))
input_image, input_mask = normalize(input_image, input_mask)
return input_image, input_mask
# COMMAND ----------
# MAGIC %md
# MAGIC The [Oxford-IIIT Pet Dataset](https://www.robots.ox.ac.uk/~vgg/data/pets/) is loaded from TensorFlow Datasets, the images are transformed as described above, and the datasets used for training and inference are created.
# COMMAND ----------
dataset, info = tfds.load('oxford_iiit_pet:3.*.*', with_info=True)
TRAIN_LENGTH = info.splits['train'].num_examples
TEST_LENGTH = info.splits['test'].num_examples
BATCH_SIZE = 64
BUFFER_SIZE = 1000
STEPS_PER_EPOCH = TRAIN_LENGTH // BATCH_SIZE
train = dataset['train'].map(load_image_train)
test = dataset['test'].map(load_image_test)
# COMMAND ----------
# MAGIC %md
# MAGIC The dataset is then converted to numpy format since Spark Trials does not yet handle the TensorFlow Dataset class.
# COMMAND ----------
train_numpy = tfds.as_numpy(train)
test_numpy = tfds.as_numpy(test)
# COMMAND ----------
X_train = np.array(list(map(lambda x: x[0], train_numpy)))
Y_train = np.array(list(map(lambda x: x[1], train_numpy)))
X_test = np.array(list(map(lambda x: x[0], test_numpy)))
Y_test = np.array(list(map(lambda x: x[1], test_numpy)))
# COMMAND ----------
# MAGIC %md
# MAGIC An example image is displayed.
# COMMAND ----------
def display(display_list):
plt.figure(figsize=(15, 15))
title = ['Input Image', 'True Mask', 'Predicted Mask']
for i in range(len(display_list)):
plt.subplot(1, len(display_list), i+1)
plt.title(title[i])
plt.imshow(tf.keras.preprocessing.image.array_to_img(display_list[i]))
plt.axis('off')
plt.show()
it = iter(train)
#for image, mask in next(train):
sample_image, sample_mask = next(it)
display([sample_image, sample_mask])
# COMMAND ----------
# MAGIC %md
# MAGIC Defining the functions needed for the PSPNet.
# COMMAND ----------
def pool_block(cur_tensor: tf.Tensor,
image_width: int,
image_height: int,
pooling_factor: int,
activation: str = 'relu'
) -> tf.Tensor:
"""
Parameters
----------
cur_tensor : tf.Tensor
Incoming tensor.
image_width : int
Width of the image.
image_height : int
Height of the image.
pooling_factor : int
Pooling factor to scale image.
activation : str, default = 'relu'
Activation function to be applied after the pooling operations.
Returns
-------
tf.Tensor
2D convolutional block.
"""
#Calculate the strides with image size and pooling factor
strides = [int(np.round(float(image_width)/pooling_factor)),
int(np.round(float(image_height)/pooling_factor))]
pooling_size = strides
x = AveragePooling2D(pooling_size, strides=strides, padding='same')(cur_tensor)
x = Conv2D(filters = 512,
kernel_size = (1,1),
padding = 'same')(x)
x = BatchNormalization()(x)
x = Activation(activation)(x)
# Resizing images to correct shape for future concat
x = tf.keras.layers.experimental.preprocessing.Resizing(
image_height,
image_width,
interpolation="bilinear")(x)
return x
def modify_ResNet_Dilation(
model: tf.keras.Model
) -> tf.keras.Model:
"""
Modifies the ResNet to fit the PSPNet Paper with the described dilated strategy.
Parameters
----------
model : tf.keras.Model
The ResNet-50 model to be modified.
Returns
-------
tf.keras.Model
Modified model.
"""
for i in range(0,4):
model.get_layer('conv4_block1_{}_conv'.format(i)).strides = 1
model.get_layer('conv4_block1_{}_conv'.format(i)).dilation_rate = 2
model.get_layer('conv5_block1_{}_conv'.format(i)).strides = 1
model.get_layer('conv5_block1_{}_conv'.format(i)).dilation_rate = 4
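# Saving and reloading the model rebuilds the graph so that the modified
# strides and dilation rates actually take effect.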
model.save('/tmp/my_model')
new_model = tf.keras.models.load_model('/tmp/my_model')
return new_model
def PSPNet(image_width: int,
image_height: int,
n_classes: int,
kernel_size: tuple = (3,3),
activation: str = 'relu',
weights: str = 'imagenet',
shallow_tuning: bool = False,
isICNet: bool = False
) -> tf.keras.Model:
"""
Setting up the PSPNet model
Parameters
----------
image_width : int
Width of the image.
image_height : int
Height of the image.
n_classes : int
Number of classes.
kernel_size : tuple, default = (3, 3)
Size of the kernel.
activation : str, default = 'relu'
Activation function to be applied after the pooling operations.
weights: str, default = 'imagenet'
String defining which weights to use for the backbone, 'imagenet' for ImageNet weights or None for normalized initialization.
shallow_tuning : bool, default = True
Boolean for using shallow tuning with pre-trained weights from ImageNet or not.
isICNet : bool, default = False
Boolean to determine if the PSPNet will be part of an ICNet.
Returns
-------
tf.keras.Model
The finished keras model for PSPNet.
"""
if shallow_tuning and not weights:
raise ValueError("Shallow tuning can not be performed without loading pre-trained weights. Please input 'imagenet' to argument weights...")
#If the function is used for the ICNet input_shape is set to none as ICNet takes 3 inputs
if isICNet:
input_shape=(None, None, 3)
else:
input_shape=(image_height,image_width,3)
#Initializing the ResNet50 Backbone
y=ResNet50(include_top=False, weights=weights, input_shape=input_shape)
y=modify_ResNet_Dilation(y)
if shallow_tuning:
y.trainable=False
pooling_layer=[]
output=y.output
pooling_layer.append(output)
h = image_height//8
w = image_width//8
#Loop for calling the pool block functions for pooling factors [1,2,3,6]
for i in [1,2,3,6]:
pool = pool_block(output, h, w, i, activation)
pooling_layer.append(pool)
x=Concatenate()(pooling_layer)
x=Conv2D(filters=n_classes, kernel_size=(1,1), padding='same')(x)
x=UpSampling2D(size=(8,8), data_format='channels_last', interpolation='bilinear')(x)
x=Reshape((image_height*image_width, n_classes))(x)
x=Activation(tf.nn.softmax)(x)
x=Reshape((image_height,image_width,n_classes))(x)
final_model=tf.keras.Model(inputs=y.input, outputs=x)
return final_model
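# COMMAND ----------
# MAGIC %md
# MAGIC As an optional sanity check (an illustrative cell, not part of the original tuning pipeline), the model builder can be instantiated once with the same arguments used later in `train_spark` to confirm that the output shape matches the 128x128, 3-class segmentation masks. Note that this downloads the ImageNet weights for the ResNet-50 backbone.
# COMMAND ----------
# Optional sanity check: build the model once and inspect its output shape,
# which should be (None, 128, 128, 3).
sanity_model = PSPNet(128, 128, 3)
print(sanity_model.output_shape)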
# COMMAND ----------
# MAGIC %md
# MAGIC Defining the custom data generators used for training the model.
# COMMAND ----------
def batch_generator(batch_size):
indices = np.arange(len(X_train))
batch=[]
while True:
# it might be a good idea to shuffle your data before each epoch
np.random.shuffle(indices)
for i in indices:
batch.append(i)
if len(batch)==batch_size:
yield X_train[batch], Y_train[batch]
batch=[]
def batch_generator_eval(batch_size):
indices = np.arange(len(X_test))
batch=[]
while True:
for i in indices:
batch.append(i)
if len(batch)==batch_size:
yield X_test[batch], Y_test[batch]
batch=[]
# COMMAND ----------
# MAGIC %md
# MAGIC To exploit parallelism, SparkTrials requires a self-contained function that loads the dataset, trains the model, and evaluates it.
# COMMAND ----------
def train_spark(params):
gpus = tf.config.list_physical_devices('GPU')
if gpus:
try:
# Currently, memory growth needs to be the same across GPUs
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
logical_gpus = tf.config.experimental.list_logical_devices('GPU')
print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs")
except RuntimeError as e:
# Memory growth must be set before GPUs have been initialized
print(e)
VAL_SUBSPLITS=5
EPOCHS=50
VALIDATION_STEPS = TEST_LENGTH//params['batch_size']//VAL_SUBSPLITS
STEPS_PER_EPOCH = TRAIN_LENGTH // params['batch_size']
BATCH_SIZE = params['batch_size']
print(BATCH_SIZE)
BUFFER_SIZE = 1000
"""
Train and evaluate the PSPNet for a given hyperparameter configuration.
This method is passed to hyperopt.fmin().
:param params: hyperparameters. Its structure is consistent with how search space is defined. See below.
:return: dict with fields 'loss' (scalar loss) and 'status' (success/failure status of run)
"""
train_dataset = batch_generator(BATCH_SIZE)
test_dataset = batch_generator_eval(BATCH_SIZE)
model=PSPNet(128,128,3)
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=params['learning_rate']),
loss=tf.keras.losses.SparseCategoricalCrossentropy(),
metrics=tf.keras.metrics.SparseCategoricalAccuracy())
model_history = model.fit(train_dataset, epochs=EPOCHS, steps_per_epoch=STEPS_PER_EPOCH)
loss = model.evaluate(test_dataset, steps=VALIDATION_STEPS)[0]
return {'loss': loss, 'status': STATUS_OK}
# COMMAND ----------
# MAGIC %md
# MAGIC In this experiment, the hyperparameters `learning_rate` and `batch_size` were explored through random search in combination with an adaptive algorithm called Tree of Parzen Estimators (TPE). Since parallelism was set to 4, the hyperparameters are chosen randomly for the first 4 parallel runs and then adaptively for the second pass of 4 runs.
# COMMAND ----------
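# Search space: learning_rate is sampled log-uniformly between 1e-4 and 1e-1,
# and batch_size is drawn from {16, 32, 64}.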
space = {
'learning_rate': hp.loguniform('learning_rate', np.log(1e-4), np.log(1e-1)),
'batch_size': hp.choice('batch_size', [16, 32, 64]),
}
# COMMAND ----------
algo=tpe.suggest
spark_trials = SparkTrials(parallelism=4)
best_param = fmin(
fn=train_spark,
space=space,
algo=algo,
max_evals=8,
return_argmin=False,
trials = spark_trials,
)
print(best_param)
# COMMAND ----------
| unlicense |
mwv/scikit-learn | sklearn/linear_model/tests/test_least_angle.py | 98 | 20870 | from nose.tools import assert_equal
import numpy as np
from scipy import linalg
from sklearn.cross_validation import train_test_split
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_no_warnings, assert_warns
from sklearn.utils.testing import TempMemmap
from sklearn.utils import ConvergenceWarning
from sklearn import linear_model, datasets
from sklearn.linear_model.least_angle import _lars_path_residues
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
# TODO: use another dataset that has multiple drops
def test_simple():
# Principle of Lars is to keep covariances tied and decreasing
# also test verbose output
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
try:
sys.stdout = StringIO()
alphas_, active, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", verbose=10)
sys.stdout = old_stdout
for (i, coef_) in enumerate(coef_path_.T):
res = y - np.dot(X, coef_)
cov = np.dot(X.T, res)
C = np.max(abs(cov))
eps = 1e-3
ocur = len(cov[C - eps < abs(cov)])
if i < X.shape[1]:
assert_true(ocur == i + 1)
else:
# no more than max_pred variables can go into the active set
assert_true(ocur == X.shape[1])
finally:
sys.stdout = old_stdout
def test_simple_precomputed():
# The same, with precomputed Gram matrix
G = np.dot(diabetes.data.T, diabetes.data)
alphas_, active, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, Gram=G, method="lar")
for i, coef_ in enumerate(coef_path_.T):
res = y - np.dot(X, coef_)
cov = np.dot(X.T, res)
C = np.max(abs(cov))
eps = 1e-3
ocur = len(cov[C - eps < abs(cov)])
if i < X.shape[1]:
assert_true(ocur == i + 1)
else:
# no more than max_pred variables can go into the active set
assert_true(ocur == X.shape[1])
def test_all_precomputed():
# Test that lars_path with precomputed Gram and Xy gives the right answer
X, y = diabetes.data, diabetes.target
G = np.dot(X.T, X)
Xy = np.dot(X.T, y)
for method in 'lar', 'lasso':
output = linear_model.lars_path(X, y, method=method)
output_pre = linear_model.lars_path(X, y, Gram=G, Xy=Xy, method=method)
for expected, got in zip(output, output_pre):
assert_array_almost_equal(expected, got)
def test_lars_lstsq():
# Test that Lars gives least square solution at the end
# of the path
X1 = 3 * diabetes.data # use un-normalized dataset
clf = linear_model.LassoLars(alpha=0.)
clf.fit(X1, y)
coef_lstsq = np.linalg.lstsq(X1, y)[0]
assert_array_almost_equal(clf.coef_, coef_lstsq)
def test_lasso_gives_lstsq_solution():
# Test that Lars Lasso gives least square solution at the end
# of the path
alphas_, active, coef_path_ = linear_model.lars_path(X, y, method="lasso")
coef_lstsq = np.linalg.lstsq(X, y)[0]
assert_array_almost_equal(coef_lstsq, coef_path_[:, -1])
def test_collinearity():
# Check that lars_path is robust to collinearity in input
X = np.array([[3., 3., 1.],
[2., 2., 0.],
[1., 1., 0]])
y = np.array([1., 0., 0])
f = ignore_warnings
_, _, coef_path_ = f(linear_model.lars_path)(X, y, alpha_min=0.01)
assert_true(not np.isnan(coef_path_).any())
residual = np.dot(X, coef_path_[:, -1]) - y
assert_less((residual ** 2).sum(), 1.) # just make sure it's bounded
n_samples = 10
X = np.random.rand(n_samples, 5)
y = np.zeros(n_samples)
_, _, coef_path_ = linear_model.lars_path(X, y, Gram='auto', copy_X=False,
copy_Gram=False, alpha_min=0.,
method='lasso', verbose=0,
max_iter=500)
assert_array_almost_equal(coef_path_, np.zeros_like(coef_path_))
def test_no_path():
# Test that the ``return_path=False`` option returns the correct output
alphas_, active_, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar")
alpha_, active, coef = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_no_path_precomputed():
# Test that the ``return_path=False`` option with Gram remains correct
G = np.dot(diabetes.data.T, diabetes.data)
alphas_, active_, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", Gram=G)
alpha_, active, coef = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", Gram=G,
return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_no_path_all_precomputed():
# Test that the ``return_path=False`` option with Gram and Xy remains
# correct
X, y = 3 * diabetes.data, diabetes.target
G = np.dot(X.T, X)
Xy = np.dot(X.T, y)
alphas_, active_, coef_path_ = linear_model.lars_path(
X, y, method="lasso", Gram=G, Xy=Xy, alpha_min=0.9)
print("---")
alpha_, active, coef = linear_model.lars_path(
X, y, method="lasso", Gram=G, Xy=Xy, alpha_min=0.9, return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_singular_matrix():
# Test when input is a singular matrix
X1 = np.array([[1, 1.], [1., 1.]])
y1 = np.array([1, 1])
alphas, active, coef_path = linear_model.lars_path(X1, y1)
assert_array_almost_equal(coef_path.T, [[0, 0], [1, 0]])
def test_rank_deficient_design():
# consistency test that checks that LARS Lasso is handling rank
# deficient input data (with rank < n_features) in the same way
# as coordinate descent Lasso
y = [5, 0, 5]
for X in ([[5, 0],
[0, 5],
[10, 10]],
[[10, 10, 0],
[1e-32, 0, 0],
[0, 0, 1]],
):
# To be able to use the coefs to compute the objective function,
# we need to turn off normalization
lars = linear_model.LassoLars(.1, normalize=False)
coef_lars_ = lars.fit(X, y).coef_
obj_lars = (1. / (2. * 3.)
* linalg.norm(y - np.dot(X, coef_lars_)) ** 2
+ .1 * linalg.norm(coef_lars_, 1))
coord_descent = linear_model.Lasso(.1, tol=1e-6, normalize=False)
coef_cd_ = coord_descent.fit(X, y).coef_
obj_cd = ((1. / (2. * 3.)) * linalg.norm(y - np.dot(X, coef_cd_)) ** 2
+ .1 * linalg.norm(coef_cd_, 1))
assert_less(obj_lars, obj_cd * (1. + 1e-8))
def test_lasso_lars_vs_lasso_cd(verbose=False):
# Test that LassoLars and Lasso using coordinate descent give the
# same results.
X = 3 * diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso')
lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8)
for c, a in zip(lasso_path.T, alphas):
if a == 0:
continue
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
# similar test, with the classifiers
for alpha in np.linspace(1e-2, 1 - 1e-2, 20):
clf1 = linear_model.LassoLars(alpha=alpha, normalize=False).fit(X, y)
clf2 = linear_model.Lasso(alpha=alpha, tol=1e-8,
normalize=False).fit(X, y)
err = linalg.norm(clf1.coef_ - clf2.coef_)
assert_less(err, 1e-3)
# same test, with normalized data
X = diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso')
lasso_cd = linear_model.Lasso(fit_intercept=False, normalize=True,
tol=1e-8)
for c, a in zip(lasso_path.T, alphas):
if a == 0:
continue
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
def test_lasso_lars_vs_lasso_cd_early_stopping(verbose=False):
# Test that LassoLars and Lasso using coordinate descent give the
# same results when early stopping is used.
# (test : before, in the middle, and in the last part of the path)
alphas_min = [10, 0.9, 1e-4]
for alphas_min in alphas_min:
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
alpha_min=0.9)
lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8)
lasso_cd.alpha = alphas[-1]
lasso_cd.fit(X, y)
error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_)
assert_less(error, 0.01)
alphas_min = [10, 0.9, 1e-4]
# same test, with normalization
for alphas_min in alphas_min:
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
alpha_min=0.9)
lasso_cd = linear_model.Lasso(fit_intercept=True, normalize=True,
tol=1e-8)
lasso_cd.alpha = alphas[-1]
lasso_cd.fit(X, y)
error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_)
assert_less(error, 0.01)
def test_lasso_lars_path_length():
# Test that the path length of the LassoLars is right
lasso = linear_model.LassoLars()
lasso.fit(X, y)
lasso2 = linear_model.LassoLars(alpha=lasso.alphas_[2])
lasso2.fit(X, y)
assert_array_almost_equal(lasso.alphas_[:3], lasso2.alphas_)
# Also check that the sequence of alphas is always decreasing
assert_true(np.all(np.diff(lasso.alphas_) < 0))
def test_lasso_lars_vs_lasso_cd_ill_conditioned():
# Test lasso lars on a very ill-conditioned design, and check that
# it does not blow up, and stays somewhat close to a solution given
# by the coordinate descent solver
# Also test that lasso_path (using lars_path output style) gives
# the same result as lars_path and previous lasso output style
# under these conditions.
rng = np.random.RandomState(42)
# Generate data
n, m = 70, 100
k = 5
X = rng.randn(n, m)
w = np.zeros((m, 1))
i = np.arange(0, m)
rng.shuffle(i)
supp = i[:k]
w[supp] = np.sign(rng.randn(k, 1)) * (rng.rand(k, 1) + 1)
y = np.dot(X, w)
sigma = 0.2
y += sigma * rng.rand(*y.shape)
y = y.squeeze()
lars_alphas, _, lars_coef = linear_model.lars_path(X, y, method='lasso')
_, lasso_coef2, _ = linear_model.lasso_path(X, y,
alphas=lars_alphas,
tol=1e-6,
fit_intercept=False)
assert_array_almost_equal(lars_coef, lasso_coef2, decimal=1)
def test_lasso_lars_vs_lasso_cd_ill_conditioned2():
# Create an ill-conditioned situation in which the LARS has to go
# far in the path to converge, and check that LARS and coordinate
# descent give the same answers
# Note it used to be the case that Lars had to use the drop for good
# strategy for this but this is no longer the case with the
# equality_tolerance checks
X = [[1e20, 1e20, 0],
[-1e-32, 0, 0],
[1, 1, 1]]
y = [10, 10, 1]
alpha = .0001
def objective_function(coef):
return (1. / (2. * len(X)) * linalg.norm(y - np.dot(X, coef)) ** 2
+ alpha * linalg.norm(coef, 1))
lars = linear_model.LassoLars(alpha=alpha, normalize=False)
assert_warns(ConvergenceWarning, lars.fit, X, y)
lars_coef_ = lars.coef_
lars_obj = objective_function(lars_coef_)
coord_descent = linear_model.Lasso(alpha=alpha, tol=1e-10, normalize=False)
cd_coef_ = coord_descent.fit(X, y).coef_
cd_obj = objective_function(cd_coef_)
assert_less(lars_obj, cd_obj * (1. + 1e-8))
def test_lars_add_features():
# assure that at least some features get added if necessary
# test for 6d2b4c
# Hilbert matrix
n = 5
H = 1. / (np.arange(1, n + 1) + np.arange(n)[:, np.newaxis])
clf = linear_model.Lars(fit_intercept=False).fit(
H, np.arange(n))
assert_true(np.all(np.isfinite(clf.coef_)))
def test_lars_n_nonzero_coefs(verbose=False):
lars = linear_model.Lars(n_nonzero_coefs=6, verbose=verbose)
lars.fit(X, y)
assert_equal(len(lars.coef_.nonzero()[0]), 6)
# The path should be of length 6 + 1 in a Lars going down to 6
# non-zero coefs
assert_equal(len(lars.alphas_), 7)
def test_multitarget():
# Assure that estimators receiving multidimensional y do the right thing
X = diabetes.data
Y = np.vstack([diabetes.target, diabetes.target ** 2]).T
n_targets = Y.shape[1]
for estimator in (linear_model.LassoLars(), linear_model.Lars()):
estimator.fit(X, Y)
Y_pred = estimator.predict(X)
Y_dec = estimator.decision_function(X)
assert_array_almost_equal(Y_pred, Y_dec)
alphas, active, coef, path = (estimator.alphas_, estimator.active_,
estimator.coef_, estimator.coef_path_)
for k in range(n_targets):
estimator.fit(X, Y[:, k])
y_pred = estimator.predict(X)
assert_array_almost_equal(alphas[k], estimator.alphas_)
assert_array_almost_equal(active[k], estimator.active_)
assert_array_almost_equal(coef[k], estimator.coef_)
assert_array_almost_equal(path[k], estimator.coef_path_)
assert_array_almost_equal(Y_pred[:, k], y_pred)
def test_lars_cv():
# Test the LassoLarsCV object by checking that the optimal alpha
# increases as the number of samples increases.
# This property is not actually guaranteed in general and is just a
# property of the given dataset, with the given steps chosen.
old_alpha = 0
lars_cv = linear_model.LassoLarsCV()
for length in (400, 200, 100):
X = diabetes.data[:length]
y = diabetes.target[:length]
lars_cv.fit(X, y)
np.testing.assert_array_less(old_alpha, lars_cv.alpha_)
old_alpha = lars_cv.alpha_
def test_lasso_lars_ic():
# Test the LassoLarsIC object by checking that
# - some good features are selected.
# - alpha_bic > alpha_aic
# - n_nonzero_bic < n_nonzero_aic
lars_bic = linear_model.LassoLarsIC('bic')
lars_aic = linear_model.LassoLarsIC('aic')
rng = np.random.RandomState(42)
X = diabetes.data
y = diabetes.target
X = np.c_[X, rng.randn(X.shape[0], 4)] # add 4 bad features
lars_bic.fit(X, y)
lars_aic.fit(X, y)
nonzero_bic = np.where(lars_bic.coef_)[0]
nonzero_aic = np.where(lars_aic.coef_)[0]
assert_greater(lars_bic.alpha_, lars_aic.alpha_)
assert_less(len(nonzero_bic), len(nonzero_aic))
assert_less(np.max(nonzero_bic), diabetes.data.shape[1])
# test error on unknown IC
lars_broken = linear_model.LassoLarsIC('<unknown>')
assert_raises(ValueError, lars_broken.fit, X, y)
def test_no_warning_for_zero_mse():
# LassoLarsIC should not warn for log of zero MSE.
y = np.arange(10, dtype=float)
X = y.reshape(-1, 1)
lars = linear_model.LassoLarsIC(normalize=False)
assert_no_warnings(lars.fit, X, y)
assert_true(np.any(np.isinf(lars.criterion_)))
def test_lars_path_readonly_data():
# When using automated memory mapping on large input, the
# fold data is in read-only mode
# This is a non-regression test for:
# https://github.com/scikit-learn/scikit-learn/issues/4597
splitted_data = train_test_split(X, y, random_state=42)
with TempMemmap(splitted_data) as (X_train, X_test, y_train, y_test):
# The following should not fail despite copy=False
_lars_path_residues(X_train, y_train, X_test, y_test, copy=False)
def test_lars_path_positive_constraint():
# this is the main test for the positive parameter on the lars_path method
# the estimator classes just make use of this function
# we do the test on the diabetes dataset
# ensure that we get negative coefficients when positive=False
# and all positive when positive=True
# for method 'lar' (default) and lasso
for method in ['lar', 'lasso']:
alpha, active, coefs = \
linear_model.lars_path(diabetes['data'], diabetes['target'],
return_path=True, method=method,
positive=False)
assert_true(coefs.min() < 0)
alpha, active, coefs = \
linear_model.lars_path(diabetes['data'], diabetes['target'],
return_path=True, method=method,
positive=True)
assert_true(coefs.min() >= 0)
# now we test the positive option for all estimator classes
default_parameter = {'fit_intercept': False}
estimator_parameter_map = {'Lars': {'n_nonzero_coefs': 5},
'LassoLars': {'alpha': 0.1},
'LarsCV': {},
'LassoLarsCV': {},
'LassoLarsIC': {}}
def test_estimatorclasses_positive_constraint():
# check that the positive option is passed through correctly by all
# estimator classes, tested together in this function
for estname in estimator_parameter_map:
params = default_parameter.copy()
params.update(estimator_parameter_map[estname])
estimator = getattr(linear_model, estname)(positive=False, **params)
estimator.fit(diabetes['data'], diabetes['target'])
assert_true(estimator.coef_.min() < 0)
estimator = getattr(linear_model, estname)(positive=True, **params)
estimator.fit(diabetes['data'], diabetes['target'])
assert_true(min(estimator.coef_) >= 0)
def test_lasso_lars_vs_lasso_cd_positive(verbose=False):
# Test that LassoLars and Lasso using coordinate descent give the
# same results when using the positive option
# This test is basically a copy of the above with additional positive
# option. However for the middle part, the comparison of coefficient values
# for a range of alphas, we had to make an adaptation. See below.
# not normalized data
X = 3 * diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
positive=True)
lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8, positive=True)
for c, a in zip(lasso_path.T, alphas):
if a == 0:
continue
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
# The range of alphas chosen for coefficient comparison here is restricted
# as compared with the above test without the positive option. This is due
# to the circumstance that the Lars-Lasso algorithm does not converge to
# the least-squares-solution for small alphas, see 'Least Angle Regression'
# by Efron et al 2004. The coefficients are typically in congruence up to
# the smallest alpha reached by the Lars-Lasso algorithm and start to
# diverge thereafter. See
# https://gist.github.com/michigraber/7e7d7c75eca694c7a6ff
for alpha in np.linspace(6e-1, 1 - 1e-2, 20):
clf1 = linear_model.LassoLars(fit_intercept=False, alpha=alpha,
normalize=False, positive=True).fit(X, y)
clf2 = linear_model.Lasso(fit_intercept=False, alpha=alpha, tol=1e-8,
normalize=False, positive=True).fit(X, y)
err = linalg.norm(clf1.coef_ - clf2.coef_)
assert_less(err, 1e-3)
# normalized data
X = diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
positive=True)
lasso_cd = linear_model.Lasso(fit_intercept=False, normalize=True,
tol=1e-8, positive=True)
for c, a in zip(lasso_path.T[:-1], alphas[:-1]): # don't include alpha=0
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
| bsd-3-clause |
CVML/scikit-learn | sklearn/cluster/birch.py | 207 | 22706 | # Authors: Manoj Kumar <[email protected]>
# Alexandre Gramfort <[email protected]>
# Joel Nothman <[email protected]>
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
from scipy import sparse
from math import sqrt
from ..metrics.pairwise import euclidean_distances
from ..base import TransformerMixin, ClusterMixin, BaseEstimator
from ..externals.six.moves import xrange
from ..utils import check_array
from ..utils.extmath import row_norms, safe_sparse_dot
from ..utils.validation import NotFittedError, check_is_fitted
from .hierarchical import AgglomerativeClustering
def _iterate_sparse_X(X):
"""This little hack returns a densified row when iterating over a sparse
matrix, insted of constructing a sparse matrix for every row that is
expensive.
"""
n_samples = X.shape[0]
X_indices = X.indices
X_data = X.data
X_indptr = X.indptr
for i in xrange(n_samples):
row = np.zeros(X.shape[1])
startptr, endptr = X_indptr[i], X_indptr[i + 1]
nonzero_indices = X_indices[startptr:endptr]
row[nonzero_indices] = X_data[startptr:endptr]
yield row
def _split_node(node, threshold, branching_factor):
"""The node has to be split if there is no place for a new subcluster
in the node.
1. Two empty nodes and two empty subclusters are initialized.
2. The pair of distant subclusters are found.
3. The properties of the empty subclusters and nodes are updated
according to which of the two distant subclusters each existing
subcluster is nearest to.
4. The two nodes are set as children to the two subclusters.
"""
new_subcluster1 = _CFSubcluster()
new_subcluster2 = _CFSubcluster()
new_node1 = _CFNode(
threshold, branching_factor, is_leaf=node.is_leaf,
n_features=node.n_features)
new_node2 = _CFNode(
threshold, branching_factor, is_leaf=node.is_leaf,
n_features=node.n_features)
new_subcluster1.child_ = new_node1
new_subcluster2.child_ = new_node2
if node.is_leaf:
if node.prev_leaf_ is not None:
node.prev_leaf_.next_leaf_ = new_node1
new_node1.prev_leaf_ = node.prev_leaf_
new_node1.next_leaf_ = new_node2
new_node2.prev_leaf_ = new_node1
new_node2.next_leaf_ = node.next_leaf_
if node.next_leaf_ is not None:
node.next_leaf_.prev_leaf_ = new_node2
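# Find the pair of subclusters that are farthest apart; they will seed the
# two new nodes (step 2 of the procedure described in the docstring).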
dist = euclidean_distances(
node.centroids_, Y_norm_squared=node.squared_norm_, squared=True)
n_clusters = dist.shape[0]
farthest_idx = np.unravel_index(
dist.argmax(), (n_clusters, n_clusters))
node1_dist, node2_dist = dist[[farthest_idx]]
node1_closer = node1_dist < node2_dist
for idx, subcluster in enumerate(node.subclusters_):
if node1_closer[idx]:
new_node1.append_subcluster(subcluster)
new_subcluster1.update(subcluster)
else:
new_node2.append_subcluster(subcluster)
new_subcluster2.update(subcluster)
return new_subcluster1, new_subcluster2
class _CFNode(object):
"""Each node in a CFTree is called a CFNode.
The CFNode can have a maximum of branching_factor
number of CFSubclusters.
Parameters
----------
threshold : float
Threshold needed for a new subcluster to enter a CFSubcluster.
branching_factor : int
Maximum number of CF subclusters in each node.
is_leaf : bool
We need to know if the CFNode is a leaf or not, in order to
retrieve the final subclusters.
n_features : int
The number of features.
Attributes
----------
subclusters_ : array-like
list of subclusters for a particular CFNode.
prev_leaf_ : _CFNode
prev_leaf. Useful only if is_leaf is True.
next_leaf_ : _CFNode
next_leaf. Useful only if is_leaf is True, in order to retrieve
the final subclusters.
init_centroids_ : ndarray, shape (branching_factor + 1, n_features)
manipulate ``init_centroids_`` throughout rather than centroids_ since
the centroids are just a view of the ``init_centroids_`` .
init_sq_norm_ : ndarray, shape (branching_factor + 1,)
manipulate init_sq_norm_ throughout. similar to ``init_centroids_``.
centroids_ : ndarray
view of ``init_centroids_``.
squared_norm_ : ndarray
view of ``init_sq_norm_``.
"""
def __init__(self, threshold, branching_factor, is_leaf, n_features):
self.threshold = threshold
self.branching_factor = branching_factor
self.is_leaf = is_leaf
self.n_features = n_features
# The list of subclusters, centroids and squared norms
# to manipulate throughout.
self.subclusters_ = []
self.init_centroids_ = np.zeros((branching_factor + 1, n_features))
self.init_sq_norm_ = np.zeros((branching_factor + 1))
self.squared_norm_ = []
self.prev_leaf_ = None
self.next_leaf_ = None
def append_subcluster(self, subcluster):
n_samples = len(self.subclusters_)
self.subclusters_.append(subcluster)
self.init_centroids_[n_samples] = subcluster.centroid_
self.init_sq_norm_[n_samples] = subcluster.sq_norm_
# Keep centroids and squared norms as views, so that it is
# sufficient to update init_centroids_ and init_sq_norm_.
self.centroids_ = self.init_centroids_[:n_samples + 1, :]
self.squared_norm_ = self.init_sq_norm_[:n_samples + 1]
def update_split_subclusters(self, subcluster,
new_subcluster1, new_subcluster2):
"""Remove a subcluster from a node and update it with the
split subclusters.
"""
ind = self.subclusters_.index(subcluster)
self.subclusters_[ind] = new_subcluster1
self.init_centroids_[ind] = new_subcluster1.centroid_
self.init_sq_norm_[ind] = new_subcluster1.sq_norm_
self.append_subcluster(new_subcluster2)
def insert_cf_subcluster(self, subcluster):
"""Insert a new subcluster into the node."""
if not self.subclusters_:
self.append_subcluster(subcluster)
return False
threshold = self.threshold
branching_factor = self.branching_factor
# We need to find the closest subcluster among all the
# subclusters so that we can insert our new subcluster.
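# ||c_i||^2 - 2 * <c_i, x> differs from the true squared distance to x only
# by the constant ||x||^2, so it is sufficient for the argmin below.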
dist_matrix = np.dot(self.centroids_, subcluster.centroid_)
dist_matrix *= -2.
dist_matrix += self.squared_norm_
closest_index = np.argmin(dist_matrix)
closest_subcluster = self.subclusters_[closest_index]
# If the subcluster has a child, we need a recursive strategy.
if closest_subcluster.child_ is not None:
split_child = closest_subcluster.child_.insert_cf_subcluster(
subcluster)
if not split_child:
# If it is determined that the child need not be split, we
# can just update the closest_subcluster
closest_subcluster.update(subcluster)
self.init_centroids_[closest_index] = \
self.subclusters_[closest_index].centroid_
self.init_sq_norm_[closest_index] = \
self.subclusters_[closest_index].sq_norm_
return False
# The child had to be split: redistribute the subclusters in the
# child node and add a new subcluster in the parent subcluster to
# accommodate the new child.
else:
new_subcluster1, new_subcluster2 = _split_node(
closest_subcluster.child_, threshold, branching_factor)
self.update_split_subclusters(
closest_subcluster, new_subcluster1, new_subcluster2)
if len(self.subclusters_) > self.branching_factor:
return True
return False
# good to go!
else:
merged = closest_subcluster.merge_subcluster(
subcluster, self.threshold)
if merged:
self.init_centroids_[closest_index] = \
closest_subcluster.centroid_
self.init_sq_norm_[closest_index] = \
closest_subcluster.sq_norm_
return False
# not close to any other subclusters, and we still
# have space, so add.
elif len(self.subclusters_) < self.branching_factor:
self.append_subcluster(subcluster)
return False
# We do not have enough space nor is it closer to an
# other subcluster. We need to split.
else:
self.append_subcluster(subcluster)
return True
class _CFSubcluster(object):
"""Each subcluster in a CFNode is called a CFSubcluster.
A CFSubcluster can have a CFNode has its child.
Parameters
----------
linear_sum : ndarray, shape (n_features,), optional
Sample. This is kept optional to allow initialization of empty
subclusters.
Attributes
----------
n_samples_ : int
Number of samples that belong to each subcluster.
linear_sum_ : ndarray
Linear sum of all the samples in a subcluster. Prevents holding
all sample data in memory.
squared_sum_ : float
Sum of the squared l2 norms of all samples belonging to a subcluster.
centroid_ : ndarray
Centroid of the subcluster. Prevent recomputing of centroids when
``CFNode.centroids_`` is called.
child_ : _CFNode
Child node of the subcluster. Once a given _CFNode is set as the child
of this subcluster, it is stored in ``self.child_``.
sq_norm_ : ndarray
Squared norm of the subcluster. Used to prevent recomputing when
pairwise minimum distances are computed.
"""
def __init__(self, linear_sum=None):
if linear_sum is None:
self.n_samples_ = 0
self.squared_sum_ = 0.0
self.linear_sum_ = 0
else:
self.n_samples_ = 1
self.centroid_ = self.linear_sum_ = linear_sum
self.squared_sum_ = self.sq_norm_ = np.dot(
self.linear_sum_, self.linear_sum_)
self.child_ = None
def update(self, subcluster):
self.n_samples_ += subcluster.n_samples_
self.linear_sum_ += subcluster.linear_sum_
self.squared_sum_ += subcluster.squared_sum_
self.centroid_ = self.linear_sum_ / self.n_samples_
self.sq_norm_ = np.dot(self.centroid_, self.centroid_)
def merge_subcluster(self, nominee_cluster, threshold):
"""Check if a cluster is worthy enough to be merged. If
yes then merge.
"""
new_ss = self.squared_sum_ + nominee_cluster.squared_sum_
new_ls = self.linear_sum_ + nominee_cluster.linear_sum_
new_n = self.n_samples_ + nominee_cluster.n_samples_
new_centroid = (1 / new_n) * new_ls
new_norm = np.dot(new_centroid, new_centroid)
dot_product = (-2 * new_n) * new_norm
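# Squared radius of the merged subcluster via the CF identity
# R^2 = SS/n - ||centroid||^2, expanded here as (SS - 2*n*||c||^2)/n + ||c||^2.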
sq_radius = (new_ss + dot_product) / new_n + new_norm
if sq_radius <= threshold ** 2:
(self.n_samples_, self.linear_sum_, self.squared_sum_,
self.centroid_, self.sq_norm_) = \
new_n, new_ls, new_ss, new_centroid, new_norm
return True
return False
@property
def radius(self):
"""Return radius of the subcluster"""
dot_product = -2 * np.dot(self.linear_sum_, self.centroid_)
return sqrt(
((self.squared_sum_ + dot_product) / self.n_samples_) +
self.sq_norm_)
class Birch(BaseEstimator, TransformerMixin, ClusterMixin):
"""Implements the Birch clustering algorithm.
Every new sample is inserted into the root of the Clustering Feature
Tree. It is then clubbed together with the subcluster that has the
centroid closest to the new sample. This is done recursively till it
ends up at the subcluster of the leaf of the tree has the closest centroid.
Read more in the :ref:`User Guide <birch>`.
Parameters
----------
threshold : float, default 0.5
The radius of the subcluster obtained by merging a new sample and the
closest subcluster should be less than the threshold. Otherwise a new
subcluster is started.
branching_factor : int, default 50
Maximum number of CF subclusters in each node. If a new sample enters
such that the number of subclusters exceeds the branching_factor then
the node has to be split. The corresponding parent also has to be
split and if the number of subclusters in the parent is greater than
the branching factor, then it has to be split recursively.
n_clusters : int, instance of sklearn.cluster model, default None
Number of clusters after the final clustering step, which treats the
subclusters from the leaves as new samples. By default, this final
clustering step is not performed and the subclusters are returned
as they are. If a model is provided, the model is fit treating
the subclusters as new samples and the initial data is mapped to the
label of the closest subcluster. If an int is provided, the model
fit is AgglomerativeClustering with n_clusters set to the int.
compute_labels : bool, default True
Whether or not to compute labels for each fit.
copy : bool, default True
Whether or not to make a copy of the given data. If set to False,
the initial data will be overwritten.
Attributes
----------
root_ : _CFNode
Root of the CFTree.
dummy_leaf_ : _CFNode
Start pointer to all the leaves.
subcluster_centers_ : ndarray,
Centroids of all subclusters read directly from the leaves.
subcluster_labels_ : ndarray,
Labels assigned to the centroids of the subclusters after
they are clustered globally.
labels_ : ndarray, shape (n_samples,)
Array of labels assigned to the input data.
if partial_fit is used instead of fit, they are assigned to the
last batch of data.
Examples
--------
>>> from sklearn.cluster import Birch
>>> X = [[0, 1], [0.3, 1], [-0.3, 1], [0, -1], [0.3, -1], [-0.3, -1]]
>>> brc = Birch(branching_factor=50, n_clusters=None, threshold=0.5,
... compute_labels=True)
>>> brc.fit(X)
Birch(branching_factor=50, compute_labels=True, copy=True, n_clusters=None,
threshold=0.5)
>>> brc.predict(X)
array([0, 0, 0, 1, 1, 1])
References
----------
* Tian Zhang, Raghu Ramakrishnan, Miron Livny
BIRCH: An efficient data clustering method for large databases.
http://www.cs.sfu.ca/CourseCentral/459/han/papers/zhang96.pdf
* Roberto Perdisci
JBirch - Java implementation of BIRCH clustering algorithm
https://code.google.com/p/jbirch/
"""
def __init__(self, threshold=0.5, branching_factor=50, n_clusters=3,
compute_labels=True, copy=True):
self.threshold = threshold
self.branching_factor = branching_factor
self.n_clusters = n_clusters
self.compute_labels = compute_labels
self.copy = copy
def fit(self, X, y=None):
"""
Build a CF Tree for the input data.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data.
"""
self.fit_, self.partial_fit_ = True, False
return self._fit(X)
def _fit(self, X):
X = check_array(X, accept_sparse='csr', copy=self.copy)
threshold = self.threshold
branching_factor = self.branching_factor
if branching_factor <= 1:
raise ValueError("Branching_factor should be greater than one.")
n_samples, n_features = X.shape
# If partial_fit is called for the first time or fit is called, we
# start a new tree.
partial_fit = getattr(self, 'partial_fit_')
has_root = getattr(self, 'root_', None)
if getattr(self, 'fit_') or (partial_fit and not has_root):
# The first root is the leaf. Manipulate this object throughout.
self.root_ = _CFNode(threshold, branching_factor, is_leaf=True,
n_features=n_features)
# To enable getting back subclusters.
self.dummy_leaf_ = _CFNode(threshold, branching_factor,
is_leaf=True, n_features=n_features)
self.dummy_leaf_.next_leaf_ = self.root_
self.root_.prev_leaf_ = self.dummy_leaf_
# Cannot vectorize; this loop is a strong argument for moving to Cython.
if not sparse.issparse(X):
iter_func = iter
else:
iter_func = _iterate_sparse_X
for sample in iter_func(X):
subcluster = _CFSubcluster(linear_sum=sample)
split = self.root_.insert_cf_subcluster(subcluster)
if split:
new_subcluster1, new_subcluster2 = _split_node(
self.root_, threshold, branching_factor)
del self.root_
self.root_ = _CFNode(threshold, branching_factor,
is_leaf=False,
n_features=n_features)
self.root_.append_subcluster(new_subcluster1)
self.root_.append_subcluster(new_subcluster2)
centroids = np.concatenate([
leaf.centroids_ for leaf in self._get_leaves()])
self.subcluster_centers_ = centroids
self._global_clustering(X)
return self
def _get_leaves(self):
"""
Retrieve the leaves of the CF Node.
Returns
-------
leaves: array-like
List of the leaf nodes.
"""
leaf_ptr = self.dummy_leaf_.next_leaf_
leaves = []
while leaf_ptr is not None:
leaves.append(leaf_ptr)
leaf_ptr = leaf_ptr.next_leaf_
return leaves
def partial_fit(self, X=None, y=None):
"""
Online learning. Prevents rebuilding of CFTree from scratch.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features), None
Input data. If X is not provided, only the global clustering
step is done.
"""
self.partial_fit_, self.fit_ = True, False
if X is None:
# Perform just the final global clustering step.
self._global_clustering()
return self
else:
self._check_fit(X)
return self._fit(X)
def _check_fit(self, X):
is_fitted = hasattr(self, 'subcluster_centers_')
# Called by partial_fit, before fitting.
has_partial_fit = hasattr(self, 'partial_fit_')
# Should raise an error if one does not fit before predicting.
if not (is_fitted or has_partial_fit):
raise NotFittedError("Fit training data before predicting")
if is_fitted and X.shape[1] != self.subcluster_centers_.shape[1]:
raise ValueError(
"Training data and predicted data do "
"not have same number of features.")
def predict(self, X):
"""
Predict data using the ``centroids_`` of subclusters.
Avoid computation of the row norms of X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data.
Returns
-------
labels: ndarray, shape(n_samples)
Labelled data.
"""
X = check_array(X, accept_sparse='csr')
self._check_fit(X)
reduced_distance = safe_sparse_dot(X, self.subcluster_centers_.T)
reduced_distance *= -2
reduced_distance += self._subcluster_norms
return self.subcluster_labels_[np.argmin(reduced_distance, axis=1)]
def transform(self, X, y=None):
"""
Transform X into subcluster centroids dimension.
Each dimension represents the distance from the sample point to each
cluster centroid.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data.
Returns
-------
X_trans : {array-like, sparse matrix}, shape (n_samples, n_clusters)
Transformed data.
"""
check_is_fitted(self, 'subcluster_centers_')
return euclidean_distances(X, self.subcluster_centers_)
def _global_clustering(self, X=None):
"""
Global clustering for the subclusters obtained after fitting
"""
clusterer = self.n_clusters
centroids = self.subcluster_centers_
compute_labels = (X is not None) and self.compute_labels
# Preprocessing for the global clustering.
not_enough_centroids = False
if isinstance(clusterer, int):
clusterer = AgglomerativeClustering(
n_clusters=self.n_clusters)
# There is no need to perform the global clustering step.
if len(centroids) < self.n_clusters:
not_enough_centroids = True
elif (clusterer is not None and not
hasattr(clusterer, 'fit_predict')):
raise ValueError("n_clusters should be an instance of "
"ClusterMixin or an int")
# To use in predict to avoid recalculation.
self._subcluster_norms = row_norms(
self.subcluster_centers_, squared=True)
if clusterer is None or not_enough_centroids:
self.subcluster_labels_ = np.arange(len(centroids))
if not_enough_centroids:
warnings.warn(
"Number of subclusters found (%d) by Birch is less "
"than (%d). Decrease the threshold."
% (len(centroids), self.n_clusters))
else:
# The global clustering step that clusters the subclusters of
# the leaves. It assumes the centroids of the subclusters as
# samples and finds the final centroids.
self.subcluster_labels_ = clusterer.fit_predict(
self.subcluster_centers_)
if compute_labels:
self.labels_ = self.predict(X)
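# --- Usage sketch (not part of scikit-learn) ---------------------------------
# Minimal illustration of incremental training with partial_fit, assuming the
# data arrives in chunks; make_blobs is only a stand-in dataset here.
def _birch_partial_fit_sketch():
    from sklearn.datasets import make_blobs
    X, _ = make_blobs(n_samples=1000, centers=5, random_state=0)
    brc = Birch(n_clusters=5, threshold=0.5)
    for chunk in np.array_split(X, 10):
        brc.partial_fit(chunk)   # grows the CF tree chunk by chunk
    brc.partial_fit()            # X=None runs only the global clustering step
    return brc.predict(X)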
| bsd-3-clause |
JeffHeard/terrapyn | ows/rendering/palettes.py | 1 | 10925 | """
Color paletting for numpy arrays. This is more flexible than matplotlib's
color palettes, although we may eventually move to them and force
pre-computation of array inputs to matplotlib. Depends on performance
concerns.
"""
import numpy as np
import colorsys
import logging
log = logging.getLogger(__name__)
def rgba(r=0, g=0, b=0, a=255):
"""
A function that returns a color for integral rgba values between 0 and 255
:param r: default 0.
:type r: Integer 0-255.
:param g: default 0.
:type g: Integer 0-255.
:param b: default 0.
:type b: Integer 0-255.
    :param a: default 255.
:type a: Integer 0-255.
"""
return np.array((r, g, b, a), dtype=np.uint8)
def rgba1(r=0, g=0, b=0, a=1):
"""
A function that returns a color for floating point values between 0.0 and 1.0
:param r: default 0.0
:type r: Float 0.0-1.0
:param g: default 0.0
:type g: Float 0.0-1.0
:param b: default 0.0
:type b: Float 0.0-1.0
    :param a: default 1.0
:type a: Float 0.0-1.0
"""
return np.array((r*255, g*255, b*255, a*255), dtype=np.uint8)
def rgbahex(color):
"""A function that returns a color for a hex integer"""
return np.array((color,), dtype=np.uint32).view(dtype=np.uint8)
class NullColorEntry(object):
"""
A palette entry that matches the null value. Supports "in" syntax. All palette entries are callables and are generally used thusly::
bin = NullColorEntry(rgba(0,0,0,0))
for v in ...:
if v in bin:
output.append(bin(v))
"""
def __init__(self, color):
self.color = color
def __contains__(self, value):
return value is None
def __call__(self, v):
return self.color
class CatchAll(object):
"""A palette entry that matches any value"""
def __init__(self, color):
self.color = color
def __contains__(self, value):
return True
def __call__(self, v):
return self.color.view(np.uint32)[0]
class ColorBin(object):
"""
A palette entry that presents a uniform color entry for any value between
bounds. By default it is unbounded on either end. To change this
behaviour, specify "left_value" or "right_value" parameters to bound the
bin on one side. Additionally, by default bounding values are included as
part of the bin. To change this behaviour, include_right and include_left
can be set to false in the constructor.
"""
def __init__(self, color, left_value=float('-Inf'), right_value=float("Inf"), include_left=True, include_right=True):
self.l = left_value
self.r = right_value
self.color = color.view(np.uint32)[0]
self.include_right = include_right
self.include_left = include_left
def __contains__(self, value):
return ((value > self.l) or (value == self.l and self.include_left)) and ((value < self.r) or (value == self.r and self.include_right))
def __call__(self, v):
return self.color
class LinearGradient(object):
"""
A gradient palette entry between two floating point numbers. This works
more or less the same way as ColorBin, except that the palette slides
between the colors at the endpoints for intermediate values. There is one
additional parameter, "stops" which is the number of stops between the
values. The default for this is 32, which represents 32 levels of
gradation between the endpoints. Colors are blended using HSL blending, so
changing hues is safe.
"""
def __init__(self, left_color, right_color, left_value, right_value, include_left=True, include_right=True, stops=32):
self.l = left_value
self.r = right_value
self.lc = np.array(colorsys.rgb_to_hls(*left_color[0:3]/255.0) + (left_color[3]/255.0,), dtype=np.float32)
self.rc = np.array(colorsys.rgb_to_hls(*right_color[0:3]/255.0) + (right_color[3]/255.0,), dtype=np.float32)
self.include_right = include_right
self.include_left = include_left
self.stops = stops
self.delta = self.stops / (self.r-self.l)
self.colors = np.zeros((stops,4), dtype=np.uint8)
for stop in range(stops):
k = float(stop)/stops
h,l,s,a = (k*self.rc + (1-k)*self.lc)
r,g,b = colorsys.hls_to_rgb(h,l,s)
self.colors[stop] = 255*np.array((r,g,b,a), dtype=np.float32)
self.colors = self.colors.view(dtype=np.uint32)
def __contains__(self, value):
return ((value > self.l) or (value == self.l and self.include_left)) and ((value < self.r) or (value == self.r and self.include_right))
def __call__(self, va):
iv = int((va-self.l) * self.delta)
return self.colors[iv,0]
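# Small sketch (not part of the original module): a LinearGradient maps values
# between its endpoints onto packed RGBA words via HSL interpolation. Values are
# kept strictly below the right endpoint here to stay inside the precomputed stops.
def _linear_gradient_demo():
    grad = LinearGradient(rgba(0, 0, 255), rgba(255, 0, 0), 0.0, 100.0, stops=16)
    return [hex(int(grad(v))) for v in (0.0, 25.0, 50.0, 99.0) if v in grad]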
class Choices(object):
"""
A stepped palette among logical choices, with the possibility of an "out of band" choice for None values.
"""
def __init__(self, choices, colors, null_color=None):
self.choices = dict(zip(choices,colors))
self.null_color = null_color
def __contains__(self, value):
return value in self.choices
def __call__(self, value):
        return self.choices[value]
class Lambda(object):
"""An imputed gradient that maps from a function of keyword args to a null, 0.0 - 1.0 value"""
def __init__(self, fn, left_color, right_color, null_color=None, stops=32):
self.fn = fn
self.left_color = left_color
self.right_color = right_color
self.null_color = null_color
self.lc = np.array(colorsys.rgb_to_hls(*left_color[0:3]/255.0) + (left_color[3]/255.0,), dtype=np.float32)
self.rc = np.array(colorsys.rgb_to_hls(*right_color[0:3]/255.0) + (right_color[3]/255.0,), dtype=np.float32)
        self.stops = stops
        # __call__ indexes into self.colors, so precompute the HSL-interpolated
        # stops here (mirrors LinearGradient); stored as packed uint32 values.
        self.colors = np.zeros((stops, 4), dtype=np.uint8)
        for stop in range(stops):
            k = float(stop)/stops
            h, l, s, a = (k*self.rc + (1-k)*self.lc)
            r, g, b = colorsys.hls_to_rgb(h, l, s)
            self.colors[stop] = 255*np.array((r, g, b, a), dtype=np.float32)
        self.colors = self.colors.view(dtype=np.uint32).ravel()
def __contains__(self, value):
v = self.fn(value)
return v is not None and v >= 0.0 and v <= 1.0
def __call__(self, value):
        if self.null_color is not None and value is None:
            return self.null_color
        else:
            # clamp so that fn(value) == 1.0 maps to the last precomputed stop
            a = min(int(self.stops*self.fn(value)), self.stops - 1)
            return self.colors[a]
# a callable function that can be vectorized to operate on a single array
class _Palette(object):
def __init__(self, *bands):
self.bands = bands
def __call__(self, value):
for band in self.bands:
if value in band:
return band(value)
return 0
# a callable object that can be vectorized to operate on a number of arrays
class _LambdaPalette(object):
def __init__(self, *bands):
self.bands = bands
def __call__(self, *value):
for band in self.bands:
if value in band:
return band(*value)
return 0
class Palette(object):
"""
A Palette is used in terrapyn_project to take a value raster and turn it into a
color raster::
p = Palette(
ColorBin(rgba(0,0,0), 0.0, 30.0),
ColorBin(rgba(255,0,0), 30.0, 60.0),
ColorBin(rgba(0,255,0), 60.0, 90.0),
ColorBin(rgba(0,0,255), 90.0, 120.0),
CatchAll(rgba(255,255,255,0))
)
ds = gdal.Open('foo.bin')
scipy.imsave('foo.png', p(ds.ReadAsArray()))
Palettes can contain objects of type ColorBin, LinearGradient, CatchAll,
Choices, and NullColorEntry. Palettes can also take Lambda objects so long
as the functions only expect one parameter. A CatchAll, if provided,
should always be the last object.
"""
def __init__(self, *palette):
"""A behaviour that takes a single array and a palette and transfers to a colored array"""
self.palette = np.vectorize(_Palette(*palette), otypes=[np.uint32])
def __call__(self, value):
p = self.palette(value)
shape = value.shape + (4,)
return p.view(dtype=np.uint8).reshape(*shape)
class MultiKeyPalette(object):
"""
A MultiKeyPalette is used in terrapyn_project to take a series of layers and
turn them into a single color raster::
def freezing_precip(qpf, temp):
if temp <= 32 and qpf > 0:
return 0.5
else:
return None
def liquid_precip(qpf, temp):
if temp > 32 and qpf > 0:
return 0.5
else:
return None
def catch_all(**_0):
return 0.5
p = MultiKeyPalette(('qpf','temp')
Lambda(rgba(0,0,255), freezing_precip),
Lambda(rgba(0,255,0), liquid_precip),
Lambda(rgba(0,0,0,0), catch_all)
)
# use the palette
temp = gdal.Open('ds.temp.bin')
qpf = gdal.Open('ds.qpf.bin')
scipy.imsave('foo.png', p(temp=temp.ReadAsArray(), qpf=qpf.ReadAsArray()))
MultiKeyPalettes can contain only objects of type Lambda. These can take
either arbitrary keyword arguments or can take specific ones.
"""
def __init__(self, ordering, *palette):
"""A behaviour that takes a dictionary of arrays and a lambda palette"""
        self.ordering = ordering
        self.palette = np.vectorize(_LambdaPalette(*palette), otypes=[np.uint32])
    def __call__(self, **value):
        # Stack the keyword arrays in the declared ordering, apply the lambda
        # palette, and unpack the packed RGBA words into an (ny, nx, 4) image.
        shape = value[self.ordering[0]].shape
        return self.palette(*[value[v] for v in self.ordering]).view(dtype=np.uint8).reshape(shape[0], shape[1], 4)
class LayeredColorTransfer(object):
"""
TODO currently unimplemented. This will work as a sort of hybrid between
MultiKeyPalette and Palette::
p = LayeredColorTransfer(('landuse', 'dem')
landuse = (LayeredColorTransfer.NORMAL, # bottom layer
(ColorBin(...),
LinearGradient(...),
LinearGradient(...))),
dem = (LayeredColorTransfer.VALUE,
(LinearGradient(...),))
)
landuse = gdal.Open('landuse.tiff')
dem = gdal.Open('land-topographically-shaded.tiff')
scipy.misc.imsave('shaded-land.tiff', p(landuse=landuse, dem=dem))
For more information on what the color transfer modes named here do, see
Photoshop or Gimp documentation.
"""
NORMAL = -1
"""Translucent replace layering mode"""
ADD = 0
"""Add layering mode"""
SUBTRACT = 1
"""Difference layering mode"""
DODGE = 2
"""Dodge layering mode"""
BURN = 3
"""Burn layering mode"""
MULTIPLY = 4
"""Multiply layering mode"""
SOFT_LIGHT = 5
"""Soft light layering mode"""
HARD_LIGHT = 6
"""Hard light layering mode"""
OVERLAY = 7
"""Overlay layering mode"""
SCREEN = 8
"""Screen layering mode"""
COLOR = 9
"""Color layering mode"""
VALUE = 10
"""Value layering mode"""
HUE = 11
"""Hue layering mode"""
SATURATION = 12
"""Saturation layering mode"""
def __init__(self, ordering, **palette):
pass
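if __name__ == '__main__':
    # Minimal smoke test (not part of the original module): colour a small
    # value raster with three bins plus a transparent catch-all.
    demo_palette = Palette(
        ColorBin(rgba(0, 0, 255), 0.0, 10.0),
        ColorBin(rgba(0, 255, 0), 10.0, 20.0, include_left=False),
        ColorBin(rgba(255, 0, 0), 20.0, 30.0, include_left=False),
        CatchAll(rgba(0, 0, 0, 0)))
    demo_values = np.linspace(0.0, 35.0, 36).reshape(6, 6)
    print(demo_palette(demo_values).shape)   # expected (6, 6, 4)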
| apache-2.0 |
cython-testbed/pandas | pandas/tests/frame/test_timeseries.py | 2 | 29979 | # -*- coding: utf-8 -*-
from __future__ import print_function
from datetime import datetime, time
import pytest
from numpy import nan
from numpy.random import randn
import numpy as np
from pandas import (DataFrame, Series, Index,
Timestamp, DatetimeIndex, MultiIndex,
to_datetime, date_range, period_range)
import pandas as pd
import pandas.tseries.offsets as offsets
from pandas.util.testing import (assert_series_equal,
assert_frame_equal,
assert_index_equal,
assert_raises_regex)
import pandas.util.testing as tm
from pandas.compat import product
from pandas.tests.frame.common import TestData
class TestDataFrameTimeSeriesMethods(TestData):
def test_diff(self):
the_diff = self.tsframe.diff(1)
assert_series_equal(the_diff['A'],
self.tsframe['A'] - self.tsframe['A'].shift(1))
# int dtype
a = 10000000000000000
b = a + 1
s = Series([a, b])
rs = DataFrame({'s': s}).diff()
assert rs.s[1] == 1
# mixed numeric
tf = self.tsframe.astype('float32')
the_diff = tf.diff(1)
assert_series_equal(the_diff['A'],
tf['A'] - tf['A'].shift(1))
# issue 10907
df = pd.DataFrame({'y': pd.Series([2]), 'z': pd.Series([3])})
df.insert(0, 'x', 1)
result = df.diff(axis=1)
expected = pd.DataFrame({'x': np.nan, 'y': pd.Series(
1), 'z': pd.Series(1)}).astype('float64')
assert_frame_equal(result, expected)
@pytest.mark.parametrize('tz', [None, 'UTC'])
def test_diff_datetime_axis0(self, tz):
# GH 18578
df = DataFrame({0: date_range('2010', freq='D', periods=2, tz=tz),
1: date_range('2010', freq='D', periods=2, tz=tz)})
result = df.diff(axis=0)
expected = DataFrame({0: pd.TimedeltaIndex(['NaT', '1 days']),
1: pd.TimedeltaIndex(['NaT', '1 days'])})
assert_frame_equal(result, expected)
@pytest.mark.parametrize('tz', [None, 'UTC'])
def test_diff_datetime_axis1(self, tz):
# GH 18578
df = DataFrame({0: date_range('2010', freq='D', periods=2, tz=tz),
1: date_range('2010', freq='D', periods=2, tz=tz)})
if tz is None:
result = df.diff(axis=1)
expected = DataFrame({0: pd.TimedeltaIndex(['NaT', 'NaT']),
1: pd.TimedeltaIndex(['0 days',
'0 days'])})
assert_frame_equal(result, expected)
else:
with pytest.raises(NotImplementedError):
result = df.diff(axis=1)
def test_diff_timedelta(self):
# GH 4533
df = DataFrame(dict(time=[Timestamp('20130101 9:01'),
Timestamp('20130101 9:02')],
value=[1.0, 2.0]))
res = df.diff()
exp = DataFrame([[pd.NaT, np.nan],
[pd.Timedelta('00:01:00'), 1]],
columns=['time', 'value'])
assert_frame_equal(res, exp)
def test_diff_mixed_dtype(self):
df = DataFrame(np.random.randn(5, 3))
df['A'] = np.array([1, 2, 3, 4, 5], dtype=object)
result = df.diff()
assert result[0].dtype == np.float64
def test_diff_neg_n(self):
rs = self.tsframe.diff(-1)
xp = self.tsframe - self.tsframe.shift(-1)
assert_frame_equal(rs, xp)
def test_diff_float_n(self):
rs = self.tsframe.diff(1.)
xp = self.tsframe.diff(1)
assert_frame_equal(rs, xp)
def test_diff_axis(self):
# GH 9727
df = DataFrame([[1., 2.], [3., 4.]])
assert_frame_equal(df.diff(axis=1), DataFrame(
[[np.nan, 1.], [np.nan, 1.]]))
assert_frame_equal(df.diff(axis=0), DataFrame(
[[np.nan, np.nan], [2., 2.]]))
def test_pct_change(self):
rs = self.tsframe.pct_change(fill_method=None)
assert_frame_equal(rs, self.tsframe / self.tsframe.shift(1) - 1)
rs = self.tsframe.pct_change(2)
filled = self.tsframe.fillna(method='pad')
assert_frame_equal(rs, filled / filled.shift(2) - 1)
rs = self.tsframe.pct_change(fill_method='bfill', limit=1)
filled = self.tsframe.fillna(method='bfill', limit=1)
assert_frame_equal(rs, filled / filled.shift(1) - 1)
rs = self.tsframe.pct_change(freq='5D')
filled = self.tsframe.fillna(method='pad')
assert_frame_equal(rs,
(filled / filled.shift(freq='5D') - 1)
.reindex_like(filled))
def test_pct_change_shift_over_nas(self):
s = Series([1., 1.5, np.nan, 2.5, 3.])
df = DataFrame({'a': s, 'b': s})
chg = df.pct_change()
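        # default fill_method='pad' fills the NaN with 1.5 before shifting, so
        # the expected changes are 1.5/1.0-1, 1.5/1.5-1, 2.5/1.5-1 and 3.0/2.5-1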
expected = Series([np.nan, 0.5, 0., 2.5 / 1.5 - 1, .2])
edf = DataFrame({'a': expected, 'b': expected})
assert_frame_equal(chg, edf)
@pytest.mark.parametrize("freq, periods, fill_method, limit",
[('5B', 5, None, None),
('3B', 3, None, None),
('3B', 3, 'bfill', None),
('7B', 7, 'pad', 1),
('7B', 7, 'bfill', 3),
('14B', 14, None, None)])
def test_pct_change_periods_freq(self, freq, periods, fill_method, limit):
# GH 7292
rs_freq = self.tsframe.pct_change(freq=freq,
fill_method=fill_method,
limit=limit)
rs_periods = self.tsframe.pct_change(periods,
fill_method=fill_method,
limit=limit)
assert_frame_equal(rs_freq, rs_periods)
empty_ts = DataFrame(index=self.tsframe.index,
columns=self.tsframe.columns)
rs_freq = empty_ts.pct_change(freq=freq,
fill_method=fill_method,
limit=limit)
rs_periods = empty_ts.pct_change(periods,
fill_method=fill_method,
limit=limit)
assert_frame_equal(rs_freq, rs_periods)
def test_frame_ctor_datetime64_column(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50', freq='10s')
dates = np.asarray(rng)
df = DataFrame({'A': np.random.randn(len(rng)), 'B': dates})
assert np.issubdtype(df['B'].dtype, np.dtype('M8[ns]'))
def test_frame_append_datetime64_column(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50', freq='10s')
df = DataFrame(index=np.arange(len(rng)))
df['A'] = rng
assert np.issubdtype(df['A'].dtype, np.dtype('M8[ns]'))
def test_frame_datetime64_pre1900_repr(self):
df = DataFrame({'year': date_range('1/1/1700', periods=50,
freq='A-DEC')})
# it works!
repr(df)
def test_frame_append_datetime64_col_other_units(self):
n = 100
units = ['h', 'm', 's', 'ms', 'D', 'M', 'Y']
ns_dtype = np.dtype('M8[ns]')
for unit in units:
dtype = np.dtype('M8[%s]' % unit)
vals = np.arange(n, dtype=np.int64).view(dtype)
df = DataFrame({'ints': np.arange(n)}, index=np.arange(n))
df[unit] = vals
ex_vals = to_datetime(vals.astype('O')).values
assert df[unit].dtype == ns_dtype
assert (df[unit].values == ex_vals).all()
# Test insertion into existing datetime64 column
df = DataFrame({'ints': np.arange(n)}, index=np.arange(n))
df['dates'] = np.arange(n, dtype=np.int64).view(ns_dtype)
for unit in units:
dtype = np.dtype('M8[%s]' % unit)
vals = np.arange(n, dtype=np.int64).view(dtype)
tmp = df.copy()
tmp['dates'] = vals
ex_vals = to_datetime(vals.astype('O')).values
assert (tmp['dates'].values == ex_vals).all()
def test_shift(self):
# naive shift
shiftedFrame = self.tsframe.shift(5)
tm.assert_index_equal(shiftedFrame.index, self.tsframe.index)
shiftedSeries = self.tsframe['A'].shift(5)
assert_series_equal(shiftedFrame['A'], shiftedSeries)
shiftedFrame = self.tsframe.shift(-5)
tm.assert_index_equal(shiftedFrame.index, self.tsframe.index)
shiftedSeries = self.tsframe['A'].shift(-5)
assert_series_equal(shiftedFrame['A'], shiftedSeries)
# shift by 0
unshifted = self.tsframe.shift(0)
assert_frame_equal(unshifted, self.tsframe)
# shift by DateOffset
shiftedFrame = self.tsframe.shift(5, freq=offsets.BDay())
assert len(shiftedFrame) == len(self.tsframe)
shiftedFrame2 = self.tsframe.shift(5, freq='B')
assert_frame_equal(shiftedFrame, shiftedFrame2)
d = self.tsframe.index[0]
shifted_d = d + offsets.BDay(5)
assert_series_equal(self.tsframe.xs(d),
shiftedFrame.xs(shifted_d), check_names=False)
# shift int frame
int_shifted = self.intframe.shift(1) # noqa
# Shifting with PeriodIndex
ps = tm.makePeriodFrame()
shifted = ps.shift(1)
unshifted = shifted.shift(-1)
tm.assert_index_equal(shifted.index, ps.index)
tm.assert_index_equal(unshifted.index, ps.index)
tm.assert_numpy_array_equal(unshifted.iloc[:, 0].dropna().values,
ps.iloc[:-1, 0].values)
shifted2 = ps.shift(1, 'B')
shifted3 = ps.shift(1, offsets.BDay())
assert_frame_equal(shifted2, shifted3)
assert_frame_equal(ps, shifted2.shift(-1, 'B'))
tm.assert_raises_regex(ValueError,
'does not match PeriodIndex freq',
ps.shift, freq='D')
# shift other axis
# GH 6371
df = DataFrame(np.random.rand(10, 5))
expected = pd.concat([DataFrame(np.nan, index=df.index,
columns=[0]),
df.iloc[:, 0:-1]],
ignore_index=True, axis=1)
result = df.shift(1, axis=1)
assert_frame_equal(result, expected)
# shift named axis
df = DataFrame(np.random.rand(10, 5))
expected = pd.concat([DataFrame(np.nan, index=df.index,
columns=[0]),
df.iloc[:, 0:-1]],
ignore_index=True, axis=1)
result = df.shift(1, axis='columns')
assert_frame_equal(result, expected)
def test_shift_bool(self):
df = DataFrame({'high': [True, False],
'low': [False, False]})
rs = df.shift(1)
xp = DataFrame(np.array([[np.nan, np.nan],
[True, False]], dtype=object),
columns=['high', 'low'])
assert_frame_equal(rs, xp)
def test_shift_categorical(self):
# GH 9416
s1 = pd.Series(['a', 'b', 'c'], dtype='category')
s2 = pd.Series(['A', 'B', 'C'], dtype='category')
df = DataFrame({'one': s1, 'two': s2})
rs = df.shift(1)
xp = DataFrame({'one': s1.shift(1), 'two': s2.shift(1)})
assert_frame_equal(rs, xp)
def test_shift_empty(self):
# Regression test for #8019
df = DataFrame({'foo': []})
rs = df.shift(-1)
assert_frame_equal(df, rs)
def test_shift_duplicate_columns(self):
# GH 9092; verify that position-based shifting works
# in the presence of duplicate columns
column_lists = [list(range(5)), [1] * 5, [1, 1, 2, 2, 1]]
data = np.random.randn(20, 5)
shifted = []
for columns in column_lists:
df = pd.DataFrame(data.copy(), columns=columns)
for s in range(5):
df.iloc[:, s] = df.iloc[:, s].shift(s + 1)
df.columns = range(5)
shifted.append(df)
# sanity check the base case
nulls = shifted[0].isna().sum()
assert_series_equal(nulls, Series(range(1, 6), dtype='int64'))
# check all answers are the same
assert_frame_equal(shifted[0], shifted[1])
assert_frame_equal(shifted[0], shifted[2])
def test_tshift(self):
# PeriodIndex
ps = tm.makePeriodFrame()
shifted = ps.tshift(1)
unshifted = shifted.tshift(-1)
assert_frame_equal(unshifted, ps)
shifted2 = ps.tshift(freq='B')
assert_frame_equal(shifted, shifted2)
shifted3 = ps.tshift(freq=offsets.BDay())
assert_frame_equal(shifted, shifted3)
tm.assert_raises_regex(
ValueError, 'does not match', ps.tshift, freq='M')
# DatetimeIndex
shifted = self.tsframe.tshift(1)
unshifted = shifted.tshift(-1)
assert_frame_equal(self.tsframe, unshifted)
shifted2 = self.tsframe.tshift(freq=self.tsframe.index.freq)
assert_frame_equal(shifted, shifted2)
inferred_ts = DataFrame(self.tsframe.values,
Index(np.asarray(self.tsframe.index)),
columns=self.tsframe.columns)
shifted = inferred_ts.tshift(1)
unshifted = shifted.tshift(-1)
assert_frame_equal(shifted, self.tsframe.tshift(1))
assert_frame_equal(unshifted, inferred_ts)
no_freq = self.tsframe.iloc[[0, 5, 7], :]
pytest.raises(ValueError, no_freq.tshift)
def test_truncate(self):
ts = self.tsframe[::3]
start, end = self.tsframe.index[3], self.tsframe.index[6]
start_missing = self.tsframe.index[2]
end_missing = self.tsframe.index[7]
# neither specified
truncated = ts.truncate()
assert_frame_equal(truncated, ts)
# both specified
expected = ts[1:3]
truncated = ts.truncate(start, end)
assert_frame_equal(truncated, expected)
truncated = ts.truncate(start_missing, end_missing)
assert_frame_equal(truncated, expected)
# start specified
expected = ts[1:]
truncated = ts.truncate(before=start)
assert_frame_equal(truncated, expected)
truncated = ts.truncate(before=start_missing)
assert_frame_equal(truncated, expected)
# end specified
expected = ts[:3]
truncated = ts.truncate(after=end)
assert_frame_equal(truncated, expected)
truncated = ts.truncate(after=end_missing)
assert_frame_equal(truncated, expected)
pytest.raises(ValueError, ts.truncate,
before=ts.index[-1] - 1,
after=ts.index[0] + 1)
def test_truncate_copy(self):
index = self.tsframe.index
truncated = self.tsframe.truncate(index[5], index[10])
truncated.values[:] = 5.
assert not (self.tsframe.values[5:11] == 5).any()
def test_truncate_nonsortedindex(self):
# GH 17935
df = pd.DataFrame({'A': ['a', 'b', 'c', 'd', 'e']},
index=[5, 3, 2, 9, 0])
with tm.assert_raises_regex(ValueError,
'truncate requires a sorted index'):
df.truncate(before=3, after=9)
rng = pd.date_range('2011-01-01', '2012-01-01', freq='W')
ts = pd.DataFrame({'A': np.random.randn(len(rng)),
'B': np.random.randn(len(rng))},
index=rng)
with tm.assert_raises_regex(ValueError,
'truncate requires a sorted index'):
ts.sort_values('A', ascending=False).truncate(before='2011-11',
after='2011-12')
df = pd.DataFrame({3: np.random.randn(5),
20: np.random.randn(5),
2: np.random.randn(5),
0: np.random.randn(5)},
columns=[3, 20, 2, 0])
with tm.assert_raises_regex(ValueError,
'truncate requires a sorted index'):
df.truncate(before=2, after=20, axis=1)
def test_asfreq(self):
offset_monthly = self.tsframe.asfreq(offsets.BMonthEnd())
rule_monthly = self.tsframe.asfreq('BM')
tm.assert_almost_equal(offset_monthly['A'], rule_monthly['A'])
filled = rule_monthly.asfreq('B', method='pad') # noqa
# TODO: actually check that this worked.
# don't forget!
filled_dep = rule_monthly.asfreq('B', method='pad') # noqa
# test does not blow up on length-0 DataFrame
zero_length = self.tsframe.reindex([])
result = zero_length.asfreq('BM')
assert result is not zero_length
def test_asfreq_datetimeindex(self):
df = DataFrame({'A': [1, 2, 3]},
index=[datetime(2011, 11, 1), datetime(2011, 11, 2),
datetime(2011, 11, 3)])
df = df.asfreq('B')
assert isinstance(df.index, DatetimeIndex)
ts = df['A'].asfreq('B')
assert isinstance(ts.index, DatetimeIndex)
def test_asfreq_fillvalue(self):
# test for fill value during upsampling, related to issue 3715
# setup
rng = pd.date_range('1/1/2016', periods=10, freq='2S')
ts = pd.Series(np.arange(len(rng)), index=rng)
df = pd.DataFrame({'one': ts})
# insert pre-existing missing value
df.loc['2016-01-01 00:00:08', 'one'] = None
actual_df = df.asfreq(freq='1S', fill_value=9.0)
expected_df = df.asfreq(freq='1S').fillna(9.0)
expected_df.loc['2016-01-01 00:00:08', 'one'] = None
assert_frame_equal(expected_df, actual_df)
expected_series = ts.asfreq(freq='1S').fillna(9.0)
actual_series = ts.asfreq(freq='1S', fill_value=9.0)
assert_series_equal(expected_series, actual_series)
@pytest.mark.parametrize("data,idx,expected_first,expected_last", [
({'A': [1, 2, 3]}, [1, 1, 2], 1, 2),
({'A': [1, 2, 3]}, [1, 2, 2], 1, 2),
({'A': [1, 2, 3, 4]}, ['d', 'd', 'd', 'd'], 'd', 'd'),
({'A': [1, np.nan, 3]}, [1, 1, 2], 1, 2),
({'A': [np.nan, np.nan, 3]}, [1, 1, 2], 2, 2),
({'A': [1, np.nan, 3]}, [1, 2, 2], 1, 2)])
def test_first_last_valid(self, data, idx,
expected_first, expected_last):
N = len(self.frame.index)
mat = randn(N)
mat[:5] = nan
mat[-5:] = nan
frame = DataFrame({'foo': mat}, index=self.frame.index)
index = frame.first_valid_index()
assert index == frame.index[5]
index = frame.last_valid_index()
assert index == frame.index[-6]
# GH12800
empty = DataFrame()
assert empty.last_valid_index() is None
assert empty.first_valid_index() is None
# GH17400: no valid entries
frame[:] = nan
assert frame.last_valid_index() is None
assert frame.first_valid_index() is None
# GH20499: its preserves freq with holes
frame.index = date_range("20110101", periods=N, freq="B")
frame.iloc[1] = 1
frame.iloc[-2] = 1
assert frame.first_valid_index() == frame.index[1]
assert frame.last_valid_index() == frame.index[-2]
assert frame.first_valid_index().freq == frame.index.freq
assert frame.last_valid_index().freq == frame.index.freq
# GH 21441
df = DataFrame(data, index=idx)
assert expected_first == df.first_valid_index()
assert expected_last == df.last_valid_index()
def test_first_subset(self):
ts = tm.makeTimeDataFrame(freq='12h')
result = ts.first('10d')
assert len(result) == 20
ts = tm.makeTimeDataFrame(freq='D')
result = ts.first('10d')
assert len(result) == 10
result = ts.first('3M')
expected = ts[:'3/31/2000']
assert_frame_equal(result, expected)
result = ts.first('21D')
expected = ts[:21]
assert_frame_equal(result, expected)
result = ts[:0].first('3M')
assert_frame_equal(result, ts[:0])
def test_first_raises(self):
# GH20725
df = pd.DataFrame([[1, 2, 3], [4, 5, 6]])
with pytest.raises(TypeError): # index is not a DatetimeIndex
df.first('1D')
def test_last_subset(self):
ts = tm.makeTimeDataFrame(freq='12h')
result = ts.last('10d')
assert len(result) == 20
ts = tm.makeTimeDataFrame(nper=30, freq='D')
result = ts.last('10d')
assert len(result) == 10
result = ts.last('21D')
expected = ts['2000-01-10':]
assert_frame_equal(result, expected)
result = ts.last('21D')
expected = ts[-21:]
assert_frame_equal(result, expected)
result = ts[:0].last('3M')
assert_frame_equal(result, ts[:0])
def test_last_raises(self):
# GH20725
df = pd.DataFrame([[1, 2, 3], [4, 5, 6]])
with pytest.raises(TypeError): # index is not a DatetimeIndex
df.last('1D')
def test_at_time(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
rs = ts.at_time(rng[1])
assert (rs.index.hour == rng[1].hour).all()
assert (rs.index.minute == rng[1].minute).all()
assert (rs.index.second == rng[1].second).all()
result = ts.at_time('9:30')
expected = ts.at_time(time(9, 30))
assert_frame_equal(result, expected)
result = ts.loc[time(9, 30)]
expected = ts.loc[(rng.hour == 9) & (rng.minute == 30)]
assert_frame_equal(result, expected)
# midnight, everything
rng = date_range('1/1/2000', '1/31/2000')
ts = DataFrame(np.random.randn(len(rng), 3), index=rng)
result = ts.at_time(time(0, 0))
assert_frame_equal(result, ts)
# time doesn't exist
rng = date_range('1/1/2012', freq='23Min', periods=384)
ts = DataFrame(np.random.randn(len(rng), 2), rng)
rs = ts.at_time('16:00')
assert len(rs) == 0
def test_at_time_raises(self):
# GH20725
df = pd.DataFrame([[1, 2, 3], [4, 5, 6]])
with pytest.raises(TypeError): # index is not a DatetimeIndex
df.at_time('00:00')
def test_between_time(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
stime = time(0, 0)
etime = time(1, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = 13 * 4 + 1
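            # 13 five-minute stamps fall in [00:00, 01:00] on each of the 4 full
            # days; the final 1/5 00:00 endpoint contributes the extra 1.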
if not inc_start:
exp_len -= 5
if not inc_end:
exp_len -= 4
assert len(filtered) == exp_len
for rs in filtered.index:
t = rs.time()
if inc_start:
assert t >= stime
else:
assert t > stime
if inc_end:
assert t <= etime
else:
assert t < etime
result = ts.between_time('00:00', '01:00')
expected = ts.between_time(stime, etime)
assert_frame_equal(result, expected)
# across midnight
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
stime = time(22, 0)
etime = time(9, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = (12 * 11 + 1) * 4 + 1
if not inc_start:
exp_len -= 4
if not inc_end:
exp_len -= 4
assert len(filtered) == exp_len
for rs in filtered.index:
t = rs.time()
if inc_start:
assert (t >= stime) or (t <= etime)
else:
assert (t > stime) or (t <= etime)
if inc_end:
assert (t <= etime) or (t >= stime)
else:
assert (t < etime) or (t >= stime)
def test_between_time_raises(self):
# GH20725
df = pd.DataFrame([[1, 2, 3], [4, 5, 6]])
with pytest.raises(TypeError): # index is not a DatetimeIndex
df.between_time(start_time='00:00', end_time='12:00')
def test_operation_on_NaT(self):
# Both NaT and Timestamp are in DataFrame.
df = pd.DataFrame({'foo': [pd.NaT, pd.NaT,
pd.Timestamp('2012-05-01')]})
res = df.min()
exp = pd.Series([pd.Timestamp('2012-05-01')], index=["foo"])
tm.assert_series_equal(res, exp)
res = df.max()
exp = pd.Series([pd.Timestamp('2012-05-01')], index=["foo"])
tm.assert_series_equal(res, exp)
# GH12941, only NaTs are in DataFrame.
df = pd.DataFrame({'foo': [pd.NaT, pd.NaT]})
res = df.min()
exp = pd.Series([pd.NaT], index=["foo"])
tm.assert_series_equal(res, exp)
res = df.max()
exp = pd.Series([pd.NaT], index=["foo"])
tm.assert_series_equal(res, exp)
def test_datetime_assignment_with_NaT_and_diff_time_units(self):
# GH 7492
data_ns = np.array([1, 'nat'], dtype='datetime64[ns]')
result = pd.Series(data_ns).to_frame()
result['new'] = data_ns
expected = pd.DataFrame({0: [1, None],
'new': [1, None]}, dtype='datetime64[ns]')
tm.assert_frame_equal(result, expected)
# OutOfBoundsDatetime error shouldn't occur
data_s = np.array([1, 'nat'], dtype='datetime64[s]')
result['new'] = data_s
expected = pd.DataFrame({0: [1, None],
'new': [1e9, None]}, dtype='datetime64[ns]')
tm.assert_frame_equal(result, expected)
def test_frame_to_period(self):
K = 5
dr = date_range('1/1/2000', '1/1/2001')
pr = period_range('1/1/2000', '1/1/2001')
df = DataFrame(randn(len(dr), K), index=dr)
df['mix'] = 'a'
pts = df.to_period()
exp = df.copy()
exp.index = pr
assert_frame_equal(pts, exp)
pts = df.to_period('M')
tm.assert_index_equal(pts.index, exp.index.asfreq('M'))
df = df.T
pts = df.to_period(axis=1)
exp = df.copy()
exp.columns = pr
assert_frame_equal(pts, exp)
pts = df.to_period('M', axis=1)
tm.assert_index_equal(pts.columns, exp.columns.asfreq('M'))
pytest.raises(ValueError, df.to_period, axis=2)
@pytest.mark.parametrize("fn", ['tz_localize', 'tz_convert'])
def test_tz_convert_and_localize(self, fn):
l0 = date_range('20140701', periods=5, freq='D')
l1 = date_range('20140701', periods=5, freq='D')
int_idx = Index(range(5))
if fn == 'tz_convert':
l0 = l0.tz_localize('UTC')
l1 = l1.tz_localize('UTC')
for idx in [l0, l1]:
l0_expected = getattr(idx, fn)('US/Pacific')
l1_expected = getattr(idx, fn)('US/Pacific')
df1 = DataFrame(np.ones(5), index=l0)
df1 = getattr(df1, fn)('US/Pacific')
assert_index_equal(df1.index, l0_expected)
# MultiIndex
# GH7846
df2 = DataFrame(np.ones(5), MultiIndex.from_arrays([l0, l1]))
df3 = getattr(df2, fn)('US/Pacific', level=0)
assert not df3.index.levels[0].equals(l0)
assert_index_equal(df3.index.levels[0], l0_expected)
assert_index_equal(df3.index.levels[1], l1)
assert not df3.index.levels[1].equals(l1_expected)
df3 = getattr(df2, fn)('US/Pacific', level=1)
assert_index_equal(df3.index.levels[0], l0)
assert not df3.index.levels[0].equals(l0_expected)
assert_index_equal(df3.index.levels[1], l1_expected)
assert not df3.index.levels[1].equals(l1)
df4 = DataFrame(np.ones(5),
MultiIndex.from_arrays([int_idx, l0]))
# TODO: untested
df5 = getattr(df4, fn)('US/Pacific', level=1) # noqa
assert_index_equal(df3.index.levels[0], l0)
assert not df3.index.levels[0].equals(l0_expected)
assert_index_equal(df3.index.levels[1], l1_expected)
assert not df3.index.levels[1].equals(l1)
# Bad Inputs
# Not DatetimeIndex / PeriodIndex
with assert_raises_regex(TypeError, 'DatetimeIndex'):
df = DataFrame(index=int_idx)
df = getattr(df, fn)('US/Pacific')
# Not DatetimeIndex / PeriodIndex
with assert_raises_regex(TypeError, 'DatetimeIndex'):
df = DataFrame(np.ones(5),
MultiIndex.from_arrays([int_idx, l0]))
df = getattr(df, fn)('US/Pacific', level=0)
# Invalid level
with assert_raises_regex(ValueError, 'not valid'):
df = DataFrame(index=l0)
df = getattr(df, fn)('US/Pacific', level=1)
| bsd-3-clause |
bthirion/scikit-learn | sklearn/decomposition/truncated_svd.py | 13 | 8301 | """Truncated SVD for sparse matrices, aka latent semantic analysis (LSA).
"""
# Author: Lars Buitinck
# Olivier Grisel <[email protected]>
# Michael Becker <[email protected]>
# License: 3-clause BSD.
import numpy as np
import scipy.sparse as sp
from scipy.sparse.linalg import svds
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_array, check_random_state
from ..utils.extmath import randomized_svd, safe_sparse_dot, svd_flip
from ..utils.sparsefuncs import mean_variance_axis
__all__ = ["TruncatedSVD"]
class TruncatedSVD(BaseEstimator, TransformerMixin):
"""Dimensionality reduction using truncated SVD (aka LSA).
This transformer performs linear dimensionality reduction by means of
truncated singular value decomposition (SVD). Contrary to PCA, this
estimator does not center the data before computing the singular value
decomposition. This means it can work with scipy.sparse matrices
efficiently.
In particular, truncated SVD works on term count/tf-idf matrices as
returned by the vectorizers in sklearn.feature_extraction.text. In that
context, it is known as latent semantic analysis (LSA).
This estimator supports two algorithms: a fast randomized SVD solver, and
a "naive" algorithm that uses ARPACK as an eigensolver on (X * X.T) or
(X.T * X), whichever is more efficient.
Read more in the :ref:`User Guide <LSA>`.
Parameters
----------
n_components : int, default = 2
Desired dimensionality of output data.
Must be strictly less than the number of features.
The default value is useful for visualisation. For LSA, a value of
100 is recommended.
algorithm : string, default = "randomized"
SVD solver to use. Either "arpack" for the ARPACK wrapper in SciPy
(scipy.sparse.linalg.svds), or "randomized" for the randomized
algorithm due to Halko (2009).
n_iter : int, optional (default 5)
Number of iterations for randomized SVD solver. Not used by ARPACK.
The default is larger than the default in `randomized_svd` to handle
sparse matrices that may have large slowly decaying spectrum.
random_state : int, RandomState instance or None, optional, default = None
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
tol : float, optional
Tolerance for ARPACK. 0 means machine precision. Ignored by randomized
SVD solver.
Attributes
----------
components_ : array, shape (n_components, n_features)
explained_variance_ : array, shape (n_components,)
The variance of the training samples transformed by a projection to
each component.
explained_variance_ratio_ : array, shape (n_components,)
Percentage of variance explained by each of the selected components.
singular_values_ : array, shape (n_components,)
The singular values corresponding to each of the selected components.
The singular values are equal to the 2-norms of the ``n_components``
variables in the lower-dimensional space.
Examples
--------
>>> from sklearn.decomposition import TruncatedSVD
>>> from sklearn.random_projection import sparse_random_matrix
>>> X = sparse_random_matrix(100, 100, density=0.01, random_state=42)
>>> svd = TruncatedSVD(n_components=5, n_iter=7, random_state=42)
>>> svd.fit(X) # doctest: +NORMALIZE_WHITESPACE
TruncatedSVD(algorithm='randomized', n_components=5, n_iter=7,
random_state=42, tol=0.0)
>>> print(svd.explained_variance_ratio_) # doctest: +ELLIPSIS
[ 0.0606... 0.0584... 0.0497... 0.0434... 0.0372...]
>>> print(svd.explained_variance_ratio_.sum()) # doctest: +ELLIPSIS
0.249...
>>> print(svd.singular_values_) # doctest: +ELLIPSIS
[ 2.5841... 2.5245... 2.3201... 2.1753... 2.0443...]
See also
--------
PCA
RandomizedPCA
References
----------
Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
    Halko, et al., 2009 (arXiv:0909.4061) http://arxiv.org/pdf/0909.4061
Notes
-----
    SVD suffers from a problem called "sign indeterminacy", which means the
sign of the ``components_`` and the output from transform depend on the
algorithm and random state. To work around this, fit instances of this
class to data once, then keep the instance around to do transformations.
"""
def __init__(self, n_components=2, algorithm="randomized", n_iter=5,
random_state=None, tol=0.):
self.algorithm = algorithm
self.n_components = n_components
self.n_iter = n_iter
self.random_state = random_state
self.tol = tol
def fit(self, X, y=None):
"""Fit LSI model on training data X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Returns
-------
self : object
Returns the transformer object.
"""
self.fit_transform(X)
return self
def fit_transform(self, X, y=None):
"""Fit LSI model to X and perform dimensionality reduction on X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Returns
-------
X_new : array, shape (n_samples, n_components)
Reduced version of X. This will always be a dense array.
"""
X = check_array(X, accept_sparse=['csr', 'csc'])
random_state = check_random_state(self.random_state)
if self.algorithm == "arpack":
U, Sigma, VT = svds(X, k=self.n_components, tol=self.tol)
# svds doesn't abide by scipy.linalg.svd/randomized_svd
# conventions, so reverse its outputs.
Sigma = Sigma[::-1]
U, VT = svd_flip(U[:, ::-1], VT[::-1])
elif self.algorithm == "randomized":
k = self.n_components
n_features = X.shape[1]
if k >= n_features:
raise ValueError("n_components must be < n_features;"
" got %d >= %d" % (k, n_features))
U, Sigma, VT = randomized_svd(X, self.n_components,
n_iter=self.n_iter,
random_state=random_state)
else:
raise ValueError("unknown algorithm %r" % self.algorithm)
self.components_ = VT
# Calculate explained variance & explained variance ratio
X_transformed = U * Sigma
self.explained_variance_ = exp_var = np.var(X_transformed, axis=0)
if sp.issparse(X):
_, full_var = mean_variance_axis(X, axis=0)
full_var = full_var.sum()
else:
full_var = np.var(X, axis=0).sum()
self.explained_variance_ratio_ = exp_var / full_var
self.singular_values_ = Sigma # Store the singular values.
return X_transformed
def transform(self, X):
"""Perform dimensionality reduction on X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
New data.
Returns
-------
X_new : array, shape (n_samples, n_components)
Reduced version of X. This will always be a dense array.
"""
X = check_array(X, accept_sparse='csr')
return safe_sparse_dot(X, self.components_.T)
def inverse_transform(self, X):
"""Transform X back to its original space.
Returns an array X_original whose transform would be X.
Parameters
----------
X : array-like, shape (n_samples, n_components)
New data.
Returns
-------
X_original : array, shape (n_samples, n_features)
Note that this is always a dense array.
"""
X = check_array(X)
return np.dot(X, self.components_)
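# --- Usage sketch (not part of scikit-learn) ----------------------------------
# A minimal LSA-style pipeline, assuming `docs` is a list of raw text documents;
# the vectorizer, pipeline and normalizer are the standard scikit-learn classes.
def _lsa_pipeline_sketch(docs, n_components=100):
    from sklearn.feature_extraction.text import TfidfVectorizer
    from sklearn.pipeline import make_pipeline
    from sklearn.preprocessing import Normalizer
    lsa = make_pipeline(TfidfVectorizer(),
                        TruncatedSVD(n_components=n_components),
                        Normalizer(copy=False))
    return lsa.fit_transform(docs)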
| bsd-3-clause |
rknLA/sms-tools | lectures/06-Harmonic-model/plots-code/piano-autocorrelation.py | 26 | 1114 | import matplotlib.pyplot as plt
import numpy as np
import math
import time, os, sys
import essentia.standard as ess
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import utilFunctions as UF
(fs, x) = UF.wavread('../../../sounds/piano.wav')
start = 13860
M = 800
xp = x[start:start+M]/float(max(x[start:start+M]))
r = ess.AutoCorrelation(normalization = 'standard')(xp)
r = r / max(r)
peaks = ess.PeakDetection(threshold =.11, interpolate = False, minPosition = .01)(r)
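# Rough f0 estimate (not in the original lecture script): peak positions are
# normalized to [0, 1], so the lag in samples is position*(M-1) and f0 ~ fs/lag.
if peaks[0].size > 0:
    first_lag = peaks[0][0] * (M - 1)
    print("estimated f0: %.1f Hz" % (fs / first_lag))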
plt.figure(1, figsize=(9, 7))
plt.subplot(211)
plt.plot(np.arange(M)/float(fs), xp, lw=1.5)
plt.axis([0, (M-1)/float(fs), min(xp), max(xp)])
plt.xlabel('time (sec)')
plt.ylabel('amplitude')
plt.title('x (piano.wav)')
plt.subplot(212)
plt.plot(np.arange(M)/float(fs), r, 'r', lw=1.5)
plt.plot(peaks[0]*(M-1)/float(fs),peaks[1], 'x', color='k', markeredgewidth=1.5)
plt.axis([0, (M-1)/float(fs), min(r), max(r)])
plt.title('autocorrelation function + peaks')
plt.xlabel('lag time (sec)')
plt.ylabel('correlation')
plt.tight_layout()
plt.savefig('piano-autocorrelation.png')
plt.show() | agpl-3.0 |
djgagne/hagelslag | hagelslag/util/lsr_calibration_dataset.py | 1 | 6577 | from make_proj_grids import *
from netCDF4 import Dataset
import cartopy.crs as ccrs
import pandas as pd
import numpy as np
import argparse
import os
import pyproj
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-s", "--start_date", required=True, help="Start date in YYYY-MM-DD format")
parser.add_argument("-e", "--end_date", required=False, help="End date in YYYY-MM-DD format")
parser.add_argument("-o", "--out_path", required=True, help="Path to the destination of MESH verification data")
parser.add_argument("-m", "--map_file", required=True, help="Path to the ensemble mapfile")
args = parser.parse_args()
if args.end_date:
run_dates = pd.date_range(start=args.start_date, end=args.end_date, freq='1D').strftime("%y%m%d")
else:
run_dates = pd.date_range(start=args.start_date, end=args.start_date, freq='1D').strftime("%y%m%d")
out_path = args.out_path
mapfile = args.map_file
LSR_calibration_data(mapfile,out_path,run_dates)
return
def LSR_calibration_data(mapfile,out_path,run_dates,hours=[17,19,21],sector=None):
"""
    Using the grid from the input ML forecast (netcdf) data, SPC storm reports
    are gridded with a 25 mile (~40 km) radius drawn around each report.
The output file contains binary data, where any point
within the 25 mile radius is a 1, and all other points are 0.
Currently only supports netcdf files.
"""
hail_threshold = [25,50]
lsr_dict = dict()
proj_dict, grid_dict = read_ncar_map_file(mapfile)
projection = pyproj.Proj(proj_dict)
mapping_data = make_proj_grids(proj_dict, grid_dict)
forecast_lons = np.array(mapping_data['lon'])
forecast_lats = np.array(mapping_data['lat'])
forecast_x = np.array(mapping_data['x'])
forecast_y = np.array(mapping_data['y'])
for date in run_dates:
print(date)
csv_file = 'https://www.spc.noaa.gov/climo/reports/{0}_rpts_hail.csv'.format(date)
try:
hail_reports = pd.read_csv(csv_file)
except:
print('Report CSV file could not be opened.')
continue
for threshold in hail_threshold:
#if os.path.exists(out_path+'{0}_{1}_lsr_mask.nc'.format(date,threshold)):
# print('>{0}mm file already exists'.format(threshold))
# continue
#print('Creating LSR mask >{0}mm'.format(threshold))
#Get size values from hail reports
inches_thresh = round((threshold)*0.03937)*100
report_size = hail_reports.loc[:,'Size'].values
lsr_dict['full_day'] = np.zeros(forecast_lats.shape)
full_day_indices = np.where(report_size >= inches_thresh)[0]
if len(full_day_indices) < 1:
print('No >{0}mm LSRs found'.format(threshold))
continue
reports_lat_full = hail_reports.loc[full_day_indices,'Lat'].values
reports_lon_full = hail_reports.loc[full_day_indices,'Lon'].values
lsr_dict['full_day'] = calculate_distance(reports_lat_full,reports_lon_full,forecast_y,forecast_x,projection)
#Get time values from hail reports
report_time = (hail_reports.loc[:,'Time'].values)/100
#Get lat/lon of different time periods and hail sizes
for start_hour in hours:
lsr_dict['{0}'.format(start_hour)] = np.zeros(forecast_lats.shape)
end_hour = (start_hour+4)%24
if end_hour > 12:
hour_indices = np.where((start_hour <= report_time) & (end_hour >= report_time) & (report_size >= inches_thresh))[0]
else:
#Find reports before and after 0z
hour_before_0z = np.where((start_hour <= report_time) & (report_size >= inches_thresh))[0]
hour_after_0z = np.where((end_hour >= report_time) & (report_size >= inches_thresh))[0]
#Combine two arrays
hour_indices = np.hstack((hour_before_0z, hour_after_0z))
if len(hour_indices) < 1:
continue
reports_lat = hail_reports.loc[hour_indices,'Lat'].values
reports_lon = hail_reports.loc[hour_indices,'Lon'].values
lsr_dict['{0}'.format(start_hour)] = calculate_distance(reports_lat,reports_lon,forecast_y,forecast_x,projection)
# Create netCDF file
if sector:
out_filename = out_path+'{0}_{1}_{2}_lsr_mask.nc'.format(date,threshold,sector)
else:
out_filename = out_path+'{0}_{1}_lsr_mask.nc'.format(date,threshold)
out_file = Dataset(out_filename, "w")
out_file.createDimension("x", forecast_lons.shape[0])
out_file.createDimension("y", forecast_lons.shape[1])
out_file.createVariable("Longitude", "f4", ("x", "y"))
out_file.createVariable("Latitude", "f4",("x", "y"))
out_file.createVariable("24_Hour_All_12z_12z", "f4", ("x", "y"))
out_file.createVariable("4_Hour_All_17z_21z", "f4", ("x", "y"))
out_file.createVariable("4_Hour_All_19z_23z", "f4", ("x", "y"))
out_file.createVariable("4_Hour_All_21z_25z", "f4", ("x", "y"))
out_file.variables["Longitude"][:,:] = forecast_lons
out_file.variables["Latitude"][:,:] = forecast_lats
out_file.variables["24_Hour_All_12z_12z"][:,:] = lsr_dict['full_day']
out_file.variables["4_Hour_All_17z_21z"][:,:] = lsr_dict['17']
out_file.variables["4_Hour_All_19z_23z"][:,:] = lsr_dict['19']
out_file.variables["4_Hour_All_21z_25z"][:,:] = lsr_dict['21']
out_file.close()
print("Writing to " + out_filename)
print()
return
def calculate_distance(obs_lat,obs_lon,forecast_y,forecast_x,projection):
"""
    Calculate the distance between forecast grid points and observed report locations.
Returns:
Binary array where ones are within a 40km radius
"""
x,y = projection(obs_lon,obs_lat)
mask_array = np.zeros(forecast_y.shape)
for index, point in enumerate(obs_lat):
y_diff = (y[index]-forecast_y)**2.0
x_diff = (x[index]-forecast_x)**2.0
total_dist = np.sqrt(y_diff+x_diff)
row, col = np.where(total_dist < 40234.0)
        mask_array[row, col] = 1.0  # binary mask: 1 within the 40 km radius
return mask_array
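def _example_distance_mask():
    """
    Minimal sketch (not part of the original utility): builds the binary mask on
    a small synthetic grid. The Lambert conformal projection parameters below
    are illustrative assumptions, not the real ensemble map projection.
    """
    demo_proj = pyproj.Proj(proj='lcc', lat_1=32.0, lat_2=46.0, lat_0=39.0, lon_0=-101.0)
    lons, lats = np.meshgrid(np.arange(-100.0, -95.0, 0.05), np.arange(35.0, 40.0, 0.05))
    demo_x, demo_y = demo_proj(lons, lats)
    mask = calculate_distance(np.array([37.5]), np.array([-97.5]), demo_y, demo_x, demo_proj)
    print("grid points within 40 km of the report:", int(mask.sum()))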
if __name__ == "__main__":
main()
| mit |
gfyoung/pandas | pandas/tests/indexes/timedeltas/test_formats.py | 2 | 3280 | import pytest
import pandas as pd
from pandas import Series, TimedeltaIndex
class TestTimedeltaIndexRendering:
@pytest.mark.parametrize("method", ["__repr__", "__str__"])
def test_representation(self, method):
idx1 = TimedeltaIndex([], freq="D")
idx2 = TimedeltaIndex(["1 days"], freq="D")
idx3 = TimedeltaIndex(["1 days", "2 days"], freq="D")
idx4 = TimedeltaIndex(["1 days", "2 days", "3 days"], freq="D")
idx5 = TimedeltaIndex(["1 days 00:00:01", "2 days", "3 days"])
exp1 = "TimedeltaIndex([], dtype='timedelta64[ns]', freq='D')"
exp2 = "TimedeltaIndex(['1 days'], dtype='timedelta64[ns]', freq='D')"
exp3 = "TimedeltaIndex(['1 days', '2 days'], dtype='timedelta64[ns]', freq='D')"
exp4 = (
"TimedeltaIndex(['1 days', '2 days', '3 days'], "
"dtype='timedelta64[ns]', freq='D')"
)
exp5 = (
"TimedeltaIndex(['1 days 00:00:01', '2 days 00:00:00', "
"'3 days 00:00:00'], dtype='timedelta64[ns]', freq=None)"
)
with pd.option_context("display.width", 300):
for idx, expected in zip(
[idx1, idx2, idx3, idx4, idx5], [exp1, exp2, exp3, exp4, exp5]
):
result = getattr(idx, method)()
assert result == expected
def test_representation_to_series(self):
idx1 = TimedeltaIndex([], freq="D")
idx2 = TimedeltaIndex(["1 days"], freq="D")
idx3 = TimedeltaIndex(["1 days", "2 days"], freq="D")
idx4 = TimedeltaIndex(["1 days", "2 days", "3 days"], freq="D")
idx5 = TimedeltaIndex(["1 days 00:00:01", "2 days", "3 days"])
exp1 = """Series([], dtype: timedelta64[ns])"""
exp2 = "0 1 days\ndtype: timedelta64[ns]"
exp3 = "0 1 days\n1 2 days\ndtype: timedelta64[ns]"
exp4 = "0 1 days\n1 2 days\n2 3 days\ndtype: timedelta64[ns]"
exp5 = (
"0 1 days 00:00:01\n"
"1 2 days 00:00:00\n"
"2 3 days 00:00:00\n"
"dtype: timedelta64[ns]"
)
with pd.option_context("display.width", 300):
for idx, expected in zip(
[idx1, idx2, idx3, idx4, idx5], [exp1, exp2, exp3, exp4, exp5]
):
result = repr(Series(idx))
assert result == expected
def test_summary(self):
# GH#9116
idx1 = TimedeltaIndex([], freq="D")
idx2 = TimedeltaIndex(["1 days"], freq="D")
idx3 = TimedeltaIndex(["1 days", "2 days"], freq="D")
idx4 = TimedeltaIndex(["1 days", "2 days", "3 days"], freq="D")
idx5 = TimedeltaIndex(["1 days 00:00:01", "2 days", "3 days"])
exp1 = "TimedeltaIndex: 0 entries\nFreq: D"
exp2 = "TimedeltaIndex: 1 entries, 1 days to 1 days\nFreq: D"
exp3 = "TimedeltaIndex: 2 entries, 1 days to 2 days\nFreq: D"
exp4 = "TimedeltaIndex: 3 entries, 1 days to 3 days\nFreq: D"
exp5 = "TimedeltaIndex: 3 entries, 1 days 00:00:01 to 3 days 00:00:00"
for idx, expected in zip(
[idx1, idx2, idx3, idx4, idx5], [exp1, exp2, exp3, exp4, exp5]
):
result = idx._summary()
assert result == expected
| bsd-3-clause |
davideanastasia/vantgrd-py | vantgrd/logistic/logistic_regression_ftrl.py | 1 | 5135 | # Copyright 2017 Davide Anastasia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from math import fabs, sqrt
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.utils.validation import check_X_y, check_array, check_is_fitted
from sklearn.utils.multiclass import unique_labels
from sklearn.utils.multiclass import check_classification_targets
from vantgrd.common import logloss, sigmoid, compute_class_weight
from vantgrd.common import ClassificationTrainTracker
class LogisticRegressionFTRL(BaseEstimator, ClassifierMixin):
def __init__(self, alpha=0.05, beta=0.05, l1=.01, l2=.01, epochs=1, rate=50000, class_weight=None):
self.alpha = alpha
self.beta = beta
self.l1 = l1
self.l2 = l2
self.epochs = epochs
self.class_weight = class_weight
self.classes_ = None
self.X_ = None
self.y_ = None
self.z_ = None
self.n_ = None
self.fit_flag_ = False
self.train_tracker_ = ClassificationTrainTracker(rate)
def _clear_params(self):
        # All model parameters are reset to their initial values (see __init__ description)
self.classes_ = None
self.class_weight_ = None
self.log_likelihood_ = 0
self.loss_ = []
self.target_ratio_ = 0.
self.X_ = None
self.y_ = None
self.z_ = None
self.n_ = None
self.fit_flag_ = False
def _update_class_weight(self, _X, _y):
if self.class_weight is None:
self.class_weight_ = compute_class_weight(_y)
else:
self.class_weight_ = self.class_weight
def _update(self, y, p, x, w):
d = (p - y)
for idxi in range(len(x)): # for idxi, xi in enumerate(x):
g = d * x[idxi]
s = (sqrt(self.n_[idxi] + g * g) - sqrt(self.n_[idxi])) / self.alpha
self.z_[idxi] += self.class_weight_[y] * (g - s * w[idxi])
self.n_[idxi] += self.class_weight_[y] * (g * g)
def _get_w(self, idxi):
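        # FTRL-Proximal closed form (McMahan et al., 2013): w_i = 0 when
        # |z_i| <= l1, otherwise w_i = -(z_i - sign(z_i)*l1) /
        # (l2 + (beta + sqrt(n_i)) / alpha), i.e. L1 soft-thresholding combined
        # with a per-coordinate adaptive learning rate.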
if fabs(self.z_[idxi]) <= self.l1:
return 0.
else:
sign = 1. if self.z_[idxi] >= 0 else -1.
return - (self.z_[idxi] - sign * self.l1) / (self.l2 + (self.beta + sqrt(self.n_[idxi])) / self.alpha)
def _train(self, X, y, n_samples, n_features):
iter_idx = np.arange(n_samples)
np.random.shuffle(iter_idx)
for t, data_idx in enumerate(iter_idx):
curr_x = X[data_idx, :]
curr_y = y[data_idx]
wtx = 0.
curr_w = {}
for idxi in range(n_features):
curr_w[idxi] = self._get_w(idxi)
wtx += (curr_w[idxi] * curr_x[idxi])
curr_p = sigmoid(wtx)
log_likelihood = logloss(curr_p, curr_y)
self.train_tracker_.track(log_likelihood)
self._update(curr_y, curr_p, curr_x, curr_w)
def fit(self, X, y):
if self.fit_flag_:
self._clear_params()
X, y = check_X_y(X, y)
check_classification_targets(y)
self.classes_ = unique_labels(y)
self.X_ = X
self.y_ = y
# setup parameters
n_samples, n_features = X.shape
if self.z_ is None and self.n_ is None:
self.z_ = np.zeros(n_features)
self.n_ = np.zeros(n_features)
self._update_class_weight(X, y)
self.train_tracker_.start_train()
for epoch in range(self.epochs):
self.train_tracker_.start_epoch(epoch)
self._train(X, y, n_samples, n_features)
self.train_tracker_.end_epoch()
self.train_tracker_.end_train()
self.fit_flag_ = True
return self
def predict(self, X):
check_is_fitted(self, ['X_', 'y_'])
X = check_array(X)
n_samples, n_features = X.shape
y_test_predict = np.zeros(n_samples)
w = np.zeros(n_features)
for idxi in range(n_features):
w[idxi] = self._get_w(idxi)
# print w
for t in range(n_samples):
x = X[t, :]
wtx = np.dot(w, x)
p = sigmoid(wtx)
y_test_predict[t] = 0. if p < 0.5 else 1.
return y_test_predict
def raw_predict(self, X):
check_is_fitted(self, ['X_', 'y_'])
X = check_array(X)
n_samples, n_features = X.shape
w = np.zeros(n_features)
for idxi in range(n_features):
w[idxi] = self._get_w(idxi)
y = np.dot(X, w)
for idxi in range(y.size):
y[idxi] = sigmoid(y[idxi])
return y
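# --- Usage sketch (not part of the library) ------------------------------------
# Fits the FTRL-proximal logistic regression on a small synthetic problem;
# make_blobs is only a stand-in for real binary-labelled data.
def _ftrl_usage_sketch():
    from sklearn.datasets import make_blobs
    X, y = make_blobs(n_samples=500, centers=2, random_state=1)
    model = LogisticRegressionFTRL(alpha=0.1, epochs=3)
    model.fit(X, y)
    return (model.predict(X) == y).mean()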
| apache-2.0 |
badlogicmanpreet/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/__init__.py | 69 | 28184 | """
This is an object-orient plotting library.
A procedural interface is provided by the companion pylab module,
which may be imported directly, e.g::
from pylab import *
or using ipython::
ipython -pylab
For the most part, direct use of the object-oriented library is
encouraged when programming rather than working interactively. The
exceptions are the pylab commands :func:`~matplotlib.pyplot.figure`,
:func:`~matplotlib.pyplot.subplot`,
:func:`~matplotlib.backends.backend_qt4agg.show`, and
:func:`~pyplot.savefig`, which can greatly simplify scripting.
Modules include:
:mod:`matplotlib.axes`
defines the :class:`~matplotlib.axes.Axes` class. Most pylab
commands are wrappers for :class:`~matplotlib.axes.Axes`
methods. The axes module is the highest level of OO access to
the library.
:mod:`matplotlib.figure`
defines the :class:`~matplotlib.figure.Figure` class.
:mod:`matplotlib.artist`
defines the :class:`~matplotlib.artist.Artist` base class for
all classes that draw things.
:mod:`matplotlib.lines`
defines the :class:`~matplotlib.lines.Line2D` class for
drawing lines and markers
:mod:`matplotlib.patches`
defines classes for drawing polygons
:mod:`matplotlib.text`
defines the :class:`~matplotlib.text.Text`,
:class:`~matplotlib.text.TextWithDash`, and
:class:`~matplotlib.text.Annotate` classes
:mod:`matplotlib.image`
defines the :class:`~matplotlib.image.AxesImage` and
:class:`~matplotlib.image.FigureImage` classes
:mod:`matplotlib.collections`
classes for efficient drawing of groups of lines or polygons
:mod:`matplotlib.colors`
classes for interpreting color specifications and for making
colormaps
:mod:`matplotlib.cm`
colormaps and the :class:`~matplotlib.image.ScalarMappable`
mixin class for providing color mapping functionality to other
classes
:mod:`matplotlib.ticker`
classes for calculating tick mark locations and for formatting
tick labels
:mod:`matplotlib.backends`
a subpackage with modules for various gui libraries and output
formats
The base matplotlib namespace includes:
:data:`~matplotlib.rcParams`
a global dictionary of default configuration settings. It is
    initialized by code which may be overridden by a matplotlibrc
file.
:func:`~matplotlib.rc`
a function for setting groups of rcParams values
:func:`~matplotlib.use`
a function for setting the matplotlib backend. If used, this
function must be called immediately after importing matplotlib
for the first time. In particular, it must be called
**before** importing pylab (if pylab is imported).
matplotlib is written by John D. Hunter (jdh2358 at gmail.com) and a
host of others.
"""
from __future__ import generators
__version__ = '0.98.5.2'
__revision__ = '$Revision: 6660 $'
__date__ = '$Date: 2008-12-18 06:10:51 -0600 (Thu, 18 Dec 2008) $'
import os, re, shutil, subprocess, sys, warnings
import distutils.sysconfig
import distutils.version
NEWCONFIG = False
# Needed for toolkit setuptools support
if 0:
try:
__import__('pkg_resources').declare_namespace(__name__)
except ImportError:
pass # must not have setuptools
if not hasattr(sys, 'argv'): # for modpython
sys.argv = ['modpython']
"""
Manage user customizations through a rc file.
The default file location is given in the following order
- environment variable MATPLOTLIBRC
- HOME/.matplotlib/matplotlibrc if HOME is defined
- PATH/matplotlibrc where PATH is the return value of
get_data_path()
"""
import sys, os, tempfile
from rcsetup import defaultParams, validate_backend, validate_toolbar
from rcsetup import validate_cairo_format
major, minor1, minor2, s, tmp = sys.version_info
_python24 = major>=2 and minor1>=4
# the havedate check was a legacy from old matplotlib which preceded
# datetime support
_havedate = True
#try:
# import pkg_resources # pkg_resources is part of setuptools
#except ImportError: _have_pkg_resources = False
#else: _have_pkg_resources = True
if not _python24:
raise ImportError('matplotlib requires Python 2.4 or later')
import numpy
nn = numpy.__version__.split('.')
if not (int(nn[0]) >= 1 and int(nn[1]) >= 1):
raise ImportError(
'numpy 1.1 or later is required; you have %s' % numpy.__version__)
def is_string_like(obj):
if hasattr(obj, 'shape'): return 0
try: obj + ''
except (TypeError, ValueError): return 0
return 1
def _is_writable_dir(p):
"""
    p is a string pointing to a putative writable dir -- return True if p
is such a string, else False
"""
try: p + '' # test is string like
except TypeError: return False
try:
t = tempfile.TemporaryFile(dir=p)
t.write('1')
t.close()
except OSError: return False
else: return True
class Verbose:
"""
A class to handle reporting. Set the fileo attribute to any file
instance to handle the output. Default is sys.stdout
"""
levels = ('silent', 'helpful', 'debug', 'debug-annoying')
vald = dict( [(level, i) for i,level in enumerate(levels)])
# parse the verbosity from the command line; flags look like
# --verbose-silent or --verbose-helpful
_commandLineVerbose = None
for arg in sys.argv[1:]:
if not arg.startswith('--verbose-'): continue
_commandLineVerbose = arg[10:]
def __init__(self):
self.set_level('silent')
self.fileo = sys.stdout
def set_level(self, level):
'set the verbosity to one of the Verbose.levels strings'
if self._commandLineVerbose is not None:
level = self._commandLineVerbose
if level not in self.levels:
raise ValueError('Illegal verbose string "%s". Legal values are %s'%(level, self.levels))
self.level = level
def set_fileo(self, fname):
std = {
'sys.stdout': sys.stdout,
'sys.stderr': sys.stderr,
}
if fname in std:
self.fileo = std[fname]
else:
try:
fileo = file(fname, 'w')
except IOError:
raise ValueError('Verbose object could not open log file "%s" for writing.\nCheck your matplotlibrc verbose.fileo setting'%fname)
else:
self.fileo = fileo
def report(self, s, level='helpful'):
"""
print message s to self.fileo if self.level>=level. Return
value indicates whether a message was issued
"""
if self.ge(level):
print >>self.fileo, s
return True
return False
def wrap(self, fmt, func, level='helpful', always=True):
"""
        return a callable function that wraps func and reports its
        output through the verbose handler if the current verbosity
        level is at least level
if always is True, the report will occur on every function
call; otherwise only on the first time the function is called
"""
assert callable(func)
def wrapper(*args, **kwargs):
ret = func(*args, **kwargs)
if (always or not wrapper._spoke):
spoke = self.report(fmt%ret, level)
if not wrapper._spoke: wrapper._spoke = spoke
return ret
wrapper._spoke = False
wrapper.__doc__ = func.__doc__
return wrapper
def ge(self, level):
'return true if self.level is >= level'
return self.vald[self.level]>=self.vald[level]
verbose=Verbose()
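
# A short usage sketch of the Verbose helper defined above. _demo_verbose_wrap
# is a hypothetical, unused illustration and not part of matplotlib's API.
def _demo_verbose_wrap():
    # report() prints only when the active verbosity is at least the given level
    verbose.report('starting demo', level='debug')
    def _add(a, b):
        return a + b
    # wrap() returns a callable that reports fmt % return_value through the
    # verbose handler (on every call, or only the first when always=False)
    wrapped_add = verbose.wrap('add returned %s', _add, level='debug')
    return wrapped_add(1, 2)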
def checkdep_dvipng():
try:
s = subprocess.Popen(['dvipng','-version'], stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
line = s.stdout.readlines()[1]
v = line.split()[-1]
return v
except (IndexError, ValueError, OSError):
return None
def checkdep_ghostscript():
try:
if sys.platform == 'win32':
command_args = ['gswin32c', '--version']
else:
command_args = ['gs', '--version']
s = subprocess.Popen(command_args, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
v = s.stdout.read()[:-1]
return v
except (IndexError, ValueError, OSError):
return None
def checkdep_tex():
try:
s = subprocess.Popen(['tex','-version'], stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
line = s.stdout.readlines()[0]
pattern = '3\.1\d+'
match = re.search(pattern, line)
v = match.group(0)
return v
except (IndexError, ValueError, AttributeError, OSError):
return None
def checkdep_pdftops():
try:
s = subprocess.Popen(['pdftops','-v'], stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
for line in s.stderr:
if 'version' in line:
v = line.split()[-1]
return v
except (IndexError, ValueError, UnboundLocalError, OSError):
return None
def compare_versions(a, b):
"return True if a is greater than or equal to b"
if a:
a = distutils.version.LooseVersion(a)
b = distutils.version.LooseVersion(b)
if a>=b: return True
else: return False
else: return False
def checkdep_ps_distiller(s):
if not s:
return False
flag = True
gs_req = '7.07'
gs_sugg = '7.07'
gs_v = checkdep_ghostscript()
if compare_versions(gs_v, gs_sugg): pass
elif compare_versions(gs_v, gs_req):
verbose.report(('ghostscript-%s found. ghostscript-%s or later '
'is recommended to use the ps.usedistiller option.') % (gs_v, gs_sugg))
else:
flag = False
warnings.warn(('matplotlibrc ps.usedistiller option can not be used '
'unless ghostscript-%s or later is installed on your system') % gs_req)
if s == 'xpdf':
pdftops_req = '3.0'
pdftops_req_alt = '0.9' # poppler version numbers, ugh
pdftops_v = checkdep_pdftops()
if compare_versions(pdftops_v, pdftops_req):
pass
elif compare_versions(pdftops_v, pdftops_req_alt) and not \
compare_versions(pdftops_v, '1.0'):
pass
else:
flag = False
warnings.warn(('matplotlibrc ps.usedistiller can not be set to '
'xpdf unless xpdf-%s or later is installed on your system') % pdftops_req)
if flag:
return s
else:
return False
def checkdep_usetex(s):
if not s:
return False
tex_req = '3.1415'
gs_req = '7.07'
gs_sugg = '7.07'
dvipng_req = '1.5'
flag = True
tex_v = checkdep_tex()
if compare_versions(tex_v, tex_req): pass
else:
flag = False
warnings.warn(('matplotlibrc text.usetex option can not be used '
'unless TeX-%s or later is '
'installed on your system') % tex_req)
dvipng_v = checkdep_dvipng()
if compare_versions(dvipng_v, dvipng_req): pass
else:
flag = False
warnings.warn( 'matplotlibrc text.usetex can not be used with *Agg '
'backend unless dvipng-1.5 or later is '
'installed on your system')
gs_v = checkdep_ghostscript()
if compare_versions(gs_v, gs_sugg): pass
elif compare_versions(gs_v, gs_req):
verbose.report(('ghostscript-%s found. ghostscript-%s or later is '
'recommended for use with the text.usetex '
'option.') % (gs_v, gs_sugg))
else:
flag = False
warnings.warn(('matplotlibrc text.usetex can not be used '
'unless ghostscript-%s or later is '
'installed on your system') % gs_req)
return flag
def _get_home():
"""Find user's home directory if possible.
Otherwise raise error.
:see: http://mail.python.org/pipermail/python-list/2005-February/263921.html
"""
path=''
try:
path=os.path.expanduser("~")
except:
pass
if not os.path.isdir(path):
for evar in ('HOME', 'USERPROFILE', 'TMP'):
try:
path = os.environ[evar]
if os.path.isdir(path):
break
except: pass
if path:
return path
else:
raise RuntimeError('please define environment variable $HOME')
get_home = verbose.wrap('$HOME=%s', _get_home, always=False)
def _get_configdir():
"""
Return the string representing the configuration dir.
default is HOME/.matplotlib. you can override this with the
MPLCONFIGDIR environment variable
"""
configdir = os.environ.get('MPLCONFIGDIR')
if configdir is not None:
if not _is_writable_dir(configdir):
raise RuntimeError('Could not write to MPLCONFIGDIR="%s"'%configdir)
return configdir
h = get_home()
p = os.path.join(get_home(), '.matplotlib')
if os.path.exists(p):
if not _is_writable_dir(p):
raise RuntimeError("'%s' is not a writable dir; you must set %s/.matplotlib to be a writable dir. You can also set environment variable MPLCONFIGDIR to any writable directory where you want matplotlib data stored "% (h, h))
else:
if not _is_writable_dir(h):
raise RuntimeError("Failed to create %s/.matplotlib; consider setting MPLCONFIGDIR to a writable directory for matplotlib configuration data"%h)
os.mkdir(p)
return p
get_configdir = verbose.wrap('CONFIGDIR=%s', _get_configdir, always=False)
def _get_data_path():
'get the path to matplotlib data'
if 'MATPLOTLIBDATA' in os.environ:
path = os.environ['MATPLOTLIBDATA']
if not os.path.isdir(path):
raise RuntimeError('Path in environment MATPLOTLIBDATA not a directory')
return path
path = os.sep.join([os.path.dirname(__file__), 'mpl-data'])
if os.path.isdir(path): return path
# setuptools' namespace_packages may highjack this init file
# so need to try something known to be in matplotlib, not basemap
import matplotlib.afm
path = os.sep.join([os.path.dirname(matplotlib.afm.__file__), 'mpl-data'])
if os.path.isdir(path): return path
# py2exe zips pure python, so still need special check
if getattr(sys,'frozen',None):
path = os.path.join(os.path.split(sys.path[0])[0], 'mpl-data')
if os.path.isdir(path): return path
else:
# Try again assuming we need to step up one more directory
path = os.path.join(os.path.split(os.path.split(sys.path[0])[0])[0],
'mpl-data')
if os.path.isdir(path): return path
else:
# Try again assuming sys.path[0] is a dir not a exe
path = os.path.join(sys.path[0], 'mpl-data')
if os.path.isdir(path): return path
raise RuntimeError('Could not find the matplotlib data files')
def _get_data_path_cached():
if defaultParams['datapath'][0] is None:
defaultParams['datapath'][0] = _get_data_path()
return defaultParams['datapath'][0]
get_data_path = verbose.wrap('matplotlib data path %s', _get_data_path_cached,
always=False)
def get_example_data(fname):
"""
return a filehandle to one of the example files in mpl-data/example
*fname*
the name of one of the files in mpl-data/example
"""
datadir = os.path.join(get_data_path(), 'example')
fullpath = os.path.join(datadir, fname)
if not os.path.exists(fullpath):
raise IOError('could not find matplotlib example file "%s" in data directory "%s"'%(
fname, datadir))
return file(fullpath, 'rb')
def get_py2exe_datafiles():
datapath = get_data_path()
head, tail = os.path.split(datapath)
d = {}
for root, dirs, files in os.walk(datapath):
# Need to explicitly remove cocoa_agg files or py2exe complains
        # NOTE: I don't know why, but do as the previous version did
if 'Matplotlib.nib' in files:
files.remove('Matplotlib.nib')
files = [os.path.join(root, filename) for filename in files]
root = root.replace(tail, 'mpl-data')
root = root[root.index('mpl-data'):]
d[root] = files
return d.items()
def matplotlib_fname():
"""
Return the path to the rc file
Search order:
* current working dir
* environ var MATPLOTLIBRC
* HOME/.matplotlib/matplotlibrc
* MATPLOTLIBDATA/matplotlibrc
"""
oldname = os.path.join( os.getcwd(), '.matplotlibrc')
if os.path.exists(oldname):
print >> sys.stderr, """\
WARNING: Old rc filename ".matplotlibrc" found in working dir
and renamed to the new default rc file name "matplotlibrc"
(no leading "dot"). """
shutil.move('.matplotlibrc', 'matplotlibrc')
home = get_home()
oldname = os.path.join( home, '.matplotlibrc')
if os.path.exists(oldname):
configdir = get_configdir()
newname = os.path.join(configdir, 'matplotlibrc')
print >> sys.stderr, """\
WARNING: Old rc filename "%s" found and renamed to
new default rc file name "%s"."""%(oldname, newname)
shutil.move(oldname, newname)
fname = os.path.join( os.getcwd(), 'matplotlibrc')
if os.path.exists(fname): return fname
if 'MATPLOTLIBRC' in os.environ:
path = os.environ['MATPLOTLIBRC']
if os.path.exists(path):
fname = os.path.join(path, 'matplotlibrc')
if os.path.exists(fname):
return fname
fname = os.path.join(get_configdir(), 'matplotlibrc')
if os.path.exists(fname): return fname
path = get_data_path() # guaranteed to exist or raise
fname = os.path.join(path, 'matplotlibrc')
if not os.path.exists(fname):
warnings.warn('Could not find matplotlibrc; using defaults')
return fname
_deprecated_map = {
'text.fontstyle': 'font.style',
'text.fontangle': 'font.style',
'text.fontvariant': 'font.variant',
'text.fontweight': 'font.weight',
'text.fontsize': 'font.size',
'tick.size' : 'tick.major.size',
}
class RcParams(dict):
"""
A dictionary object including validation
validating functions are defined and associated with rc parameters in
:mod:`matplotlib.rcsetup`
"""
validate = dict([ (key, converter) for key, (default, converter) in \
defaultParams.iteritems() ])
def __setitem__(self, key, val):
try:
if key in _deprecated_map.keys():
alt = _deprecated_map[key]
warnings.warn('%s is deprecated in matplotlibrc. Use %s \
instead.'% (key, alt))
key = alt
cval = self.validate[key](val)
dict.__setitem__(self, key, cval)
except KeyError:
raise KeyError('%s is not a valid rc parameter.\
See rcParams.keys() for a list of valid parameters.'%key)
def rc_params(fail_on_error=False):
'Return the default params updated from the values in the rc file'
fname = matplotlib_fname()
if not os.path.exists(fname):
# this should never happen, default in mpl-data should always be found
message = 'could not find rc file; returning defaults'
ret = RcParams([ (key, default) for key, (default, converter) in \
defaultParams.iteritems() ])
warnings.warn(message)
return ret
cnt = 0
rc_temp = {}
for line in file(fname):
cnt += 1
strippedline = line.split('#',1)[0].strip()
if not strippedline: continue
tup = strippedline.split(':',1)
if len(tup) !=2:
warnings.warn('Illegal line #%d\n\t%s\n\tin file "%s"'%\
(cnt, line, fname))
continue
key, val = tup
key = key.strip()
val = val.strip()
if key in rc_temp:
warnings.warn('Duplicate key in file "%s", line #%d'%(fname,cnt))
rc_temp[key] = (val, line, cnt)
ret = RcParams([ (key, default) for key, (default, converter) in \
defaultParams.iteritems() ])
for key in ('verbose.level', 'verbose.fileo'):
if key in rc_temp:
val, line, cnt = rc_temp.pop(key)
if fail_on_error:
ret[key] = val # try to convert to proper type or raise
else:
try: ret[key] = val # try to convert to proper type or skip
except Exception, msg:
warnings.warn('Bad val "%s" on line #%d\n\t"%s"\n\tin file \
"%s"\n\t%s' % (val, cnt, line, fname, msg))
verbose.set_level(ret['verbose.level'])
verbose.set_fileo(ret['verbose.fileo'])
for key, (val, line, cnt) in rc_temp.iteritems():
if key in defaultParams:
if fail_on_error:
ret[key] = val # try to convert to proper type or raise
else:
try: ret[key] = val # try to convert to proper type or skip
except Exception, msg:
warnings.warn('Bad val "%s" on line #%d\n\t"%s"\n\tin file \
"%s"\n\t%s' % (val, cnt, line, fname, msg))
else:
print >> sys.stderr, """
Bad key "%s" on line %d in
%s.
You probably need to get an updated matplotlibrc file from
http://matplotlib.sf.net/_static/matplotlibrc or from the matplotlib source
distribution""" % (key, cnt, fname)
if ret['datapath'] is None:
ret['datapath'] = get_data_path()
if not ret['text.latex.preamble'] == ['']:
verbose.report("""
*****************************************************************
You have the following UNSUPPORTED LaTeX preamble customizations:
%s
Please do not ask for support with these customizations active.
*****************************************************************
"""% '\n'.join(ret['text.latex.preamble']), 'helpful')
verbose.report('loaded rc file %s'%fname)
return ret
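
# For reference, each matplotlibrc line parsed above has the form
#     key : value   # optional trailing comment
# e.g. "lines.linewidth : 2.0"; keys missing from defaultParams trigger the
# "Bad key" message printed above.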
# this is the instance used by the matplotlib classes
rcParams = rc_params()
rcParamsDefault = RcParams([ (key, default) for key, (default, converter) in \
defaultParams.iteritems() ])
rcParams['ps.usedistiller'] = checkdep_ps_distiller(rcParams['ps.usedistiller'])
rcParams['text.usetex'] = checkdep_usetex(rcParams['text.usetex'])
def rc(group, **kwargs):
"""
Set the current rc params. Group is the grouping for the rc, eg.
for ``lines.linewidth`` the group is ``lines``, for
``axes.facecolor``, the group is ``axes``, and so on. Group may
also be a list or tuple of group names, eg. (*xtick*, *ytick*).
*kwargs* is a dictionary attribute name/value pairs, eg::
rc('lines', linewidth=2, color='r')
sets the current rc params and is equivalent to::
rcParams['lines.linewidth'] = 2
rcParams['lines.color'] = 'r'
The following aliases are available to save typing for interactive
users:
===== =================
Alias Property
===== =================
'lw' 'linewidth'
'ls' 'linestyle'
'c' 'color'
'fc' 'facecolor'
'ec' 'edgecolor'
'mew' 'markeredgewidth'
'aa' 'antialiased'
===== =================
Thus you could abbreviate the above rc command as::
rc('lines', lw=2, c='r')
Note you can use python's kwargs dictionary facility to store
dictionaries of default parameters. Eg, you can customize the
font rc as follows::
font = {'family' : 'monospace',
'weight' : 'bold',
'size' : 'larger'}
rc('font', **font) # pass in the font dict as kwargs
This enables you to easily switch between several configurations.
Use :func:`~matplotlib.pyplot.rcdefaults` to restore the default
rc params after changes.
"""
aliases = {
'lw' : 'linewidth',
'ls' : 'linestyle',
'c' : 'color',
'fc' : 'facecolor',
'ec' : 'edgecolor',
'mew' : 'markeredgewidth',
'aa' : 'antialiased',
}
if is_string_like(group):
group = (group,)
for g in group:
for k,v in kwargs.items():
name = aliases.get(k) or k
key = '%s.%s' % (g, name)
if key not in rcParams:
raise KeyError('Unrecognized key "%s" for group "%s" and name "%s"' %
(key, g, name))
rcParams[key] = v
def rcdefaults():
"""
Restore the default rc params - the ones that were created at
matplotlib load time.
"""
rcParams.update(rcParamsDefault)
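
# A brief usage sketch for rc()/rcdefaults(); _demo_rc_usage is a hypothetical,
# unused helper kept only for illustration.
def _demo_rc_usage():
    # group + keyword form, using the documented aliases (lw -> linewidth,
    # c -> color), then a tuple of groups, then restore the load-time defaults
    rc('lines', lw=2, c='r')
    rc(('xtick', 'ytick'), labelsize=8.0)
    rcdefaults()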
if NEWCONFIG:
#print "importing from reorganized config system!"
try:
from config import rcParams, rcdefaults, mplConfig, save_config
verbose.set_level(rcParams['verbose.level'])
verbose.set_fileo(rcParams['verbose.fileo'])
except:
from config import rcParams, rcdefaults
_use_error_msg = """ This call to matplotlib.use() has no effect
because the backend has already been chosen;
matplotlib.use() must be called *before* pylab, matplotlib.pyplot,
or matplotlib.backends is imported for the first time.
"""
def use(arg, warn=True):
"""
Set the matplotlib backend to one of the known backends.
The argument is case-insensitive. For the Cairo backend,
the argument can have an extension to indicate the type of
output. Example:
use('cairo.pdf')
will specify a default of pdf output generated by Cairo.
Note: this function must be called *before* importing pylab for
the first time; or, if you are not using pylab, it must be called
before importing matplotlib.backends. If warn is True, a warning
    is issued if you try and call this after pylab or pyplot have been
loaded. In certain black magic use cases, eg
pyplot.switch_backends, we are doing the reloading necessary to
make the backend switch work (in some cases, eg pure image
    backends) so one can set warn=False to suppress the warnings
"""
if 'matplotlib.backends' in sys.modules:
if warn: warnings.warn(_use_error_msg)
return
arg = arg.lower()
if arg.startswith('module://'):
name = arg
else:
be_parts = arg.split('.')
name = validate_backend(be_parts[0])
rcParams['backend'] = name
if name == 'cairo' and len(be_parts) > 1:
rcParams['cairo.format'] = validate_cairo_format(be_parts[1])
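
# Hypothetical illustration of the accepted argument forms for use(); kept in
# an unused helper so importing this module has no side effects.
def _demo_use_strings():
    use('Agg')                    # plain backend name, case-insensitive
    use('cairo.pdf', warn=False)  # Cairo backend plus a default output format
    use('module://my_backend')    # fully qualified external backend module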
def get_backend():
"Returns the current backend"
return rcParams['backend']
def interactive(b):
"""
Set interactive mode to boolean b.
If b is True, then draw after every plotting command, eg, after xlabel
"""
rcParams['interactive'] = b
def is_interactive():
'Return true if plot mode is interactive'
b = rcParams['interactive']
return b
def tk_window_focus():
"""Return true if focus maintenance under TkAgg on win32 is on.
This currently works only for python.exe and IPython.exe.
Both IDLE and Pythonwin.exe fail badly when tk_window_focus is on."""
if rcParams['backend'] != 'TkAgg':
return False
return rcParams['tk.window_focus']
# Now allow command line to override
# Allow command line access to the backend with -d (matlab compatible
# flag)
for s in sys.argv[1:]:
if s.startswith('-d') and len(s) > 2: # look for a -d flag
try:
use(s[2:])
except (KeyError, ValueError):
pass
# we don't want to assume all -d flags are backends, eg -debug
verbose.report('matplotlib version %s'%__version__)
verbose.report('verbose.level %s'%verbose.level)
verbose.report('interactive is %s'%rcParams['interactive'])
verbose.report('units is %s'%rcParams['units'])
verbose.report('platform is %s'%sys.platform)
verbose.report('loaded modules: %s'%sys.modules.keys(), 'debug')
| agpl-3.0 |
sumspr/scikit-learn | sklearn/tree/tests/test_tree.py | 57 | 47417 | """
Testing for the tree module (sklearn.tree).
"""
import pickle
from functools import partial
from itertools import product
import platform
import numpy as np
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from sklearn.random_projection import sparse_random_matrix
from sklearn.metrics import accuracy_score
from sklearn.metrics import mean_squared_error
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import raises
from sklearn.utils.validation import check_random_state
from sklearn.utils.validation import NotFittedError
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import ExtraTreeClassifier
from sklearn.tree import ExtraTreeRegressor
from sklearn import tree
from sklearn.tree.tree import SPARSE_SPLITTERS
from sklearn.tree._tree import TREE_LEAF
from sklearn import datasets
from sklearn.preprocessing._weights import _balance_weights
CLF_CRITERIONS = ("gini", "entropy")
REG_CRITERIONS = ("mse", )
CLF_TREES = {
"DecisionTreeClassifier": DecisionTreeClassifier,
"Presort-DecisionTreeClassifier": partial(DecisionTreeClassifier,
splitter="presort-best"),
"ExtraTreeClassifier": ExtraTreeClassifier,
}
REG_TREES = {
"DecisionTreeRegressor": DecisionTreeRegressor,
"Presort-DecisionTreeRegressor": partial(DecisionTreeRegressor,
splitter="presort-best"),
"ExtraTreeRegressor": ExtraTreeRegressor,
}
ALL_TREES = dict()
ALL_TREES.update(CLF_TREES)
ALL_TREES.update(REG_TREES)
SPARSE_TREES = [name for name, Tree in ALL_TREES.items()
if Tree().splitter in SPARSE_SPLITTERS]
X_small = np.array([
[0, 0, 4, 0, 0, 0, 1, -14, 0, -4, 0, 0, 0, 0, ],
[0, 0, 5, 3, 0, -4, 0, 0, 1, -5, 0.2, 0, 4, 1, ],
[-1, -1, 0, 0, -4.5, 0, 0, 2.1, 1, 0, 0, -4.5, 0, 1, ],
[-1, -1, 0, -1.2, 0, 0, 0, 0, 0, 0, 0.2, 0, 0, 1, ],
[-1, -1, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 1, ],
[-1, -2, 0, 4, -3, 10, 4, 0, -3.2, 0, 4, 3, -4, 1, ],
[2.11, 0, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0.5, 0, -3, 1, ],
[2.11, 0, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0, 0, -2, 1, ],
[2.11, 8, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0, 0, -2, 1, ],
[2.11, 8, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0.5, 0, -1, 0, ],
[2, 8, 5, 1, 0.5, -4, 10, 0, 1, -5, 3, 0, 2, 0, ],
[2, 0, 1, 1, 1, -1, 1, 0, 0, -2, 3, 0, 1, 0, ],
[2, 0, 1, 2, 3, -1, 10, 2, 0, -1, 1, 2, 2, 0, ],
[1, 1, 0, 2, 2, -1, 1, 2, 0, -5, 1, 2, 3, 0, ],
[3, 1, 0, 3, 0, -4, 10, 0, 1, -5, 3, 0, 3, 1, ],
[2.11, 8, -6, -0.5, 0, 1, 0, 0, -3.2, 6, 0.5, 0, -3, 1, ],
[2.11, 8, -6, -0.5, 0, 1, 0, 0, -3.2, 6, 1.5, 1, -1, -1, ],
[2.11, 8, -6, -0.5, 0, 10, 0, 0, -3.2, 6, 0.5, 0, -1, -1, ],
[2, 0, 5, 1, 0.5, -2, 10, 0, 1, -5, 3, 1, 0, -1, ],
[2, 0, 1, 1, 1, -2, 1, 0, 0, -2, 0, 0, 0, 1, ],
[2, 1, 1, 1, 2, -1, 10, 2, 0, -1, 0, 2, 1, 1, ],
[1, 1, 0, 0, 1, -3, 1, 2, 0, -5, 1, 2, 1, 1, ],
[3, 1, 0, 1, 0, -4, 1, 0, 1, -2, 0, 0, 1, 0, ]])
y_small = [1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0,
0, 0]
y_small_reg = [1.0, 2.1, 1.2, 0.05, 10, 2.4, 3.1, 1.01, 0.01, 2.98, 3.1, 1.1,
0.0, 1.2, 2, 11, 0, 0, 4.5, 0.201, 1.06, 0.9, 0]
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = np.random.RandomState(1)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
digits = datasets.load_digits()
perm = rng.permutation(digits.target.size)
digits.data = digits.data[perm]
digits.target = digits.target[perm]
random_state = check_random_state(0)
X_multilabel, y_multilabel = datasets.make_multilabel_classification(
random_state=0, n_samples=30, n_features=10)
X_sparse_pos = random_state.uniform(size=(20, 5))
X_sparse_pos[X_sparse_pos <= 0.8] = 0.
y_random = random_state.randint(0, 4, size=(20, ))
X_sparse_mix = sparse_random_matrix(20, 10, density=0.25, random_state=0)
DATASETS = {
"iris": {"X": iris.data, "y": iris.target},
"boston": {"X": boston.data, "y": boston.target},
"digits": {"X": digits.data, "y": digits.target},
"toy": {"X": X, "y": y},
"clf_small": {"X": X_small, "y": y_small},
"reg_small": {"X": X_small, "y": y_small_reg},
"multilabel": {"X": X_multilabel, "y": y_multilabel},
"sparse-pos": {"X": X_sparse_pos, "y": y_random},
"sparse-neg": {"X": - X_sparse_pos, "y": y_random},
"sparse-mix": {"X": X_sparse_mix, "y": y_random},
"zeros": {"X": np.zeros((20, 3)), "y": y_random}
}
for name in DATASETS:
DATASETS[name]["X_sparse"] = csc_matrix(DATASETS[name]["X"])
def assert_tree_equal(d, s, message):
    assert_equal(s.node_count, d.node_count,
                 "{0}: unequal number of nodes ({1} != {2})"
                 "".format(message, s.node_count, d.node_count))
    assert_array_equal(d.children_right, s.children_right,
                       message + ": unequal children_right")
    assert_array_equal(d.children_left, s.children_left,
                       message + ": unequal children_left")
    external = d.children_right == TREE_LEAF
    internal = np.logical_not(external)
    assert_array_equal(d.feature[internal], s.feature[internal],
                       message + ": unequal features")
    assert_array_equal(d.threshold[internal], s.threshold[internal],
                       message + ": unequal threshold")
    assert_array_equal(d.n_node_samples.sum(), s.n_node_samples.sum(),
                       message + ": unequal sum(n_node_samples)")
    assert_array_equal(d.n_node_samples, s.n_node_samples,
                       message + ": unequal n_node_samples")
    assert_almost_equal(d.impurity, s.impurity,
                        err_msg=message + ": unequal impurity")
    assert_array_almost_equal(d.value[external], s.value[external],
                              err_msg=message + ": unequal value")
def test_classification_toy():
# Check classification on a toy dataset.
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
clf = Tree(max_features=1, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
def test_weighted_classification_toy():
# Check classification on a weighted toy dataset.
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y, sample_weight=np.ones(len(X)))
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
clf.fit(X, y, sample_weight=np.ones(len(X)) * 0.5)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
def test_regression_toy():
# Check regression on a toy dataset.
for name, Tree in REG_TREES.items():
reg = Tree(random_state=1)
reg.fit(X, y)
assert_almost_equal(reg.predict(T), true_result,
err_msg="Failed with {0}".format(name))
        reg = Tree(max_features=1, random_state=1)
        reg.fit(X, y)
        assert_almost_equal(reg.predict(T), true_result,
err_msg="Failed with {0}".format(name))
def test_xor():
# Check on a XOR problem
y = np.zeros((10, 10))
y[:5, :5] = 1
y[5:, 5:] = 1
gridx, gridy = np.indices(y.shape)
X = np.vstack([gridx.ravel(), gridy.ravel()]).T
y = y.ravel()
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
assert_equal(clf.score(X, y), 1.0,
"Failed with {0}".format(name))
clf = Tree(random_state=0, max_features=1)
clf.fit(X, y)
assert_equal(clf.score(X, y), 1.0,
"Failed with {0}".format(name))
def test_iris():
# Check consistency on dataset iris.
for (name, Tree), criterion in product(CLF_TREES.items(), CLF_CRITERIONS):
clf = Tree(criterion=criterion, random_state=0)
clf.fit(iris.data, iris.target)
score = accuracy_score(clf.predict(iris.data), iris.target)
assert_greater(score, 0.9,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
clf = Tree(criterion=criterion, max_features=2, random_state=0)
clf.fit(iris.data, iris.target)
score = accuracy_score(clf.predict(iris.data), iris.target)
assert_greater(score, 0.5,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
def test_boston():
# Check consistency on dataset boston house prices.
for (name, Tree), criterion in product(REG_TREES.items(), REG_CRITERIONS):
reg = Tree(criterion=criterion, random_state=0)
reg.fit(boston.data, boston.target)
score = mean_squared_error(boston.target, reg.predict(boston.data))
assert_less(score, 1,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
# using fewer features reduces the learning ability of this tree,
# but reduces training time.
reg = Tree(criterion=criterion, max_features=6, random_state=0)
reg.fit(boston.data, boston.target)
score = mean_squared_error(boston.target, reg.predict(boston.data))
assert_less(score, 2,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
def test_probability():
# Predict probabilities using DecisionTreeClassifier.
for name, Tree in CLF_TREES.items():
clf = Tree(max_depth=1, max_features=1, random_state=42)
clf.fit(iris.data, iris.target)
prob_predict = clf.predict_proba(iris.data)
assert_array_almost_equal(np.sum(prob_predict, 1),
np.ones(iris.data.shape[0]),
err_msg="Failed with {0}".format(name))
assert_array_equal(np.argmax(prob_predict, 1),
clf.predict(iris.data),
err_msg="Failed with {0}".format(name))
assert_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)), 8,
err_msg="Failed with {0}".format(name))
def test_arrayrepr():
# Check the array representation.
# Check resize
X = np.arange(10000)[:, np.newaxis]
y = np.arange(10000)
for name, Tree in REG_TREES.items():
reg = Tree(max_depth=None, random_state=0)
reg.fit(X, y)
def test_pure_set():
# Check when y is pure.
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [1, 1, 1, 1, 1, 1]
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(X), y,
err_msg="Failed with {0}".format(name))
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(random_state=0)
reg.fit(X, y)
        assert_almost_equal(reg.predict(X), y,
err_msg="Failed with {0}".format(name))
def test_numerical_stability():
# Check numerical stability.
X = np.array([
[152.08097839, 140.40744019, 129.75102234, 159.90493774],
[142.50700378, 135.81935120, 117.82884979, 162.75781250],
[127.28772736, 140.40744019, 129.75102234, 159.90493774],
[132.37025452, 143.71923828, 138.35694885, 157.84558105],
[103.10237122, 143.71928406, 138.35696411, 157.84559631],
[127.71276855, 143.71923828, 138.35694885, 157.84558105],
[120.91514587, 140.40744019, 129.75102234, 159.90493774]])
y = np.array(
[1., 0.70209277, 0.53896582, 0., 0.90914464, 0.48026916, 0.49622521])
with np.errstate(all="raise"):
for name, Tree in REG_TREES.items():
reg = Tree(random_state=0)
reg.fit(X, y)
reg.fit(X, -y)
reg.fit(-X, y)
reg.fit(-X, -y)
def test_importances():
# Check variable importances.
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=0)
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
importances = clf.feature_importances_
n_important = np.sum(importances > 0.1)
assert_equal(importances.shape[0], 10, "Failed with {0}".format(name))
assert_equal(n_important, 3, "Failed with {0}".format(name))
X_new = clf.transform(X, threshold="mean")
assert_less(0, X_new.shape[1], "Failed with {0}".format(name))
assert_less(X_new.shape[1], X.shape[1], "Failed with {0}".format(name))
# Check on iris that importances are the same for all builders
clf = DecisionTreeClassifier(random_state=0)
clf.fit(iris.data, iris.target)
clf2 = DecisionTreeClassifier(random_state=0,
max_leaf_nodes=len(iris.data))
clf2.fit(iris.data, iris.target)
assert_array_equal(clf.feature_importances_,
clf2.feature_importances_)
@raises(ValueError)
def test_importances_raises():
# Check if variable importance before fit raises ValueError.
clf = DecisionTreeClassifier()
clf.feature_importances_
def test_importances_gini_equal_mse():
# Check that gini is equivalent to mse for binary output variable
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=0)
# The gini index and the mean square error (variance) might differ due
    # to numerical instability. Since those instabilities mainly occur at
    # high tree depth, we restrict the maximal depth.
clf = DecisionTreeClassifier(criterion="gini", max_depth=5,
random_state=0).fit(X, y)
reg = DecisionTreeRegressor(criterion="mse", max_depth=5,
random_state=0).fit(X, y)
assert_almost_equal(clf.feature_importances_, reg.feature_importances_)
assert_array_equal(clf.tree_.feature, reg.tree_.feature)
assert_array_equal(clf.tree_.children_left, reg.tree_.children_left)
assert_array_equal(clf.tree_.children_right, reg.tree_.children_right)
assert_array_equal(clf.tree_.n_node_samples, reg.tree_.n_node_samples)
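
# A small numerical sketch (not one of the original tests) of why gini tracks
# mse for a binary 0/1 target: with class frequency p, the gini index is
# 2 * p * (1 - p) while the variance used by the mse criterion is p * (1 - p),
# so the two impurities are proportional and rank candidate splits identically.
def _gini_variance_sketch(y=np.array([0, 0, 1, 1, 1])):
    p = y.mean()
    gini = 2 * p * (1 - p)
    variance = np.var(y)
    assert_almost_equal(gini, 2 * variance)
    return gini, variance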
def test_max_features():
# Check max_features.
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(max_features="auto")
reg.fit(boston.data, boston.target)
assert_equal(reg.max_features_, boston.data.shape[1])
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(max_features="auto")
clf.fit(iris.data, iris.target)
assert_equal(clf.max_features_, 2)
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_features="sqrt")
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(np.sqrt(iris.data.shape[1])))
est = TreeEstimator(max_features="log2")
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(np.log2(iris.data.shape[1])))
est = TreeEstimator(max_features=1)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 1)
est = TreeEstimator(max_features=3)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 3)
est = TreeEstimator(max_features=0.01)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 1)
est = TreeEstimator(max_features=0.5)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(0.5 * iris.data.shape[1]))
est = TreeEstimator(max_features=1.0)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, iris.data.shape[1])
est = TreeEstimator(max_features=None)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, iris.data.shape[1])
# use values of max_features that are invalid
est = TreeEstimator(max_features=10)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=-1)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=0.0)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=1.5)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features="foobar")
assert_raises(ValueError, est.fit, X, y)
def test_error():
# Test that it gives proper exception on deficient input.
for name, TreeEstimator in CLF_TREES.items():
# predict before fit
est = TreeEstimator()
assert_raises(NotFittedError, est.predict_proba, X)
est.fit(X, y)
X2 = [-2, -1, 1] # wrong feature shape for sample
assert_raises(ValueError, est.predict_proba, X2)
for name, TreeEstimator in ALL_TREES.items():
# Invalid values for parameters
assert_raises(ValueError, TreeEstimator(min_samples_leaf=-1).fit, X, y)
assert_raises(ValueError,
TreeEstimator(min_weight_fraction_leaf=-1).fit,
X, y)
assert_raises(ValueError,
TreeEstimator(min_weight_fraction_leaf=0.51).fit,
X, y)
assert_raises(ValueError, TreeEstimator(min_samples_split=-1).fit,
X, y)
assert_raises(ValueError, TreeEstimator(max_depth=-1).fit, X, y)
assert_raises(ValueError, TreeEstimator(max_features=42).fit, X, y)
# Wrong dimensions
est = TreeEstimator()
y2 = y[:-1]
assert_raises(ValueError, est.fit, X, y2)
# Test with arrays that are non-contiguous.
Xf = np.asfortranarray(X)
est = TreeEstimator()
est.fit(Xf, y)
assert_almost_equal(est.predict(T), true_result)
# predict before fitting
est = TreeEstimator()
assert_raises(NotFittedError, est.predict, T)
# predict on vector with different dims
est.fit(X, y)
t = np.asarray(T)
assert_raises(ValueError, est.predict, t[:, 1:])
# wrong sample shape
Xt = np.array(X).T
est = TreeEstimator()
est.fit(np.dot(X, Xt), y)
assert_raises(ValueError, est.predict, X)
assert_raises(ValueError, est.apply, X)
clf = TreeEstimator()
clf.fit(X, y)
assert_raises(ValueError, clf.predict, Xt)
assert_raises(ValueError, clf.apply, Xt)
# apply before fitting
est = TreeEstimator()
assert_raises(NotFittedError, est.apply, T)
def test_min_samples_leaf():
    # Test that leaves contain at least min_samples_leaf training examples
X = np.asfortranarray(iris.data.astype(tree._tree.DTYPE))
y = iris.target
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes in (None, 1000):
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(min_samples_leaf=5,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
out = est.tree_.apply(X)
node_counts = np.bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert_greater(np.min(leaf_count), 4,
"Failed with {0}".format(name))
def check_min_weight_fraction_leaf(name, datasets, sparse=False):
"""Test if leaves contain at least min_weight_fraction_leaf of the
training set"""
if sparse:
X = DATASETS[datasets]["X_sparse"].astype(np.float32)
else:
X = DATASETS[datasets]["X"].astype(np.float32)
y = DATASETS[datasets]["y"]
weights = rng.rand(X.shape[0])
total_weight = np.sum(weights)
TreeEstimator = ALL_TREES[name]
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes, frac in product((None, 1000), np.linspace(0, 0.5, 6)):
est = TreeEstimator(min_weight_fraction_leaf=frac,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y, sample_weight=weights)
if sparse:
out = est.tree_.apply(X.tocsr())
else:
out = est.tree_.apply(X)
node_weights = np.bincount(out, weights=weights)
# drop inner nodes
leaf_weights = node_weights[node_weights != 0]
assert_greater_equal(
np.min(leaf_weights),
total_weight * est.min_weight_fraction_leaf,
"Failed with {0} "
"min_weight_fraction_leaf={1}".format(
name, est.min_weight_fraction_leaf))
def test_min_weight_fraction_leaf():
# Check on dense input
for name in ALL_TREES:
yield check_min_weight_fraction_leaf, name, "iris"
# Check on sparse input
for name in SPARSE_TREES:
yield check_min_weight_fraction_leaf, name, "multilabel", True
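
# A compact sketch (hypothetical helper mirroring the check above) of how the
# per-leaf weight totals are recovered: apply() maps each sample to its leaf id
# and np.bincount sums the sample weights landing in each node; the nonzero
# entries correspond to the leaves whose weight fraction is being asserted.
def _leaf_weight_sketch():
    X32 = iris.data.astype(np.float32)
    est = DecisionTreeClassifier(min_weight_fraction_leaf=0.2,
                                 random_state=0).fit(X32, iris.target)
    weights = np.ones(X32.shape[0])
    node_weights = np.bincount(est.apply(X32), weights=weights)
    return node_weights[node_weights != 0]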
def test_pickle():
    # Check that tree estimators are picklable
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
serialized_object = pickle.dumps(clf)
clf2 = pickle.loads(serialized_object)
assert_equal(type(clf2), clf.__class__)
score2 = clf2.score(iris.data, iris.target)
assert_equal(score, score2, "Failed to generate same score "
"after pickling (classification) "
"with {0}".format(name))
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(random_state=0)
reg.fit(boston.data, boston.target)
score = reg.score(boston.data, boston.target)
serialized_object = pickle.dumps(reg)
reg2 = pickle.loads(serialized_object)
assert_equal(type(reg2), reg.__class__)
score2 = reg2.score(boston.data, boston.target)
assert_equal(score, score2, "Failed to generate same score "
"after pickling (regression) "
"with {0}".format(name))
def test_multioutput():
# Check estimators on multi-output problems.
X = [[-2, -1],
[-1, -1],
[-1, -2],
[1, 1],
[1, 2],
[2, 1],
[-2, 1],
[-1, 1],
[-1, 2],
[2, -1],
[1, -1],
[1, -2]]
y = [[-1, 0],
[-1, 0],
[-1, 0],
[1, 1],
[1, 1],
[1, 1],
[-1, 2],
[-1, 2],
[-1, 2],
[1, 3],
[1, 3],
[1, 3]]
T = [[-1, -1], [1, 1], [-1, 1], [1, -1]]
y_true = [[-1, 0], [1, 1], [-1, 2], [1, 3]]
# toy classification problem
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
y_hat = clf.fit(X, y).predict(T)
assert_array_equal(y_hat, y_true)
assert_equal(y_hat.shape, (4, 2))
proba = clf.predict_proba(T)
assert_equal(len(proba), 2)
assert_equal(proba[0].shape, (4, 2))
assert_equal(proba[1].shape, (4, 4))
log_proba = clf.predict_log_proba(T)
assert_equal(len(log_proba), 2)
assert_equal(log_proba[0].shape, (4, 2))
assert_equal(log_proba[1].shape, (4, 4))
# toy regression problem
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(random_state=0)
y_hat = reg.fit(X, y).predict(T)
assert_almost_equal(y_hat, y_true)
assert_equal(y_hat.shape, (4, 2))
def test_classes_shape():
# Test that n_classes_ and classes_ have proper shape.
for name, TreeClassifier in CLF_TREES.items():
# Classification, single output
clf = TreeClassifier(random_state=0)
clf.fit(X, y)
assert_equal(clf.n_classes_, 2)
assert_array_equal(clf.classes_, [-1, 1])
# Classification, multi-output
_y = np.vstack((y, np.array(y) * 2)).T
clf = TreeClassifier(random_state=0)
clf.fit(X, _y)
assert_equal(len(clf.n_classes_), 2)
assert_equal(len(clf.classes_), 2)
assert_array_equal(clf.n_classes_, [2, 2])
assert_array_equal(clf.classes_, [[-1, 1], [-2, 2]])
def test_unbalanced_iris():
# Check class rebalancing.
unbalanced_X = iris.data[:125]
unbalanced_y = iris.target[:125]
sample_weight = _balance_weights(unbalanced_y)
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
clf.fit(unbalanced_X, unbalanced_y, sample_weight=sample_weight)
assert_almost_equal(clf.predict(unbalanced_X), unbalanced_y)
def test_memory_layout():
# Check that it works no matter the memory layout
for (name, TreeEstimator), dtype in product(ALL_TREES.items(),
[np.float64, np.float32]):
est = TreeEstimator(random_state=0)
# Nothing
X = np.asarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# C-order
X = np.asarray(iris.data, order="C", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# F-order
X = np.asarray(iris.data, order="F", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Contiguous
X = np.ascontiguousarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
if est.splitter in SPARSE_SPLITTERS:
# csr matrix
X = csr_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# csc_matrix
X = csc_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Strided
X = np.asarray(iris.data[::3], dtype=dtype)
y = iris.target[::3]
assert_array_equal(est.fit(X, y).predict(X), y)
def test_sample_weight():
# Check sample weighting.
# Test that zero-weighted samples are not taken into account
X = np.arange(100)[:, np.newaxis]
y = np.ones(100)
y[:50] = 0.0
sample_weight = np.ones(100)
sample_weight[y == 0] = 0.0
clf = DecisionTreeClassifier(random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_array_equal(clf.predict(X), np.ones(100))
# Test that low weighted samples are not taken into account at low depth
X = np.arange(200)[:, np.newaxis]
y = np.zeros(200)
y[50:100] = 1
y[100:200] = 2
X[100:200, 0] = 200
sample_weight = np.ones(200)
sample_weight[y == 2] = .51 # Samples of class '2' are still weightier
clf = DecisionTreeClassifier(max_depth=1, random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_equal(clf.tree_.threshold[0], 149.5)
sample_weight[y == 2] = .5 # Samples of class '2' are no longer weightier
clf = DecisionTreeClassifier(max_depth=1, random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_equal(clf.tree_.threshold[0], 49.5) # Threshold should have moved
# Test that sample weighting is the same as having duplicates
X = iris.data
y = iris.target
duplicates = rng.randint(0, X.shape[0], 200)
clf = DecisionTreeClassifier(random_state=1)
clf.fit(X[duplicates], y[duplicates])
sample_weight = np.bincount(duplicates, minlength=X.shape[0])
clf2 = DecisionTreeClassifier(random_state=1)
clf2.fit(X, y, sample_weight=sample_weight)
internal = clf.tree_.children_left != tree._tree.TREE_LEAF
assert_array_almost_equal(clf.tree_.threshold[internal],
clf2.tree_.threshold[internal])
def test_sample_weight_invalid():
# Check sample weighting raises errors.
X = np.arange(100)[:, np.newaxis]
y = np.ones(100)
y[:50] = 0.0
clf = DecisionTreeClassifier(random_state=0)
sample_weight = np.random.rand(100, 1)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.array(0)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.ones(101)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.ones(99)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
def check_class_weights(name):
"""Check class_weights resemble sample_weights behavior."""
TreeClassifier = CLF_TREES[name]
# Iris is balanced, so no effect expected for using 'balanced' weights
clf1 = TreeClassifier(random_state=0)
clf1.fit(iris.data, iris.target)
clf2 = TreeClassifier(class_weight='balanced', random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Make a multi-output problem with three copies of Iris
iris_multi = np.vstack((iris.target, iris.target, iris.target)).T
# Create user-defined weights that should balance over the outputs
clf3 = TreeClassifier(class_weight=[{0: 2., 1: 2., 2: 1.},
{0: 2., 1: 1., 2: 2.},
{0: 1., 1: 2., 2: 2.}],
random_state=0)
clf3.fit(iris.data, iris_multi)
assert_almost_equal(clf2.feature_importances_, clf3.feature_importances_)
# Check against multi-output "auto" which should also have no effect
clf4 = TreeClassifier(class_weight='balanced', random_state=0)
clf4.fit(iris.data, iris_multi)
assert_almost_equal(clf3.feature_importances_, clf4.feature_importances_)
# Inflate importance of class 1, check against user-defined weights
sample_weight = np.ones(iris.target.shape)
sample_weight[iris.target == 1] *= 100
class_weight = {0: 1., 1: 100., 2: 1.}
clf1 = TreeClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight)
clf2 = TreeClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Check that sample_weight and class_weight are multiplicative
clf1 = TreeClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight ** 2)
clf2 = TreeClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target, sample_weight)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
def test_class_weights():
for name in CLF_TREES:
yield check_class_weights, name
def check_class_weight_errors(name):
# Test if class_weight raises errors and warnings when expected.
TreeClassifier = CLF_TREES[name]
_y = np.vstack((y, np.array(y) * 2)).T
# Invalid preset string
clf = TreeClassifier(class_weight='the larch', random_state=0)
assert_raises(ValueError, clf.fit, X, y)
assert_raises(ValueError, clf.fit, X, _y)
# Not a list or preset for multi-output
clf = TreeClassifier(class_weight=1, random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
# Incorrect length list for multi-output
clf = TreeClassifier(class_weight=[{-1: 0.5, 1: 1.}], random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
def test_class_weight_errors():
for name in CLF_TREES:
yield check_class_weight_errors, name
def test_max_leaf_nodes():
# Test greedy trees with max_depth + 1 leafs.
from sklearn.tree._tree import TREE_LEAF
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_depth=None, max_leaf_nodes=k + 1).fit(X, y)
tree = est.tree_
assert_equal((tree.children_left == TREE_LEAF).sum(), k + 1)
# max_leaf_nodes in (0, 1) should raise ValueError
est = TreeEstimator(max_depth=None, max_leaf_nodes=0)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_depth=None, max_leaf_nodes=1)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_depth=None, max_leaf_nodes=0.1)
assert_raises(ValueError, est.fit, X, y)
def test_max_leaf_nodes_max_depth():
    # Test precedence of max_leaf_nodes over max_depth.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_depth=1, max_leaf_nodes=k).fit(X, y)
tree = est.tree_
assert_greater(tree.max_depth, 1)
def test_arrays_persist():
# Ensure property arrays' memory stays alive when tree disappears
# non-regression for #2726
for attr in ['n_classes', 'value', 'children_left', 'children_right',
'threshold', 'impurity', 'feature', 'n_node_samples']:
value = getattr(DecisionTreeClassifier().fit([[0]], [0]).tree_, attr)
# if pointing to freed memory, contents may be arbitrary
assert_true(-2 <= value.flat[0] < 2,
'Array points to arbitrary memory')
def test_only_constant_features():
random_state = check_random_state(0)
X = np.zeros((10, 20))
y = random_state.randint(0, 2, (10, ))
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(random_state=0)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 0)
def test_with_only_one_non_constant_features():
X = np.hstack([np.array([[1.], [1.], [0.], [0.]]),
np.zeros((4, 1000))])
y = np.array([0., 1., 0., 1.0])
for name, TreeEstimator in CLF_TREES.items():
est = TreeEstimator(random_state=0, max_features=1)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 1)
assert_array_equal(est.predict_proba(X), 0.5 * np.ones((4, 2)))
for name, TreeEstimator in REG_TREES.items():
est = TreeEstimator(random_state=0, max_features=1)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 1)
assert_array_equal(est.predict(X), 0.5 * np.ones((4, )))
def test_big_input():
# Test if the warning for too large inputs is appropriate.
X = np.repeat(10 ** 40., 4).astype(np.float64).reshape(-1, 1)
clf = DecisionTreeClassifier()
try:
clf.fit(X, [0, 1, 0, 1])
except ValueError as e:
assert_in("float32", str(e))
def test_realloc():
from sklearn.tree._tree import _realloc_test
assert_raises(MemoryError, _realloc_test)
def test_huge_allocations():
n_bits = int(platform.architecture()[0].rstrip('bit'))
X = np.random.randn(10, 2)
y = np.random.randint(0, 2, 10)
# Sanity check: we cannot request more memory than the size of the address
# space. Currently raises OverflowError.
huge = 2 ** (n_bits + 1)
clf = DecisionTreeClassifier(splitter='best', max_leaf_nodes=huge)
assert_raises(Exception, clf.fit, X, y)
# Non-regression test: MemoryError used to be dropped by Cython
# because of missing "except *".
huge = 2 ** (n_bits - 1) - 1
clf = DecisionTreeClassifier(splitter='best', max_leaf_nodes=huge)
assert_raises(MemoryError, clf.fit, X, y)
def check_sparse_input(tree, dataset, max_depth=None):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]["X"]
X_sparse = DATASETS[dataset]["X_sparse"]
y = DATASETS[dataset]["y"]
# Gain testing time
if dataset in ["digits", "boston"]:
n_samples = X.shape[0] // 5
X = X[:n_samples]
X_sparse = X_sparse[:n_samples]
y = y[:n_samples]
for sparse_format in (csr_matrix, csc_matrix, coo_matrix):
X_sparse = sparse_format(X_sparse)
# Check the default (depth first search)
d = TreeEstimator(random_state=0, max_depth=max_depth).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=max_depth).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
y_pred = d.predict(X)
if tree in CLF_TREES:
y_proba = d.predict_proba(X)
y_log_proba = d.predict_log_proba(X)
for sparse_matrix in (csr_matrix, csc_matrix, coo_matrix):
X_sparse_test = sparse_matrix(X_sparse, dtype=np.float32)
assert_array_almost_equal(s.predict(X_sparse_test), y_pred)
if tree in CLF_TREES:
assert_array_almost_equal(s.predict_proba(X_sparse_test),
y_proba)
assert_array_almost_equal(s.predict_log_proba(X_sparse_test),
y_log_proba)
def test_sparse_input():
for tree, dataset in product(SPARSE_TREES,
("clf_small", "toy", "digits", "multilabel",
"sparse-pos", "sparse-neg", "sparse-mix",
"zeros")):
max_depth = 3 if dataset == "digits" else None
yield (check_sparse_input, tree, dataset, max_depth)
# Due to numerical instability of MSE and too strict test, we limit the
# maximal depth
for tree, dataset in product(REG_TREES, ["boston", "reg_small"]):
if tree in SPARSE_TREES:
yield (check_sparse_input, tree, dataset, 2)
def check_sparse_parameters(tree, dataset):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]["X"]
X_sparse = DATASETS[dataset]["X_sparse"]
y = DATASETS[dataset]["y"]
# Check max_features
d = TreeEstimator(random_state=0, max_features=1, max_depth=2).fit(X, y)
s = TreeEstimator(random_state=0, max_features=1,
max_depth=2).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
# Check min_samples_split
d = TreeEstimator(random_state=0, max_features=1,
min_samples_split=10).fit(X, y)
s = TreeEstimator(random_state=0, max_features=1,
min_samples_split=10).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
# Check min_samples_leaf
d = TreeEstimator(random_state=0,
min_samples_leaf=X_sparse.shape[0] // 2).fit(X, y)
s = TreeEstimator(random_state=0,
min_samples_leaf=X_sparse.shape[0] // 2).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
# Check best-first search
d = TreeEstimator(random_state=0, max_leaf_nodes=3).fit(X, y)
s = TreeEstimator(random_state=0, max_leaf_nodes=3).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
def test_sparse_parameters():
for tree, dataset in product(SPARSE_TREES,
["sparse-pos", "sparse-neg", "sparse-mix",
"zeros"]):
yield (check_sparse_parameters, tree, dataset)
def check_sparse_criterion(tree, dataset):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]["X"]
X_sparse = DATASETS[dataset]["X_sparse"]
y = DATASETS[dataset]["y"]
# Check various criterion
CRITERIONS = REG_CRITERIONS if tree in REG_TREES else CLF_CRITERIONS
for criterion in CRITERIONS:
d = TreeEstimator(random_state=0, max_depth=3,
criterion=criterion).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=3,
criterion=criterion).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
def test_sparse_criterion():
for tree, dataset in product(SPARSE_TREES,
["sparse-pos", "sparse-neg", "sparse-mix",
"zeros"]):
yield (check_sparse_criterion, tree, dataset)
def check_explicit_sparse_zeros(tree, max_depth=3,
n_features=10):
TreeEstimator = ALL_TREES[tree]
    # Set n_samples equal to n_features to ease the simultaneous
    # construction of a csr and a csc matrix
n_samples = n_features
samples = np.arange(n_samples)
# Generate X, y
random_state = check_random_state(0)
indices = []
data = []
offset = 0
indptr = [offset]
for i in range(n_features):
n_nonzero_i = random_state.binomial(n_samples, 0.5)
indices_i = random_state.permutation(samples)[:n_nonzero_i]
indices.append(indices_i)
data_i = random_state.binomial(3, 0.5, size=(n_nonzero_i, )) - 1
data.append(data_i)
offset += n_nonzero_i
indptr.append(offset)
indices = np.concatenate(indices)
data = np.array(np.concatenate(data), dtype=np.float32)
X_sparse = csc_matrix((data, indices, indptr),
shape=(n_samples, n_features))
X = X_sparse.toarray()
X_sparse_test = csr_matrix((data, indices, indptr),
shape=(n_samples, n_features))
X_test = X_sparse_test.toarray()
y = random_state.randint(0, 3, size=(n_samples, ))
# Ensure that X_sparse_test owns its data, indices and indptr array
X_sparse_test = X_sparse_test.copy()
# Ensure that we have explicit zeros
assert_greater((X_sparse.data == 0.).sum(), 0)
assert_greater((X_sparse_test.data == 0.).sum(), 0)
# Perform the comparison
d = TreeEstimator(random_state=0, max_depth=max_depth).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=max_depth).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
Xs = (X_test, X_sparse_test)
for X1, X2 in product(Xs, Xs):
assert_array_almost_equal(s.tree_.apply(X1), d.tree_.apply(X2))
assert_array_almost_equal(s.apply(X1), d.apply(X2))
assert_array_almost_equal(s.apply(X1), s.tree_.apply(X1))
assert_array_almost_equal(s.predict(X1), d.predict(X2))
if tree in CLF_TREES:
assert_array_almost_equal(s.predict_proba(X1),
d.predict_proba(X2))
def test_explicit_sparse_zeros():
for tree in SPARSE_TREES:
yield (check_explicit_sparse_zeros, tree)
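# Illustrative sketch (not part of the original test-suite): with n_samples
# equal to n_features, the very same (data, indices, indptr) triple can be
# interpreted column-wise by csc_matrix and row-wise by csr_matrix, which is
# what check_explicit_sparse_zeros relies on above. The helper below is a
# minimal standalone demonstration and is never called by the tests.
def _example_shared_triple_csc_csr():
    data = np.array([1., 0., 2.], dtype=np.float32)   # note the explicit zero
    indices = np.array([0, 2, 1])
    indptr = np.array([0, 2, 2, 3])
    X_csc = csc_matrix((data, indices, indptr), shape=(3, 3))  # per-column
    X_csr = csr_matrix((data, indices, indptr), shape=(3, 3))  # per-row
    return X_csc.toarray(), X_csr.toarray()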
def check_raise_error_on_1d_input(name):
TreeEstimator = ALL_TREES[name]
X = iris.data[:, 0].ravel()
X_2d = iris.data[:, 0].reshape((-1, 1))
y = iris.target
assert_raises(ValueError, TreeEstimator(random_state=0).fit, X, y)
est = TreeEstimator(random_state=0)
est.fit(X_2d, y)
assert_raises(ValueError, est.predict, X)
def test_1d_input():
for name in ALL_TREES:
yield check_raise_error_on_1d_input, name
def _check_min_weight_leaf_split_level(TreeEstimator, X, y, sample_weight):
# Private function to keep pretty printing in nose yielded tests
est = TreeEstimator(random_state=0)
est.fit(X, y, sample_weight=sample_weight)
assert_equal(est.tree_.max_depth, 1)
est = TreeEstimator(random_state=0, min_weight_fraction_leaf=0.4)
est.fit(X, y, sample_weight=sample_weight)
assert_equal(est.tree_.max_depth, 0)
def check_min_weight_leaf_split_level(name):
TreeEstimator = ALL_TREES[name]
X = np.array([[0], [0], [0], [0], [1]])
y = [0, 0, 0, 0, 1]
sample_weight = [0.2, 0.2, 0.2, 0.2, 0.2]
_check_min_weight_leaf_split_level(TreeEstimator, X, y, sample_weight)
if TreeEstimator().splitter in SPARSE_SPLITTERS:
_check_min_weight_leaf_split_level(TreeEstimator, csc_matrix(X), y,
sample_weight)
def test_min_weight_leaf_split_level():
for name in ALL_TREES:
yield check_min_weight_leaf_split_level, name
def check_public_apply(name):
X_small32 = X_small.astype(tree._tree.DTYPE)
est = ALL_TREES[name]()
est.fit(X_small, y_small)
assert_array_equal(est.apply(X_small),
est.tree_.apply(X_small32))
def check_public_apply_sparse(name):
X_small32 = csr_matrix(X_small.astype(tree._tree.DTYPE))
est = ALL_TREES[name]()
est.fit(X_small, y_small)
assert_array_equal(est.apply(X_small),
est.tree_.apply(X_small32))
def test_public_apply():
for name in ALL_TREES:
yield (check_public_apply, name)
for name in SPARSE_TREES:
yield (check_public_apply_sparse, name)
| bsd-3-clause |
simon-pepin/scikit-learn | sklearn/tests/test_cross_validation.py | 70 | 41943 | """Test the cross_validation module"""
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import coo_matrix
from scipy import stats
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
from sklearn import cross_validation as cval
from sklearn.datasets import make_regression
from sklearn.datasets import load_boston
from sklearn.datasets import load_digits
from sklearn.datasets import load_iris
from sklearn.metrics import explained_variance_score
from sklearn.metrics import make_scorer
from sklearn.metrics import precision_score
from sklearn.externals import six
from sklearn.externals.six.moves import zip
from sklearn.linear_model import Ridge
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.cluster import KMeans
from sklearn.preprocessing import Imputer, LabelBinarizer
from sklearn.pipeline import Pipeline
class MockClassifier(object):
"""Dummy classifier to test the cross-validation"""
def __init__(self, a=0, allow_nd=False):
self.a = a
self.allow_nd = allow_nd
def fit(self, X, Y=None, sample_weight=None, class_prior=None,
sparse_sample_weight=None, sparse_param=None, dummy_int=None,
dummy_str=None, dummy_obj=None, callback=None):
"""The dummy arguments are to test that this fit function can
accept non-array arguments through cross-validation, such as:
- int
- str (this is actually array-like)
- object
- function
"""
self.dummy_int = dummy_int
self.dummy_str = dummy_str
self.dummy_obj = dummy_obj
if callback is not None:
callback(self)
if self.allow_nd:
X = X.reshape(len(X), -1)
if X.ndim >= 3 and not self.allow_nd:
            raise ValueError('X cannot be 3d or more')
if sample_weight is not None:
assert_true(sample_weight.shape[0] == X.shape[0],
'MockClassifier extra fit_param sample_weight.shape[0]'
' is {0}, should be {1}'.format(sample_weight.shape[0],
X.shape[0]))
if class_prior is not None:
assert_true(class_prior.shape[0] == len(np.unique(y)),
'MockClassifier extra fit_param class_prior.shape[0]'
' is {0}, should be {1}'.format(class_prior.shape[0],
len(np.unique(y))))
if sparse_sample_weight is not None:
fmt = ('MockClassifier extra fit_param sparse_sample_weight'
'.shape[0] is {0}, should be {1}')
assert_true(sparse_sample_weight.shape[0] == X.shape[0],
fmt.format(sparse_sample_weight.shape[0], X.shape[0]))
if sparse_param is not None:
fmt = ('MockClassifier extra fit_param sparse_param.shape '
'is ({0}, {1}), should be ({2}, {3})')
assert_true(sparse_param.shape == P_sparse.shape,
fmt.format(sparse_param.shape[0],
sparse_param.shape[1],
P_sparse.shape[0], P_sparse.shape[1]))
return self
def predict(self, T):
if self.allow_nd:
T = T.reshape(len(T), -1)
return T[:, 0]
def score(self, X=None, Y=None):
return 1. / (1 + np.abs(self.a))
def get_params(self, deep=False):
return {'a': self.a, 'allow_nd': self.allow_nd}
X = np.ones((10, 2))
X_sparse = coo_matrix(X)
W_sparse = coo_matrix((np.array([1]), (np.array([1]), np.array([0]))),
shape=(10, 1))
P_sparse = coo_matrix(np.eye(5))
y = np.arange(10) // 2
##############################################################################
# Tests
def check_valid_split(train, test, n_samples=None):
# Use python sets to get more informative assertion failure messages
train, test = set(train), set(test)
# Train and test split should not overlap
assert_equal(train.intersection(test), set())
if n_samples is not None:
        # Check that the union of train and test splits covers all the indices
assert_equal(train.union(test), set(range(n_samples)))
def check_cv_coverage(cv, expected_n_iter=None, n_samples=None):
    # Check that all the samples appear at least once in a test fold
if expected_n_iter is not None:
assert_equal(len(cv), expected_n_iter)
else:
expected_n_iter = len(cv)
collected_test_samples = set()
iterations = 0
for train, test in cv:
check_valid_split(train, test, n_samples=n_samples)
iterations += 1
collected_test_samples.update(test)
# Check that the accumulated test samples cover the whole dataset
assert_equal(iterations, expected_n_iter)
if n_samples is not None:
assert_equal(collected_test_samples, set(range(n_samples)))
def test_kfold_valueerrors():
    # Check that errors are raised if there are not enough samples
assert_raises(ValueError, cval.KFold, 3, 4)
# Check that a warning is raised if the least populated class has too few
# members.
y = [3, 3, -1, -1, 2]
cv = assert_warns_message(Warning, "The least populated class",
cval.StratifiedKFold, y, 3)
    # Check that despite the warning the folds are still computed even
    # though all the classes are not necessarily represented on each
    # side of the split at each split
check_cv_coverage(cv, expected_n_iter=3, n_samples=len(y))
# Error when number of folds is <= 1
assert_raises(ValueError, cval.KFold, 2, 0)
assert_raises(ValueError, cval.KFold, 2, 1)
assert_raises(ValueError, cval.StratifiedKFold, y, 0)
assert_raises(ValueError, cval.StratifiedKFold, y, 1)
# When n is not integer:
assert_raises(ValueError, cval.KFold, 2.5, 2)
# When n_folds is not integer:
assert_raises(ValueError, cval.KFold, 5, 1.5)
assert_raises(ValueError, cval.StratifiedKFold, y, 1.5)
def test_kfold_indices():
# Check all indices are returned in the test folds
kf = cval.KFold(300, 3)
check_cv_coverage(kf, expected_n_iter=3, n_samples=300)
# Check all indices are returned in the test folds even when equal-sized
# folds are not possible
kf = cval.KFold(17, 3)
check_cv_coverage(kf, expected_n_iter=3, n_samples=17)
def test_kfold_no_shuffle():
# Manually check that KFold preserves the data ordering on toy datasets
splits = iter(cval.KFold(4, 2))
train, test = next(splits)
assert_array_equal(test, [0, 1])
assert_array_equal(train, [2, 3])
train, test = next(splits)
assert_array_equal(test, [2, 3])
assert_array_equal(train, [0, 1])
splits = iter(cval.KFold(5, 2))
train, test = next(splits)
assert_array_equal(test, [0, 1, 2])
assert_array_equal(train, [3, 4])
train, test = next(splits)
assert_array_equal(test, [3, 4])
assert_array_equal(train, [0, 1, 2])
def test_stratified_kfold_no_shuffle():
# Manually check that StratifiedKFold preserves the data ordering as much
# as possible on toy datasets in order to avoid hiding sample dependencies
# when possible
splits = iter(cval.StratifiedKFold([1, 1, 0, 0], 2))
train, test = next(splits)
assert_array_equal(test, [0, 2])
assert_array_equal(train, [1, 3])
train, test = next(splits)
assert_array_equal(test, [1, 3])
assert_array_equal(train, [0, 2])
splits = iter(cval.StratifiedKFold([1, 1, 1, 0, 0, 0, 0], 2))
train, test = next(splits)
assert_array_equal(test, [0, 1, 3, 4])
assert_array_equal(train, [2, 5, 6])
train, test = next(splits)
assert_array_equal(test, [2, 5, 6])
assert_array_equal(train, [0, 1, 3, 4])
def test_stratified_kfold_ratios():
# Check that stratified kfold preserves label ratios in individual splits
# Repeat with shuffling turned off and on
n_samples = 1000
labels = np.array([4] * int(0.10 * n_samples) +
[0] * int(0.89 * n_samples) +
[1] * int(0.01 * n_samples))
for shuffle in [False, True]:
for train, test in cval.StratifiedKFold(labels, 5, shuffle=shuffle):
assert_almost_equal(np.sum(labels[train] == 4) / len(train), 0.10,
2)
assert_almost_equal(np.sum(labels[train] == 0) / len(train), 0.89,
2)
assert_almost_equal(np.sum(labels[train] == 1) / len(train), 0.01,
2)
assert_almost_equal(np.sum(labels[test] == 4) / len(test), 0.10, 2)
assert_almost_equal(np.sum(labels[test] == 0) / len(test), 0.89, 2)
assert_almost_equal(np.sum(labels[test] == 1) / len(test), 0.01, 2)
def test_kfold_balance():
# Check that KFold returns folds with balanced sizes
for kf in [cval.KFold(i, 5) for i in range(11, 17)]:
sizes = []
for _, test in kf:
sizes.append(len(test))
assert_true((np.max(sizes) - np.min(sizes)) <= 1)
assert_equal(np.sum(sizes), kf.n)
def test_stratifiedkfold_balance():
    # Check that StratifiedKFold returns folds with balanced sizes (only when
    # stratification is possible)
# Repeat with shuffling turned off and on
labels = [0] * 3 + [1] * 14
for shuffle in [False, True]:
for skf in [cval.StratifiedKFold(labels[:i], 3, shuffle=shuffle)
for i in range(11, 17)]:
sizes = []
for _, test in skf:
sizes.append(len(test))
assert_true((np.max(sizes) - np.min(sizes)) <= 1)
assert_equal(np.sum(sizes), skf.n)
def test_shuffle_kfold():
# Check the indices are shuffled properly, and that all indices are
# returned in the different test folds
kf = cval.KFold(300, 3, shuffle=True, random_state=0)
ind = np.arange(300)
all_folds = None
for train, test in kf:
sorted_array = np.arange(100)
assert_true(np.any(sorted_array != ind[train]))
sorted_array = np.arange(101, 200)
assert_true(np.any(sorted_array != ind[train]))
sorted_array = np.arange(201, 300)
assert_true(np.any(sorted_array != ind[train]))
if all_folds is None:
all_folds = ind[test].copy()
else:
all_folds = np.concatenate((all_folds, ind[test]))
all_folds.sort()
assert_array_equal(all_folds, ind)
def test_shuffle_stratifiedkfold():
# Check that shuffling is happening when requested, and for proper
# sample coverage
labels = [0] * 20 + [1] * 20
kf0 = list(cval.StratifiedKFold(labels, 5, shuffle=True, random_state=0))
kf1 = list(cval.StratifiedKFold(labels, 5, shuffle=True, random_state=1))
for (_, test0), (_, test1) in zip(kf0, kf1):
assert_true(set(test0) != set(test1))
check_cv_coverage(kf0, expected_n_iter=5, n_samples=40)
def test_kfold_can_detect_dependent_samples_on_digits(): # see #2372
# The digits samples are dependent: they are apparently grouped by authors
# although we don't have any information on the groups segment locations
    # for this data. We can highlight this fact by computing k-fold cross-
    # validation with and without shuffling: we observe that the shuffling case
    # wrongly makes the IID assumption and is therefore too optimistic: it
    # estimates a much higher accuracy (around 0.96) than the non-shuffling
    # variant (around 0.86).
digits = load_digits()
X, y = digits.data[:800], digits.target[:800]
model = SVC(C=10, gamma=0.005)
n = len(y)
cv = cval.KFold(n, 5, shuffle=False)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(0.88, mean_score)
assert_greater(mean_score, 0.85)
# Shuffling the data artificially breaks the dependency and hides the
# overfitting of the model with regards to the writing style of the authors
# by yielding a seriously overestimated score:
cv = cval.KFold(n, 5, shuffle=True, random_state=0)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(mean_score, 0.95)
cv = cval.KFold(n, 5, shuffle=True, random_state=1)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(mean_score, 0.95)
# Similarly, StratifiedKFold should try to shuffle the data as little
# as possible (while respecting the balanced class constraints)
# and thus be able to detect the dependency by not overestimating
# the CV score either. As the digits dataset is approximately balanced
# the estimated mean score is close to the score measured with
# non-shuffled KFold
cv = cval.StratifiedKFold(y, 5)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(0.88, mean_score)
assert_greater(mean_score, 0.85)
def test_shuffle_split():
ss1 = cval.ShuffleSplit(10, test_size=0.2, random_state=0)
ss2 = cval.ShuffleSplit(10, test_size=2, random_state=0)
ss3 = cval.ShuffleSplit(10, test_size=np.int32(2), random_state=0)
for typ in six.integer_types:
ss4 = cval.ShuffleSplit(10, test_size=typ(2), random_state=0)
for t1, t2, t3, t4 in zip(ss1, ss2, ss3, ss4):
assert_array_equal(t1[0], t2[0])
assert_array_equal(t2[0], t3[0])
assert_array_equal(t3[0], t4[0])
assert_array_equal(t1[1], t2[1])
assert_array_equal(t2[1], t3[1])
assert_array_equal(t3[1], t4[1])
def test_stratified_shuffle_split_init():
y = np.asarray([0, 1, 1, 1, 2, 2, 2])
# Check that error is raised if there is a class with only one sample
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.2)
# Check that error is raised if the test set size is smaller than n_classes
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 2)
# Check that error is raised if the train set size is smaller than
# n_classes
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 3, 2)
y = np.asarray([0, 0, 0, 1, 1, 1, 2, 2, 2])
    # Check that errors are raised if there are not enough samples
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.5, 0.6)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 8, 0.6)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.6, 8)
# Train size or test size too small
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, train_size=2)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, test_size=2)
def test_stratified_shuffle_split_iter():
ys = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]),
np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
np.array([-1] * 800 + [1] * 50)
]
for y in ys:
sss = cval.StratifiedShuffleSplit(y, 6, test_size=0.33,
random_state=0)
for train, test in sss:
assert_array_equal(np.unique(y[train]), np.unique(y[test]))
            # Check that the folds preserve the class proportions
p_train = (np.bincount(np.unique(y[train], return_inverse=True)[1])
/ float(len(y[train])))
p_test = (np.bincount(np.unique(y[test], return_inverse=True)[1])
/ float(len(y[test])))
assert_array_almost_equal(p_train, p_test, 1)
assert_equal(y[train].size + y[test].size, y.size)
assert_array_equal(np.lib.arraysetops.intersect1d(train, test), [])
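# Worked mini-example (illustrative only) of the proportion idiom used in
# test_stratified_shuffle_split_iter above: for y[train] == [1, 1, 2, 3],
# np.unique(..., return_inverse=True)[1] is [0, 0, 1, 2], np.bincount of that
# is [2, 1, 1], and dividing by len(y[train]) == 4 gives the class
# proportions [0.5, 0.25, 0.25].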
def test_stratified_shuffle_split_even():
    # Test that in StratifiedShuffleSplit the indices are drawn with an
    # equal chance
n_folds = 5
n_iter = 1000
def assert_counts_are_ok(idx_counts, p):
# Here we test that the distribution of the counts
# per index is close enough to a binomial
threshold = 0.05 / n_splits
bf = stats.binom(n_splits, p)
for count in idx_counts:
p = bf.pmf(count)
assert_true(p > threshold,
"An index is not drawn with chance corresponding "
"to even draws")
for n_samples in (6, 22):
labels = np.array((n_samples // 2) * [0, 1])
splits = cval.StratifiedShuffleSplit(labels, n_iter=n_iter,
test_size=1. / n_folds,
random_state=0)
train_counts = [0] * n_samples
test_counts = [0] * n_samples
n_splits = 0
for train, test in splits:
n_splits += 1
for counter, ids in [(train_counts, train), (test_counts, test)]:
for id in ids:
counter[id] += 1
assert_equal(n_splits, n_iter)
assert_equal(len(train), splits.n_train)
assert_equal(len(test), splits.n_test)
assert_equal(len(set(train).intersection(test)), 0)
label_counts = np.unique(labels)
assert_equal(splits.test_size, 1.0 / n_folds)
assert_equal(splits.n_train + splits.n_test, len(labels))
assert_equal(len(label_counts), 2)
ex_test_p = float(splits.n_test) / n_samples
ex_train_p = float(splits.n_train) / n_samples
assert_counts_are_ok(train_counts, ex_train_p)
assert_counts_are_ok(test_counts, ex_test_p)
def test_predefinedsplit_with_kfold_split():
# Check that PredefinedSplit can reproduce a split generated by Kfold.
folds = -1 * np.ones(10)
kf_train = []
kf_test = []
for i, (train_ind, test_ind) in enumerate(cval.KFold(10, 5, shuffle=True)):
kf_train.append(train_ind)
kf_test.append(test_ind)
folds[test_ind] = i
ps_train = []
ps_test = []
ps = cval.PredefinedSplit(folds)
for train_ind, test_ind in ps:
ps_train.append(train_ind)
ps_test.append(test_ind)
assert_array_equal(ps_train, kf_train)
assert_array_equal(ps_test, kf_test)
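# Minimal usage sketch (illustrative, not part of the original tests): every
# entry of the array given to PredefinedSplit is the index of the test fold
# that the corresponding sample belongs to, as exercised above. The helper
# below is a standalone example and is never called by the tests.
def _example_predefined_split():
    splits = list(cval.PredefinedSplit([0, 1, 0, 1]))
    # first split: test samples [0, 2], train samples [1, 3];
    # second split: test samples [1, 3], train samples [0, 2]
    return splits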
def test_leave_label_out_changing_labels():
# Check that LeaveOneLabelOut and LeavePLabelOut work normally if
# the labels variable is changed before calling __iter__
labels = np.array([0, 1, 2, 1, 1, 2, 0, 0])
labels_changing = np.array(labels, copy=True)
lolo = cval.LeaveOneLabelOut(labels)
lolo_changing = cval.LeaveOneLabelOut(labels_changing)
lplo = cval.LeavePLabelOut(labels, p=2)
lplo_changing = cval.LeavePLabelOut(labels_changing, p=2)
labels_changing[:] = 0
for llo, llo_changing in [(lolo, lolo_changing), (lplo, lplo_changing)]:
for (train, test), (train_chan, test_chan) in zip(llo, llo_changing):
assert_array_equal(train, train_chan)
assert_array_equal(test, test_chan)
def test_cross_val_score():
clf = MockClassifier()
for a in range(-10, 10):
clf.a = a
# Smoke test
scores = cval.cross_val_score(clf, X, y)
assert_array_equal(scores, clf.score(X, y))
# test with multioutput y
scores = cval.cross_val_score(clf, X_sparse, X)
assert_array_equal(scores, clf.score(X_sparse, X))
scores = cval.cross_val_score(clf, X_sparse, y)
assert_array_equal(scores, clf.score(X_sparse, y))
# test with multioutput y
scores = cval.cross_val_score(clf, X_sparse, X)
assert_array_equal(scores, clf.score(X_sparse, X))
# test with X and y as list
list_check = lambda x: isinstance(x, list)
clf = CheckingClassifier(check_X=list_check)
scores = cval.cross_val_score(clf, X.tolist(), y.tolist())
clf = CheckingClassifier(check_y=list_check)
scores = cval.cross_val_score(clf, X, y.tolist())
assert_raises(ValueError, cval.cross_val_score, clf, X, y,
scoring="sklearn")
    # test with 3d X
X_3d = X[:, :, np.newaxis]
clf = MockClassifier(allow_nd=True)
scores = cval.cross_val_score(clf, X_3d, y)
clf = MockClassifier(allow_nd=False)
assert_raises(ValueError, cval.cross_val_score, clf, X_3d, y)
def test_cross_val_score_pandas():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TargetType, InputFeatureType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
cval.cross_val_score(clf, X_df, y_ser)
def test_cross_val_score_mask():
# test that cross_val_score works with boolean masks
svm = SVC(kernel="linear")
iris = load_iris()
X, y = iris.data, iris.target
cv_indices = cval.KFold(len(y), 5)
scores_indices = cval.cross_val_score(svm, X, y, cv=cv_indices)
cv_indices = cval.KFold(len(y), 5)
cv_masks = []
for train, test in cv_indices:
mask_train = np.zeros(len(y), dtype=np.bool)
mask_test = np.zeros(len(y), dtype=np.bool)
mask_train[train] = 1
mask_test[test] = 1
cv_masks.append((train, test))
scores_masks = cval.cross_val_score(svm, X, y, cv=cv_masks)
assert_array_equal(scores_indices, scores_masks)
def test_cross_val_score_precomputed():
# test for svm with precomputed kernel
svm = SVC(kernel="precomputed")
iris = load_iris()
X, y = iris.data, iris.target
linear_kernel = np.dot(X, X.T)
score_precomputed = cval.cross_val_score(svm, linear_kernel, y)
svm = SVC(kernel="linear")
score_linear = cval.cross_val_score(svm, X, y)
assert_array_equal(score_precomputed, score_linear)
# Error raised for non-square X
svm = SVC(kernel="precomputed")
assert_raises(ValueError, cval.cross_val_score, svm, X, y)
# test error is raised when the precomputed kernel is not array-like
# or sparse
assert_raises(ValueError, cval.cross_val_score, svm,
linear_kernel.tolist(), y)
def test_cross_val_score_fit_params():
clf = MockClassifier()
n_samples = X.shape[0]
n_classes = len(np.unique(y))
DUMMY_INT = 42
DUMMY_STR = '42'
DUMMY_OBJ = object()
def assert_fit_params(clf):
# Function to test that the values are passed correctly to the
# classifier arguments for non-array type
assert_equal(clf.dummy_int, DUMMY_INT)
assert_equal(clf.dummy_str, DUMMY_STR)
assert_equal(clf.dummy_obj, DUMMY_OBJ)
fit_params = {'sample_weight': np.ones(n_samples),
'class_prior': np.ones(n_classes) / n_classes,
'sparse_sample_weight': W_sparse,
'sparse_param': P_sparse,
'dummy_int': DUMMY_INT,
'dummy_str': DUMMY_STR,
'dummy_obj': DUMMY_OBJ,
'callback': assert_fit_params}
cval.cross_val_score(clf, X, y, fit_params=fit_params)
def test_cross_val_score_score_func():
clf = MockClassifier()
_score_func_args = []
def score_func(y_test, y_predict):
_score_func_args.append((y_test, y_predict))
return 1.0
with warnings.catch_warnings(record=True):
scoring = make_scorer(score_func)
score = cval.cross_val_score(clf, X, y, scoring=scoring)
assert_array_equal(score, [1.0, 1.0, 1.0])
assert len(_score_func_args) == 3
def test_cross_val_score_errors():
class BrokenEstimator:
pass
assert_raises(TypeError, cval.cross_val_score, BrokenEstimator(), X)
def test_train_test_split_errors():
assert_raises(ValueError, cval.train_test_split)
assert_raises(ValueError, cval.train_test_split, range(3), train_size=1.1)
assert_raises(ValueError, cval.train_test_split, range(3), test_size=0.6,
train_size=0.6)
assert_raises(ValueError, cval.train_test_split, range(3),
test_size=np.float32(0.6), train_size=np.float32(0.6))
assert_raises(ValueError, cval.train_test_split, range(3),
test_size="wrong_type")
assert_raises(ValueError, cval.train_test_split, range(3), test_size=2,
train_size=4)
assert_raises(TypeError, cval.train_test_split, range(3),
some_argument=1.1)
assert_raises(ValueError, cval.train_test_split, range(3), range(42))
def test_train_test_split():
X = np.arange(100).reshape((10, 10))
X_s = coo_matrix(X)
y = np.arange(10)
# simple test
split = cval.train_test_split(X, y, test_size=None, train_size=.5)
X_train, X_test, y_train, y_test = split
assert_equal(len(y_test), len(y_train))
# test correspondence of X and y
assert_array_equal(X_train[:, 0], y_train * 10)
assert_array_equal(X_test[:, 0], y_test * 10)
# conversion of lists to arrays (deprecated?)
with warnings.catch_warnings(record=True):
split = cval.train_test_split(X, X_s, y.tolist(), allow_lists=False)
X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
assert_array_equal(X_train, X_s_train.toarray())
assert_array_equal(X_test, X_s_test.toarray())
# don't convert lists to anything else by default
split = cval.train_test_split(X, X_s, y.tolist())
X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
assert_true(isinstance(y_train, list))
assert_true(isinstance(y_test, list))
# allow nd-arrays
X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
split = cval.train_test_split(X_4d, y_3d)
assert_equal(split[0].shape, (7, 5, 3, 2))
assert_equal(split[1].shape, (3, 5, 3, 2))
assert_equal(split[2].shape, (7, 7, 11))
assert_equal(split[3].shape, (3, 7, 11))
# test stratification option
y = np.array([1, 1, 1, 1, 2, 2, 2, 2])
for test_size, exp_test_size in zip([2, 4, 0.25, 0.5, 0.75],
[2, 4, 2, 4, 6]):
train, test = cval.train_test_split(y,
test_size=test_size,
stratify=y,
random_state=0)
assert_equal(len(test), exp_test_size)
assert_equal(len(test) + len(train), len(y))
# check the 1:1 ratio of ones and twos in the data is preserved
assert_equal(np.sum(train == 1), np.sum(train == 2))
def train_test_split_pandas():
    # check train_test_split doesn't destroy pandas dataframe
types = [MockDataFrame]
try:
from pandas import DataFrame
types.append(DataFrame)
except ImportError:
pass
for InputFeatureType in types:
# X dataframe
X_df = InputFeatureType(X)
X_train, X_test = cval.train_test_split(X_df)
assert_true(isinstance(X_train, InputFeatureType))
assert_true(isinstance(X_test, InputFeatureType))
def train_test_split_mock_pandas():
# X mock dataframe
X_df = MockDataFrame(X)
X_train, X_test = cval.train_test_split(X_df)
assert_true(isinstance(X_train, MockDataFrame))
assert_true(isinstance(X_test, MockDataFrame))
X_train_arr, X_test_arr = cval.train_test_split(X_df, allow_lists=False)
assert_true(isinstance(X_train_arr, np.ndarray))
assert_true(isinstance(X_test_arr, np.ndarray))
def test_cross_val_score_with_score_func_classification():
iris = load_iris()
clf = SVC(kernel='linear')
# Default score (should be the accuracy score)
scores = cval.cross_val_score(clf, iris.data, iris.target, cv=5)
assert_array_almost_equal(scores, [0.97, 1., 0.97, 0.97, 1.], 2)
# Correct classification score (aka. zero / one score) - should be the
# same as the default estimator score
zo_scores = cval.cross_val_score(clf, iris.data, iris.target,
scoring="accuracy", cv=5)
assert_array_almost_equal(zo_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
    # F1 score (classes are balanced so f1_score should be equal to the
    # zero/one score)
f1_scores = cval.cross_val_score(clf, iris.data, iris.target,
scoring="f1_weighted", cv=5)
assert_array_almost_equal(f1_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
def test_cross_val_score_with_score_func_regression():
X, y = make_regression(n_samples=30, n_features=20, n_informative=5,
random_state=0)
reg = Ridge()
# Default score of the Ridge regression estimator
scores = cval.cross_val_score(reg, X, y, cv=5)
assert_array_almost_equal(scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
    # R2 score (aka. the coefficient of determination) - should be the
# same as the default estimator score
r2_scores = cval.cross_val_score(reg, X, y, scoring="r2", cv=5)
assert_array_almost_equal(r2_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
# Mean squared error; this is a loss function, so "scores" are negative
mse_scores = cval.cross_val_score(reg, X, y, cv=5,
scoring="mean_squared_error")
expected_mse = np.array([-763.07, -553.16, -274.38, -273.26, -1681.99])
assert_array_almost_equal(mse_scores, expected_mse, 2)
# Explained variance
scoring = make_scorer(explained_variance_score)
ev_scores = cval.cross_val_score(reg, X, y, cv=5, scoring=scoring)
assert_array_almost_equal(ev_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
def test_permutation_score():
iris = load_iris()
X = iris.data
X_sparse = coo_matrix(X)
y = iris.target
svm = SVC(kernel='linear')
cv = cval.StratifiedKFold(y, 2)
score, scores, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
assert_greater(score, 0.9)
assert_almost_equal(pvalue, 0.0, 1)
score_label, _, pvalue_label = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy",
labels=np.ones(y.size), random_state=0)
assert_true(score_label == score)
assert_true(pvalue_label == pvalue)
# check that we obtain the same results with a sparse representation
svm_sparse = SVC(kernel='linear')
cv_sparse = cval.StratifiedKFold(y, 2)
score_label, _, pvalue_label = cval.permutation_test_score(
svm_sparse, X_sparse, y, n_permutations=30, cv=cv_sparse,
scoring="accuracy", labels=np.ones(y.size), random_state=0)
assert_true(score_label == score)
assert_true(pvalue_label == pvalue)
# test with custom scoring object
def custom_score(y_true, y_pred):
return (((y_true == y_pred).sum() - (y_true != y_pred).sum())
/ y_true.shape[0])
scorer = make_scorer(custom_score)
score, _, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=100, scoring=scorer, cv=cv, random_state=0)
assert_almost_equal(score, .93, 2)
assert_almost_equal(pvalue, 0.01, 3)
# set random y
y = np.mod(np.arange(len(y)), 3)
score, scores, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
assert_less(score, 0.5)
assert_greater(pvalue, 0.2)
def test_cross_val_generator_with_indices():
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
y = np.array([1, 1, 2, 2])
labels = np.array([1, 2, 3, 4])
# explicitly passing indices value is deprecated
loo = cval.LeaveOneOut(4)
lpo = cval.LeavePOut(4, 2)
kf = cval.KFold(4, 2)
skf = cval.StratifiedKFold(y, 2)
lolo = cval.LeaveOneLabelOut(labels)
lopo = cval.LeavePLabelOut(labels, 2)
ps = cval.PredefinedSplit([1, 1, 2, 2])
ss = cval.ShuffleSplit(2)
for cv in [loo, lpo, kf, skf, lolo, lopo, ss, ps]:
for train, test in cv:
assert_not_equal(np.asarray(train).dtype.kind, 'b')
assert_not_equal(np.asarray(train).dtype.kind, 'b')
X[train], X[test]
y[train], y[test]
@ignore_warnings
def test_cross_val_generator_with_default_indices():
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
y = np.array([1, 1, 2, 2])
labels = np.array([1, 2, 3, 4])
loo = cval.LeaveOneOut(4)
lpo = cval.LeavePOut(4, 2)
kf = cval.KFold(4, 2)
skf = cval.StratifiedKFold(y, 2)
lolo = cval.LeaveOneLabelOut(labels)
lopo = cval.LeavePLabelOut(labels, 2)
ss = cval.ShuffleSplit(2)
ps = cval.PredefinedSplit([1, 1, 2, 2])
for cv in [loo, lpo, kf, skf, lolo, lopo, ss, ps]:
for train, test in cv:
assert_not_equal(np.asarray(train).dtype.kind, 'b')
assert_not_equal(np.asarray(train).dtype.kind, 'b')
X[train], X[test]
y[train], y[test]
def test_shufflesplit_errors():
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=2.0)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=1.0)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=0.1,
train_size=0.95)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=11)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=10)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=8, train_size=3)
assert_raises(ValueError, cval.ShuffleSplit, 10, train_size=1j)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=None,
train_size=None)
def test_shufflesplit_reproducible():
# Check that iterating twice on the ShuffleSplit gives the same
# sequence of train-test when the random_state is given
ss = cval.ShuffleSplit(10, random_state=21)
assert_array_equal(list(a for a, b in ss), list(a for a, b in ss))
def test_safe_split_with_precomputed_kernel():
clf = SVC()
clfp = SVC(kernel="precomputed")
iris = load_iris()
X, y = iris.data, iris.target
K = np.dot(X, X.T)
cv = cval.ShuffleSplit(X.shape[0], test_size=0.25, random_state=0)
tr, te = list(cv)[0]
X_tr, y_tr = cval._safe_split(clf, X, y, tr)
K_tr, y_tr2 = cval._safe_split(clfp, K, y, tr)
assert_array_almost_equal(K_tr, np.dot(X_tr, X_tr.T))
X_te, y_te = cval._safe_split(clf, X, y, te, tr)
K_te, y_te2 = cval._safe_split(clfp, K, y, te, tr)
assert_array_almost_equal(K_te, np.dot(X_te, X_tr.T))
def test_cross_val_score_allow_nans():
# Check that cross_val_score allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
cval.cross_val_score(p, X, y, cv=5)
def test_train_test_split_allow_nans():
# Check that train_test_split allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
cval.train_test_split(X, y, test_size=0.2, random_state=42)
def test_permutation_test_score_allow_nans():
# Check that permutation_test_score allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
cval.permutation_test_score(p, X, y, cv=5)
def test_check_cv_return_types():
X = np.ones((9, 2))
cv = cval.check_cv(3, X, classifier=False)
assert_true(isinstance(cv, cval.KFold))
y_binary = np.array([0, 1, 0, 1, 0, 0, 1, 1, 1])
cv = cval.check_cv(3, X, y_binary, classifier=True)
assert_true(isinstance(cv, cval.StratifiedKFold))
y_multiclass = np.array([0, 1, 0, 1, 2, 1, 2, 0, 2])
cv = cval.check_cv(3, X, y_multiclass, classifier=True)
assert_true(isinstance(cv, cval.StratifiedKFold))
X = np.ones((5, 2))
y_seq_of_seqs = [[], [1, 2], [3], [0, 1, 3], [2]]
with warnings.catch_warnings(record=True):
# deprecated sequence of sequence format
cv = cval.check_cv(3, X, y_seq_of_seqs, classifier=True)
assert_true(isinstance(cv, cval.KFold))
y_indicator_matrix = LabelBinarizer().fit_transform(y_seq_of_seqs)
cv = cval.check_cv(3, X, y_indicator_matrix, classifier=True)
assert_true(isinstance(cv, cval.KFold))
y_multioutput = np.array([[1, 2], [0, 3], [0, 0], [3, 1], [2, 0]])
cv = cval.check_cv(3, X, y_multioutput, classifier=True)
assert_true(isinstance(cv, cval.KFold))
def test_cross_val_score_multilabel():
X = np.array([[-3, 4], [2, 4], [3, 3], [0, 2], [-3, 1],
[-2, 1], [0, 0], [-2, -1], [-1, -2], [1, -2]])
y = np.array([[1, 1], [0, 1], [0, 1], [0, 1], [1, 1],
[0, 1], [1, 0], [1, 1], [1, 0], [0, 0]])
clf = KNeighborsClassifier(n_neighbors=1)
scoring_micro = make_scorer(precision_score, average='micro')
scoring_macro = make_scorer(precision_score, average='macro')
scoring_samples = make_scorer(precision_score, average='samples')
score_micro = cval.cross_val_score(clf, X, y, scoring=scoring_micro, cv=5)
score_macro = cval.cross_val_score(clf, X, y, scoring=scoring_macro, cv=5)
score_samples = cval.cross_val_score(clf, X, y,
scoring=scoring_samples, cv=5)
assert_almost_equal(score_micro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 3])
assert_almost_equal(score_macro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
assert_almost_equal(score_samples, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
def test_cross_val_predict():
boston = load_boston()
X, y = boston.data, boston.target
cv = cval.KFold(len(boston.target))
est = Ridge()
# Naive loop (should be same as cross_val_predict):
preds2 = np.zeros_like(y)
for train, test in cv:
est.fit(X[train], y[train])
preds2[test] = est.predict(X[test])
preds = cval.cross_val_predict(est, X, y, cv=cv)
assert_array_almost_equal(preds, preds2)
preds = cval.cross_val_predict(est, X, y)
assert_equal(len(preds), len(y))
cv = cval.LeaveOneOut(len(y))
preds = cval.cross_val_predict(est, X, y, cv=cv)
assert_equal(len(preds), len(y))
Xsp = X.copy()
Xsp *= (Xsp > np.median(Xsp))
Xsp = coo_matrix(Xsp)
preds = cval.cross_val_predict(est, Xsp, y)
assert_array_almost_equal(len(preds), len(y))
preds = cval.cross_val_predict(KMeans(), X)
assert_equal(len(preds), len(y))
def bad_cv():
for i in range(4):
yield np.array([0, 1, 2, 3]), np.array([4, 5, 6, 7, 8])
assert_raises(ValueError, cval.cross_val_predict, est, X, y, cv=bad_cv())
def test_cross_val_predict_input_types():
clf = Ridge()
# Smoke test
predictions = cval.cross_val_predict(clf, X, y)
assert_equal(predictions.shape, (10,))
# test with multioutput y
predictions = cval.cross_val_predict(clf, X_sparse, X)
assert_equal(predictions.shape, (10, 2))
predictions = cval.cross_val_predict(clf, X_sparse, y)
assert_array_equal(predictions.shape, (10,))
# test with multioutput y
predictions = cval.cross_val_predict(clf, X_sparse, X)
assert_array_equal(predictions.shape, (10, 2))
# test with X and y as list
list_check = lambda x: isinstance(x, list)
clf = CheckingClassifier(check_X=list_check)
predictions = cval.cross_val_predict(clf, X.tolist(), y.tolist())
clf = CheckingClassifier(check_y=list_check)
predictions = cval.cross_val_predict(clf, X, y.tolist())
    # test with 3d X
X_3d = X[:, :, np.newaxis]
check_3d = lambda x: x.ndim == 3
clf = CheckingClassifier(check_X=check_3d)
predictions = cval.cross_val_predict(clf, X_3d, y)
assert_array_equal(predictions.shape, (10,))
def test_cross_val_predict_pandas():
    # check cross_val_predict doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TargetType, InputFeatureType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
cval.cross_val_predict(clf, X_df, y_ser)
def test_sparse_fit_params():
iris = load_iris()
X, y = iris.data, iris.target
clf = MockClassifier()
fit_params = {'sparse_sample_weight': coo_matrix(np.eye(X.shape[0]))}
a = cval.cross_val_score(clf, X, y, fit_params=fit_params)
assert_array_equal(a, np.ones(3))
def test_check_is_partition():
p = np.arange(100)
assert_true(cval._check_is_partition(p, 100))
assert_false(cval._check_is_partition(np.delete(p, 23), 100))
p[0] = 23
assert_false(cval._check_is_partition(p, 100))
| bsd-3-clause |
lancezlin/ml_template_py | lib/python2.7/site-packages/matplotlib/animation.py | 4 | 50572 | # TODO:
# * Loop Delay is broken on GTKAgg. This is because source_remove() is not
# working as we want. PyGTK bug?
# * Documentation -- this will need a new section of the User's Guide.
# Both for Animations and just timers.
# - Also need to update http://www.scipy.org/Cookbook/Matplotlib/Animations
# * Blit
# * Currently broken with Qt4 for widgets that don't start on screen
# * Still a few edge cases that aren't working correctly
# * Can this integrate better with existing matplotlib animation artist flag?
# - If animated removes from default draw(), perhaps we could use this to
# simplify initial draw.
# * Example
# * Frameless animation - pure procedural with no loop
# * Need example that uses something like inotify or subprocess
# * Complex syncing examples
# * Movies
# * Can blit be enabled for movies?
# * Need to consider event sources to allow clicking through multiple figures
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
from matplotlib.externals.six.moves import xrange, zip
import os
import platform
import sys
import itertools
try:
# python3
from base64 import encodebytes
except ImportError:
# python2
from base64 import encodestring as encodebytes
import contextlib
import tempfile
import warnings
from matplotlib.cbook import iterable, is_string_like
from matplotlib.compat import subprocess
from matplotlib import verbose
from matplotlib import rcParams, rcParamsDefault, rc_context
# Process creation flag for subprocess to prevent it raising a terminal
# window. See for example:
# https://stackoverflow.com/questions/24130623/using-python-subprocess-popen-cant-prevent-exe-stopped-working-prompt
if platform.system() == 'Windows':
CREATE_NO_WINDOW = 0x08000000
subprocess_creation_flags = CREATE_NO_WINDOW
else:
# Apparently None won't work here
subprocess_creation_flags = 0
# Other potential writing methods:
# * http://pymedia.org/
# * libmng (produces swf) python wrappers: https://github.com/libming/libming
# * Wrap x264 API:
# (http://stackoverflow.com/questions/2940671/
# how-to-encode-series-of-images-into-h264-using-x264-api-c-c )
# A registry for available MovieWriter classes
class MovieWriterRegistry(object):
def __init__(self):
self.avail = dict()
# Returns a decorator that can be used on classes to register them under
# a name. As in:
# @register('foo')
# class Foo:
# pass
def register(self, name):
def wrapper(writerClass):
if writerClass.isAvailable():
self.avail[name] = writerClass
return writerClass
return wrapper
def list(self):
''' Get a list of available MovieWriters.'''
return list(self.avail.keys())
def is_available(self, name):
return name in self.avail
def __getitem__(self, name):
if not self.avail:
raise RuntimeError("No MovieWriters available!")
return self.avail[name]
writers = MovieWriterRegistry()
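# For example (illustrative): once the writer classes below have been
# registered, ``writers.list()`` returns the names of the writers available
# on this system, ``writers.is_available('ffmpeg')`` checks a single name,
# and ``writers['ffmpeg']`` returns the corresponding writer class.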
class MovieWriter(object):
'''
Base class for writing movies. Fundamentally, what a MovieWriter does
    is provide a way to grab frames by calling grab_frame(). setup()
is called to start the process and finish() is called afterwards.
This class is set up to provide for writing movie frame data to a pipe.
saving() is provided as a context manager to facilitate this process as::
with moviewriter.saving('myfile.mp4'):
# Iterate over frames
moviewriter.grab_frame()
The use of the context manager ensures that setup and cleanup are
performed as necessary.
frame_format: string
The format used in writing frame data, defaults to 'rgba'
'''
    # Specifies whether the size of all frames needs to be identical
# i.e. whether we can use savefig.bbox = 'tight'
frame_size_can_vary = False
def __init__(self, fps=5, codec=None, bitrate=None, extra_args=None,
metadata=None):
'''
Construct a new MovieWriter object.
fps: int
Framerate for movie.
codec: string or None, optional
The codec to use. If None (the default) the setting in the
rcParam `animation.codec` is used.
bitrate: int or None, optional
The bitrate for the saved movie file, which is one way to control
the output file size and quality. The default value is None,
which uses the value stored in the rcParam `animation.bitrate`.
A value of -1 implies that the bitrate should be determined
automatically by the underlying utility.
extra_args: list of strings or None
A list of extra string arguments to be passed to the underlying
movie utility. The default is None, which passes the additional
arguments in the 'animation.extra_args' rcParam.
metadata: dict of string:string or None
A dictionary of keys and values for metadata to include in the
output file. Some keys that may be of use include:
title, artist, genre, subject, copyright, srcform, comment.
'''
self.fps = fps
self.frame_format = 'rgba'
if codec is None:
self.codec = rcParams['animation.codec']
else:
self.codec = codec
if bitrate is None:
self.bitrate = rcParams['animation.bitrate']
else:
self.bitrate = bitrate
if extra_args is None:
self.extra_args = list(rcParams[self.args_key])
else:
self.extra_args = extra_args
if metadata is None:
self.metadata = dict()
else:
self.metadata = metadata
@property
def frame_size(self):
'A tuple (width,height) in pixels of a movie frame.'
width_inches, height_inches = self.fig.get_size_inches()
return width_inches * self.dpi, height_inches * self.dpi
def setup(self, fig, outfile, dpi, *args):
'''
Perform setup for writing the movie file.
fig: `matplotlib.Figure` instance
The figure object that contains the information for frames
outfile: string
The filename of the resulting movie file
dpi: int
The DPI (or resolution) for the file. This controls the size
in pixels of the resulting movie file.
'''
self.outfile = outfile
self.fig = fig
self.dpi = dpi
# Run here so that grab_frame() can write the data to a pipe. This
# eliminates the need for temp files.
self._run()
@contextlib.contextmanager
def saving(self, *args):
'''
Context manager to facilitate writing the movie file.
``*args`` are any parameters that should be passed to `setup`.
'''
# This particular sequence is what contextlib.contextmanager wants
self.setup(*args)
yield
self.finish()
def _run(self):
# Uses subprocess to call the program for assembling frames into a
# movie file. *args* returns the sequence of command line arguments
# from a few configuration options.
command = self._args()
if verbose.ge('debug'):
output = sys.stdout
else:
output = subprocess.PIPE
verbose.report('MovieWriter.run: running command: %s' %
' '.join(command))
self._proc = subprocess.Popen(command, shell=False,
stdout=output, stderr=output,
stdin=subprocess.PIPE,
creationflags=subprocess_creation_flags)
def finish(self):
'Finish any processing for writing the movie.'
self.cleanup()
def grab_frame(self, **savefig_kwargs):
'''
Grab the image information from the figure and save as a movie frame.
All keyword arguments in savefig_kwargs are passed on to the 'savefig'
command that saves the figure.
'''
verbose.report('MovieWriter.grab_frame: Grabbing frame.',
level='debug')
try:
# Tell the figure to save its data to the sink, using the
# frame format and dpi.
self.fig.savefig(self._frame_sink(), format=self.frame_format,
dpi=self.dpi, **savefig_kwargs)
except (RuntimeError, IOError) as e:
out, err = self._proc.communicate()
verbose.report('MovieWriter -- Error '
'running proc:\n%s\n%s' % (out,
err), level='helpful')
raise IOError('Error saving animation to file (cause: {0}) '
'Stdout: {1} StdError: {2}. It may help to re-run '
'with --verbose-debug.'.format(e, out, err))
def _frame_sink(self):
'Returns the place to which frames should be written.'
return self._proc.stdin
def _args(self):
'Assemble list of utility-specific command-line arguments.'
        raise NotImplementedError("args needs to be implemented by subclass.")
def cleanup(self):
'Clean-up and collect the process used to write the movie file.'
out, err = self._proc.communicate()
self._frame_sink().close()
verbose.report('MovieWriter -- '
'Command stdout:\n%s' % out, level='debug')
verbose.report('MovieWriter -- '
'Command stderr:\n%s' % err, level='debug')
@classmethod
def bin_path(cls):
'''
Returns the binary path to the commandline tool used by a specific
subclass. This is a class method so that the tool can be looked for
before making a particular MovieWriter subclass available.
'''
return rcParams[cls.exec_key]
@classmethod
def isAvailable(cls):
'''
Check to see if a MovieWriter subclass is actually available by
running the commandline tool.
'''
if not cls.bin_path():
return False
try:
p = subprocess.Popen(cls.bin_path(),
shell=False,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
creationflags=subprocess_creation_flags)
p.communicate()
return True
except OSError:
return False
class FileMovieWriter(MovieWriter):
'`MovieWriter` subclass that handles writing to a file.'
    # In general, if frames are written to files on disk, it's not important
# that they all be identically sized
frame_size_can_vary = True
def __init__(self, *args, **kwargs):
MovieWriter.__init__(self, *args, **kwargs)
self.frame_format = rcParams['animation.frame_format']
def setup(self, fig, outfile, dpi, frame_prefix='_tmp', clear_temp=True):
'''
Perform setup for writing the movie file.
fig: `matplotlib.Figure` instance
The figure object that contains the information for frames
outfile: string
The filename of the resulting movie file
dpi: int
The DPI (or resolution) for the file. This controls the size
in pixels of the resulting movie file.
frame_prefix: string, optional
The filename prefix to use for the temporary files. Defaults
to '_tmp'
clear_temp: bool
Specifies whether the temporary files should be deleted after
the movie is written. (Useful for debugging.) Defaults to True.
'''
self.fig = fig
self.outfile = outfile
self.dpi = dpi
self.clear_temp = clear_temp
self.temp_prefix = frame_prefix
self._frame_counter = 0 # used for generating sequential file names
self._temp_names = list()
self.fname_format_str = '%s%%07d.%s'
@property
def frame_format(self):
'''
Format (png, jpeg, etc.) to use for saving the frames, which can be
decided by the individual subclasses.
'''
return self._frame_format
@frame_format.setter
def frame_format(self, frame_format):
if frame_format in self.supported_formats:
self._frame_format = frame_format
else:
self._frame_format = self.supported_formats[0]
def _base_temp_name(self):
# Generates a template name (without number) given the frame format
# for extension and the prefix.
return self.fname_format_str % (self.temp_prefix, self.frame_format)
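    # For example (illustrative values): with the default prefix '_tmp' and a
    # 'png' frame format, fname_format_str expands to '_tmp%07d.png', so the
    # frame with counter 2 is written to '_tmp0000002.png'.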
def _frame_sink(self):
# Creates a filename for saving using the basename and the current
# counter.
fname = self._base_temp_name() % self._frame_counter
# Save the filename so we can delete it later if necessary
self._temp_names.append(fname)
verbose.report(
'FileMovieWriter.frame_sink: saving frame %d to fname=%s' %
(self._frame_counter, fname),
level='debug')
self._frame_counter += 1 # Ensures each created name is 'unique'
# This file returned here will be closed once it's used by savefig()
# because it will no longer be referenced and will be gc-ed.
return open(fname, 'wb')
def grab_frame(self, **savefig_kwargs):
'''
Grab the image information from the figure and save as a movie frame.
All keyword arguments in savefig_kwargs are passed on to the 'savefig'
command that saves the figure.
'''
# Overloaded to explicitly close temp file.
verbose.report('MovieWriter.grab_frame: Grabbing frame.',
level='debug')
try:
# Tell the figure to save its data to the sink, using the
# frame format and dpi.
myframesink = self._frame_sink()
self.fig.savefig(myframesink, format=self.frame_format,
dpi=self.dpi, **savefig_kwargs)
myframesink.close()
except RuntimeError:
out, err = self._proc.communicate()
verbose.report('MovieWriter -- Error '
'running proc:\n%s\n%s' % (out,
err), level='helpful')
raise
def finish(self):
# Call run here now that all frame grabbing is done. All temp files
# are available to be assembled.
self._run()
MovieWriter.finish(self) # Will call clean-up
# Check error code for creating file here, since we just run
# the process here, rather than having an open pipe.
if self._proc.returncode:
raise RuntimeError('Error creating movie, return code: '
+ str(self._proc.returncode)
+ ' Try running with --verbose-debug')
def cleanup(self):
MovieWriter.cleanup(self)
# Delete temporary files
if self.clear_temp:
verbose.report(
'MovieWriter: clearing temporary fnames=%s' %
str(self._temp_names),
level='debug')
for fname in self._temp_names:
os.remove(fname)
# Base class of ffmpeg information. Has the config keys and the common set
# of arguments that controls the *output* side of things.
class FFMpegBase(object):
exec_key = 'animation.ffmpeg_path'
args_key = 'animation.ffmpeg_args'
@property
def output_args(self):
args = ['-vcodec', self.codec]
# For h264, the default format is yuv444p, which is not compatible
# with quicktime (and others). Specifying yuv420p fixes playback on
        # iOS, as well as HTML5 video in firefox and safari (on both Win and
# OSX). Also fixes internet explorer. This is as of 2015/10/29.
if self.codec == 'h264' and '-pix_fmt' not in self.extra_args:
args.extend(['-pix_fmt', 'yuv420p'])
# The %dk adds 'k' as a suffix so that ffmpeg treats our bitrate as in
# kbps
if self.bitrate > 0:
args.extend(['-b', '%dk' % self.bitrate])
if self.extra_args:
args.extend(self.extra_args)
for k, v in six.iteritems(self.metadata):
args.extend(['-metadata', '%s=%s' % (k, v)])
return args + ['-y', self.outfile]
# Combine FFMpeg options with pipe-based writing
@writers.register('ffmpeg')
class FFMpegWriter(MovieWriter, FFMpegBase):
def _args(self):
# Returns the command line parameters for subprocess to use
# ffmpeg to create a movie using a pipe.
args = [self.bin_path(), '-f', 'rawvideo', '-vcodec', 'rawvideo',
'-s', '%dx%d' % self.frame_size, '-pix_fmt', self.frame_format,
'-r', str(self.fps)]
# Logging is quieted because subprocess.PIPE has limited buffer size.
if not verbose.ge('debug'):
args += ['-loglevel', 'quiet']
args += ['-i', 'pipe:'] + self.output_args
return args
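# For illustration only (concrete values are assumptions, not captured
# output): with fps=5, codec='h264', a 640x480 frame size, the default 'rgba'
# frame format, no bitrate and no extra arguments, the command assembled
# above is roughly
# ['ffmpeg', '-f', 'rawvideo', '-vcodec', 'rawvideo', '-s', '640x480',
#  '-pix_fmt', 'rgba', '-r', '5', '-loglevel', 'quiet', '-i', 'pipe:',
#  '-vcodec', 'h264', '-pix_fmt', 'yuv420p', '-y', 'output.mp4']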
# Combine FFMpeg options with temp file-based writing
@writers.register('ffmpeg_file')
class FFMpegFileWriter(FileMovieWriter, FFMpegBase):
supported_formats = ['png', 'jpeg', 'ppm', 'tiff', 'sgi', 'bmp',
'pbm', 'raw', 'rgba']
def _args(self):
# Returns the command line parameters for subprocess to use
# ffmpeg to create a movie using a collection of temp images
return [self.bin_path(), '-i', self._base_temp_name(),
'-vframes', str(self._frame_counter),
'-r', str(self.fps)] + self.output_args
# Base class of avconv information. AVConv has identical arguments to
# FFMpeg
class AVConvBase(FFMpegBase):
exec_key = 'animation.avconv_path'
args_key = 'animation.avconv_args'
# Combine AVConv options with pipe-based writing
@writers.register('avconv')
class AVConvWriter(AVConvBase, FFMpegWriter):
pass
# Combine AVConv options with file-based writing
@writers.register('avconv_file')
class AVConvFileWriter(AVConvBase, FFMpegFileWriter):
pass
# Base class of mencoder information. Contains configuration key information
# as well as arguments for controlling *output*
class MencoderBase(object):
exec_key = 'animation.mencoder_path'
args_key = 'animation.mencoder_args'
# Mencoder only allows certain keys, other ones cause the program
# to fail.
allowed_metadata = ['name', 'artist', 'genre', 'subject', 'copyright',
'srcform', 'comment']
# Mencoder mandates using name, but 'title' works better with ffmpeg.
    # If we find it, just put its value into name
def _remap_metadata(self):
if 'title' in self.metadata:
self.metadata['name'] = self.metadata['title']
@property
def output_args(self):
self._remap_metadata()
lavcopts = {'vcodec': self.codec}
if self.bitrate > 0:
lavcopts.update(vbitrate=self.bitrate)
args = ['-o', self.outfile, '-ovc', 'lavc', '-lavcopts',
':'.join(itertools.starmap('{0}={1}'.format,
lavcopts.items()))]
if self.extra_args:
args.extend(self.extra_args)
if self.metadata:
args.extend(['-info', ':'.join('%s=%s' % (k, v)
for k, v in six.iteritems(self.metadata)
if k in self.allowed_metadata)])
return args
# Combine Mencoder options with pipe-based writing
@writers.register('mencoder')
class MencoderWriter(MovieWriter, MencoderBase):
def _args(self):
# Returns the command line parameters for subprocess to use
# mencoder to create a movie
return [self.bin_path(), '-', '-demuxer', 'rawvideo', '-rawvideo',
('w=%i:h=%i:' % self.frame_size +
'fps=%i:format=%s' % (self.fps,
self.frame_format))] + self.output_args
# Combine Mencoder options with temp file-based writing
@writers.register('mencoder_file')
class MencoderFileWriter(FileMovieWriter, MencoderBase):
supported_formats = ['png', 'jpeg', 'tga', 'sgi']
def _args(self):
# Returns the command line parameters for subprocess to use
# mencoder to create a movie
return [self.bin_path(),
'mf://%s*.%s' % (self.temp_prefix, self.frame_format),
'-frames', str(self._frame_counter), '-mf',
'type=%s:fps=%d' % (self.frame_format,
self.fps)] + self.output_args
# Base class for animated GIFs with convert utility
class ImageMagickBase(object):
exec_key = 'animation.convert_path'
args_key = 'animation.convert_args'
@property
def delay(self):
return 100. / self.fps
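    # Note: ImageMagick's ``-delay`` is expressed in ticks of 1/100 s, so e.g.
    # fps=20 maps to a delay value of 5.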
@property
def output_args(self):
return [self.outfile]
@classmethod
def _init_from_registry(cls):
if sys.platform != 'win32' or rcParams[cls.exec_key] != 'convert':
return
from matplotlib.externals.six.moves import winreg
for flag in (0, winreg.KEY_WOW64_32KEY, winreg.KEY_WOW64_64KEY):
try:
hkey = winreg.OpenKeyEx(winreg.HKEY_LOCAL_MACHINE,
'Software\\Imagemagick\\Current',
0, winreg.KEY_QUERY_VALUE | flag)
binpath = winreg.QueryValueEx(hkey, 'BinPath')[0]
winreg.CloseKey(hkey)
binpath += '\\convert.exe'
break
except Exception:
binpath = ''
rcParams[cls.exec_key] = rcParamsDefault[cls.exec_key] = binpath
ImageMagickBase._init_from_registry()
@writers.register('imagemagick')
class ImageMagickWriter(MovieWriter, ImageMagickBase):
def _args(self):
return ([self.bin_path(),
'-size', '%ix%i' % self.frame_size, '-depth', '8',
'-delay', str(self.delay), '-loop', '0',
'%s:-' % self.frame_format]
+ self.output_args)
@writers.register('imagemagick_file')
class ImageMagickFileWriter(FileMovieWriter, ImageMagickBase):
supported_formats = ['png', 'jpeg', 'ppm', 'tiff', 'sgi', 'bmp',
'pbm', 'raw', 'rgba']
def _args(self):
return ([self.bin_path(), '-delay', str(self.delay), '-loop', '0',
'%s*.%s' % (self.temp_prefix, self.frame_format)]
+ self.output_args)
class Animation(object):
'''
This class wraps the creation of an animation using matplotlib. It is
only a base class which should be subclassed to provide needed behavior.
*fig* is the figure object that is used to get draw, resize, and any
other needed events.
*event_source* is a class that can run a callback when desired events
are generated, as well as be stopped and started. Examples include timers
(see :class:`TimedAnimation`) and file system notifications.
*blit* is a boolean that controls whether blitting is used to optimize
drawing.
'''
def __init__(self, fig, event_source=None, blit=False):
self._fig = fig
# Disables blitting for backends that don't support it. This
# allows users to request it if available, but still have a
# fallback that works if it is not.
self._blit = blit and fig.canvas.supports_blit
# These are the basics of the animation. The frame sequence represents
# information for each frame of the animation and depends on how the
# drawing is handled by the subclasses. The event source fires events
# that cause the frame sequence to be iterated.
self.frame_seq = self.new_frame_seq()
self.event_source = event_source
# Instead of starting the event source now, we connect to the figure's
# draw_event, so that we only start once the figure has been drawn.
self._first_draw_id = fig.canvas.mpl_connect('draw_event', self._start)
# Connect to the figure's close_event so that we don't continue to
# fire events and try to draw to a deleted figure.
self._close_id = self._fig.canvas.mpl_connect('close_event',
self._stop)
if self._blit:
self._setup_blit()
def _start(self, *args):
'''
Starts interactive animation. Adds the draw frame command to the GUI
handler, calls show to start the event loop.
'''
# First disconnect our draw event handler
self._fig.canvas.mpl_disconnect(self._first_draw_id)
self._first_draw_id = None # So we can check on save
# Now do any initial draw
self._init_draw()
# Add our callback for stepping the animation and
# actually start the event_source.
self.event_source.add_callback(self._step)
self.event_source.start()
def _stop(self, *args):
# On stop we disconnect all of our events.
if self._blit:
self._fig.canvas.mpl_disconnect(self._resize_id)
self._fig.canvas.mpl_disconnect(self._close_id)
self.event_source.remove_callback(self._step)
self.event_source = None
def save(self, filename, writer=None, fps=None, dpi=None, codec=None,
bitrate=None, extra_args=None, metadata=None, extra_anim=None,
savefig_kwargs=None):
'''
Saves a movie file by drawing every frame.
*filename* is the output filename, e.g., :file:`mymovie.mp4`
*writer* is either an instance of :class:`MovieWriter` or a string
key that identifies a class to use, such as 'ffmpeg' or 'mencoder'.
If nothing is passed, the value of the rcparam `animation.writer` is
used.
*dpi* controls the dots per inch for the movie frames. This combined
with the figure's size in inches controls the size of the movie.
*savefig_kwargs* is a dictionary containing keyword arguments to be
passed on to the 'savefig' command which is called repeatedly to save
the individual frames. This can be used to set tight bounding boxes,
for example.
*extra_anim* is a list of additional `Animation` objects that should
be included in the saved movie file. These need to be from the same
`matplotlib.Figure` instance. Also, animation frames will just be
simply combined, so there should be a 1:1 correspondence between
the frames from the different animations.
These remaining arguments are used to construct a :class:`MovieWriter`
instance when necessary and are only considered valid if *writer* is
not a :class:`MovieWriter` instance.
*fps* is the frames per second in the movie. Defaults to None,
which will use the animation's specified interval to set the frames
per second.
*codec* is the video codec to be used. Not all codecs are supported
by a given :class:`MovieWriter`. If none is given, this defaults to the
value specified by the rcparam `animation.codec`.
*bitrate* specifies the amount of bits used per second in the
compressed movie, in kilobits per second. A higher number means a
higher quality movie, but at the cost of increased file size. If no
value is given, this defaults to the value given by the rcparam
`animation.bitrate`.
*extra_args* is a list of extra string arguments to be passed to the
underlying movie utility. The default is None, which passes the
additional arguments in the 'animation.extra_args' rcParam.
*metadata* is a dictionary of keys and values for metadata to include
in the output file. Some keys that may be of use include:
title, artist, genre, subject, copyright, srcform, comment.
'''
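        # A hedged usage sketch (assumes ffmpeg is installed and that `anim`
        # is an Animation instance; both are illustrative, not provided here):
        #
        #     anim.save('movie.mp4', writer='ffmpeg', fps=30, bitrate=1800,
        #               metadata={'title': 'demo', 'artist': 'me'})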
# If the writer is None, use the rc param to find the name of the one
# to use
if writer is None:
writer = rcParams['animation.writer']
elif (not is_string_like(writer) and
any(arg is not None
for arg in (fps, codec, bitrate, extra_args, metadata))):
            raise RuntimeError('Passing in values for arguments '
'fps, codec, bitrate, extra_args, or metadata '
'is not supported when writer is an existing '
'MovieWriter instance. These should instead be '
'passed as arguments when creating the '
'MovieWriter instance.')
if savefig_kwargs is None:
savefig_kwargs = {}
# Need to disconnect the first draw callback, since we'll be doing
# draws. Otherwise, we'll end up starting the animation.
if self._first_draw_id is not None:
self._fig.canvas.mpl_disconnect(self._first_draw_id)
reconnect_first_draw = True
else:
reconnect_first_draw = False
if fps is None and hasattr(self, '_interval'):
# Convert interval in ms to frames per second
fps = 1000. / self._interval
# Re-use the savefig DPI for ours if none is given
if dpi is None:
dpi = rcParams['savefig.dpi']
if dpi == 'figure':
dpi = self._fig.dpi
if codec is None:
codec = rcParams['animation.codec']
if bitrate is None:
bitrate = rcParams['animation.bitrate']
all_anim = [self]
if extra_anim is not None:
all_anim.extend(anim
for anim
in extra_anim if anim._fig is self._fig)
# If we have the name of a writer, instantiate an instance of the
# registered class.
if is_string_like(writer):
if writer in writers.avail:
writer = writers[writer](fps, codec, bitrate,
extra_args=extra_args,
metadata=metadata)
else:
warnings.warn("MovieWriter %s unavailable" % writer)
try:
writer = writers[writers.list()[0]](fps, codec, bitrate,
extra_args=extra_args,
metadata=metadata)
except IndexError:
raise ValueError("Cannot save animation: no writers are "
"available. Please install mencoder or "
"ffmpeg to save animations.")
verbose.report('Animation.save using %s' % type(writer),
level='helpful')
# FIXME: Using 'bbox_inches' doesn't currently work with
# writers that pipe the data to the command because this
# requires a fixed frame size (see Ryan May's reply in this
# thread: [1]). Thus we drop the 'bbox_inches' argument if it
# exists in savefig_kwargs.
#
# [1] (http://matplotlib.1069221.n5.nabble.com/
# Animation-class-let-save-accept-kwargs-which-
# are-passed-on-to-savefig-td39627.html)
#
if 'bbox_inches' in savefig_kwargs and not writer.frame_size_can_vary:
warnings.warn("Warning: discarding the 'bbox_inches' argument in "
"'savefig_kwargs' as it not supported by "
"{0}).".format(writer.__class__.__name__))
savefig_kwargs.pop('bbox_inches')
# Create a new sequence of frames for saved data. This is different
# from new_frame_seq() to give the ability to save 'live' generated
# frame information to be saved later.
# TODO: Right now, after closing the figure, saving a movie won't work
# since GUI widgets are gone. Either need to remove extra code to
# allow for this non-existent use case or find a way to make it work.
with rc_context():
# See above about bbox_inches savefig kwarg
if (not writer.frame_size_can_vary and
rcParams['savefig.bbox'] == 'tight'):
verbose.report("Disabling savefig.bbox = 'tight', as it is "
"not supported by "
"{0}.".format(writer.__class__.__name__),
level='helpful')
rcParams['savefig.bbox'] = None
with writer.saving(self._fig, filename, dpi):
for anim in all_anim:
# Clear the initial frame
anim._init_draw()
for data in zip(*[a.new_saved_frame_seq()
for a in all_anim]):
for anim, d in zip(all_anim, data):
# TODO: See if turning off blit is really necessary
anim._draw_next_frame(d, blit=False)
writer.grab_frame(**savefig_kwargs)
# Reconnect signal for first draw if necessary
if reconnect_first_draw:
self._first_draw_id = self._fig.canvas.mpl_connect('draw_event',
self._start)
def _step(self, *args):
'''
Handler for getting events. By default, gets the next frame in the
sequence and hands the data off to be drawn.
'''
# Returns True to indicate that the event source should continue to
# call _step, until the frame sequence reaches the end of iteration,
# at which point False will be returned.
try:
framedata = next(self.frame_seq)
self._draw_next_frame(framedata, self._blit)
return True
except StopIteration:
return False
def new_frame_seq(self):
'Creates a new sequence of frame information.'
# Default implementation is just an iterator over self._framedata
return iter(self._framedata)
def new_saved_frame_seq(self):
'Creates a new sequence of saved/cached frame information.'
# Default is the same as the regular frame sequence
return self.new_frame_seq()
def _draw_next_frame(self, framedata, blit):
# Breaks down the drawing of the next frame into steps of pre- and
# post- draw, as well as the drawing of the frame itself.
self._pre_draw(framedata, blit)
self._draw_frame(framedata)
self._post_draw(framedata, blit)
def _init_draw(self):
# Initial draw to clear the frame. Also used by the blitting code
# when a clean base is required.
pass
def _pre_draw(self, framedata, blit):
# Perform any cleaning or whatnot before the drawing of the frame.
# This default implementation allows blit to clear the frame.
if blit:
self._blit_clear(self._drawn_artists, self._blit_cache)
def _draw_frame(self, framedata):
# Performs actual drawing of the frame.
raise NotImplementedError('Needs to be implemented by subclasses to'
' actually make an animation.')
def _post_draw(self, framedata, blit):
# After the frame is rendered, this handles the actual flushing of
# the draw, which can be a direct draw_idle() or make use of the
# blitting.
if blit and self._drawn_artists:
self._blit_draw(self._drawn_artists, self._blit_cache)
else:
self._fig.canvas.draw_idle()
# The rest of the code in this class is to facilitate easy blitting
def _blit_draw(self, artists, bg_cache):
# Handles blitted drawing, which renders only the artists given instead
# of the entire figure.
updated_ax = []
for a in artists:
# If we haven't cached the background for this axes object, do
# so now. This might not always be reliable, but it's an attempt
# to automate the process.
if a.axes not in bg_cache:
bg_cache[a.axes] = a.figure.canvas.copy_from_bbox(a.axes.bbox)
a.axes.draw_artist(a)
updated_ax.append(a.axes)
# After rendering all the needed artists, blit each axes individually.
for ax in set(updated_ax):
ax.figure.canvas.blit(ax.bbox)
def _blit_clear(self, artists, bg_cache):
# Get a list of the axes that need clearing from the artists that
# have been drawn. Grab the appropriate saved background from the
# cache and restore.
axes = set(a.axes for a in artists)
for a in axes:
a.figure.canvas.restore_region(bg_cache[a])
def _setup_blit(self):
# Setting up the blit requires: a cache of the background for the
# axes
self._blit_cache = dict()
self._drawn_artists = []
self._resize_id = self._fig.canvas.mpl_connect('resize_event',
self._handle_resize)
self._post_draw(None, self._blit)
def _handle_resize(self, *args):
# On resize, we need to disable the resize event handling so we don't
# get too many events. Also stop the animation events, so that
# we're paused. Reset the cache and re-init. Set up an event handler
# to catch once the draw has actually taken place.
self._fig.canvas.mpl_disconnect(self._resize_id)
self.event_source.stop()
self._blit_cache.clear()
self._init_draw()
self._resize_id = self._fig.canvas.mpl_connect('draw_event',
self._end_redraw)
def _end_redraw(self, evt):
# Now that the redraw has happened, do the post draw flushing and
# blit handling. Then re-enable all of the original events.
self._post_draw(None, self._blit)
self.event_source.start()
self._fig.canvas.mpl_disconnect(self._resize_id)
self._resize_id = self._fig.canvas.mpl_connect('resize_event',
self._handle_resize)
def to_html5_video(self):
r'''Returns animation as an HTML5 video tag.
This saves the animation as an h264 video, encoded in base64
directly into the HTML5 video tag. This respects the rc parameters
for the writer as well as the bitrate. This also makes use of the
``interval`` to control the speed, and uses the ``repeat``
parameter to decide whether to loop.
'''
VIDEO_TAG = r'''<video {size} {options}>
<source type="video/mp4" src="data:video/mp4;base64,{video}">
Your browser does not support the video tag.
</video>'''
        # Cache the rendering of the video as HTML
if not hasattr(self, '_base64_video'):
# First write the video to a tempfile. Set delete to False
# so we can re-open to read binary data.
with tempfile.NamedTemporaryFile(suffix='.m4v',
delete=False) as f:
# We create a writer manually so that we can get the
# appropriate size for the tag
Writer = writers[rcParams['animation.writer']]
writer = Writer(codec='h264',
bitrate=rcParams['animation.bitrate'],
fps=1000. / self._interval)
self.save(f.name, writer=writer)
# Now open and base64 encode
with open(f.name, 'rb') as video:
vid64 = encodebytes(video.read())
self._base64_video = vid64.decode('ascii')
self._video_size = 'width="{0}" height="{1}"'.format(
*writer.frame_size)
# Now we can remove
os.remove(f.name)
# Default HTML5 options are to autoplay and to display video controls
options = ['controls', 'autoplay']
# If we're set to repeat, make it loop
if self.repeat:
options.append('loop')
return VIDEO_TAG.format(video=self._base64_video,
size=self._video_size,
options=' '.join(options))
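    # A usage sketch for notebooks (assumes an IPython/Jupyter session and an
    # h264-capable writer; `anim` is illustrative):
    #
    #     from IPython.display import HTML
    #     HTML(anim.to_html5_video())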
def _repr_html_(self):
r'IPython display hook for rendering.'
fmt = rcParams['animation.html']
if fmt == 'html5':
return self.to_html5_video()
class TimedAnimation(Animation):
'''
:class:`Animation` subclass that supports time-based animation, drawing
a new frame every *interval* milliseconds.
*repeat* controls whether the animation should repeat when the sequence
of frames is completed.
*repeat_delay* optionally adds a delay in milliseconds before repeating
the animation.
'''
def __init__(self, fig, interval=200, repeat_delay=None, repeat=True,
event_source=None, *args, **kwargs):
# Store the timing information
self._interval = interval
self._repeat_delay = repeat_delay
self.repeat = repeat
# If we're not given an event source, create a new timer. This permits
# sharing timers between animation objects for syncing animations.
if event_source is None:
event_source = fig.canvas.new_timer()
event_source.interval = self._interval
Animation.__init__(self, fig, event_source=event_source,
*args, **kwargs)
def _step(self, *args):
'''
Handler for getting events.
'''
# Extends the _step() method for the Animation class. If
# Animation._step signals that it reached the end and we want to
# repeat, we refresh the frame sequence and return True. If
# _repeat_delay is set, change the event_source's interval to our loop
# delay and set the callback to one which will then set the interval
# back.
still_going = Animation._step(self, *args)
if not still_going and self.repeat:
self._init_draw()
self.frame_seq = self.new_frame_seq()
if self._repeat_delay:
self.event_source.remove_callback(self._step)
self.event_source.add_callback(self._loop_delay)
self.event_source.interval = self._repeat_delay
return True
else:
return Animation._step(self, *args)
else:
return still_going
def _stop(self, *args):
# If we stop in the middle of a loop delay (which is relatively likely
        # given the potential pause here), remove the loop_delay callback as
# well.
self.event_source.remove_callback(self._loop_delay)
Animation._stop(self)
def _loop_delay(self, *args):
# Reset the interval and change callbacks after the delay.
self.event_source.remove_callback(self._loop_delay)
self.event_source.interval = self._interval
self.event_source.add_callback(self._step)
Animation._step(self)
class ArtistAnimation(TimedAnimation):
'''
    Before creating an instance, all plotting should have taken place
    and the relevant artists saved.
    *artists* is a list, with each entry a collection of artists that
represent what needs to be enabled on each frame. These will be disabled
for other frames.
'''
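    # A minimal sketch of the expected input (illustrative; `data` stands in
    # for a hypothetical sequence of 2D arrays):
    #
    #     import matplotlib.pyplot as plt
    #
    #     fig, ax = plt.subplots()
    #     frames = [[ax.imshow(frame, animated=True)] for frame in data]
    #     anim = ArtistAnimation(fig, frames, interval=50, blit=True)
    #     plt.show()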
def __init__(self, fig, artists, *args, **kwargs):
# Internal list of artists drawn in the most recent frame.
self._drawn_artists = []
# Use the list of artists as the framedata, which will be iterated
# over by the machinery.
self._framedata = artists
TimedAnimation.__init__(self, fig, *args, **kwargs)
def _init_draw(self):
# Make all the artists involved in *any* frame invisible
figs = set()
for f in self.new_frame_seq():
for artist in f:
artist.set_visible(False)
artist.set_animated(self._blit)
# Assemble a list of unique axes that need flushing
if artist.axes.figure not in figs:
figs.add(artist.axes.figure)
# Flush the needed axes
for fig in figs:
fig.canvas.draw_idle()
def _pre_draw(self, framedata, blit):
'''
Clears artists from the last frame.
'''
if blit:
# Let blit handle clearing
self._blit_clear(self._drawn_artists, self._blit_cache)
else:
# Otherwise, make all the artists from the previous frame invisible
for artist in self._drawn_artists:
artist.set_visible(False)
def _draw_frame(self, artists):
# Save the artists that were passed in as framedata for the other
# steps (esp. blitting) to use.
self._drawn_artists = artists
# Make all the artists from the current frame visible
for artist in artists:
artist.set_visible(True)
class FuncAnimation(TimedAnimation):
'''
Makes an animation by repeatedly calling a function *func*, passing in
(optional) arguments in *fargs*.
*frames* can be a generator, an iterable, or a number of frames.
*init_func* is a function used to draw a clear frame. If not given, the
results of drawing from the first item in the frames sequence will be
used. This function will be called once before the first frame.
If blit=True, *func* and *init_func* must return an iterable of
artists to be re-drawn.
*kwargs* include *repeat*, *repeat_delay*, and *interval*:
*interval* draws a new frame every *interval* milliseconds.
*repeat* controls whether the animation should repeat when the sequence
of frames is completed.
*repeat_delay* optionally adds a delay in milliseconds before repeating
the animation.
'''
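    # A minimal, self-contained sketch (illustrative only, not part of the
    # library API):
    #
    #     import matplotlib.pyplot as plt
    #     import numpy as np
    #
    #     fig, ax = plt.subplots()
    #     ax.set_xlim(0, 2 * np.pi)
    #     ax.set_ylim(-1, 1)
    #     line, = ax.plot([], [])
    #
    #     def init():
    #         line.set_data([], [])
    #         return line,
    #
    #     def update(i):
    #         x = np.linspace(0, 2 * np.pi, 200)
    #         line.set_data(x, np.sin(x + i / 10.0))
    #         return line,
    #
    #     anim = FuncAnimation(fig, update, frames=200, init_func=init,
    #                          interval=20, blit=True)
    #     plt.show()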
def __init__(self, fig, func, frames=None, init_func=None, fargs=None,
save_count=None, **kwargs):
if fargs:
self._args = fargs
else:
self._args = ()
self._func = func
# Amount of framedata to keep around for saving movies. This is only
# used if we don't know how many frames there will be: in the case
# of no generator or in the case of a callable.
self.save_count = save_count
# Set up a function that creates a new iterable when needed. If nothing
# is passed in for frames, just use itertools.count, which will just
# keep counting from 0. A callable passed in for frames is assumed to
# be a generator. An iterable will be used as is, and anything else
# will be treated as a number of frames.
if frames is None:
self._iter_gen = itertools.count
elif six.callable(frames):
self._iter_gen = frames
elif iterable(frames):
self._iter_gen = lambda: iter(frames)
if hasattr(frames, '__len__'):
self.save_count = len(frames)
else:
self._iter_gen = lambda: xrange(frames).__iter__()
self.save_count = frames
        # If save_count was not provided, fall back to a default of 100 frames.
if self.save_count is None:
self.save_count = 100
self._init_func = init_func
# Needs to be initialized so the draw functions work without checking
self._save_seq = []
TimedAnimation.__init__(self, fig, **kwargs)
# Need to reset the saved seq, since right now it will contain data
# for a single frame from init, which is not what we want.
self._save_seq = []
def new_frame_seq(self):
# Use the generating function to generate a new frame sequence
return self._iter_gen()
def new_saved_frame_seq(self):
# Generate an iterator for the sequence of saved data. If there are
# no saved frames, generate a new frame sequence and take the first
# save_count entries in it.
if self._save_seq:
# While iterating we are going to update _save_seq
# so make a copy to safely iterate over
self._old_saved_seq = list(self._save_seq)
return iter(self._old_saved_seq)
else:
return itertools.islice(self.new_frame_seq(), self.save_count)
def _init_draw(self):
# Initialize the drawing either using the given init_func or by
# calling the draw function with the first item of the frame sequence.
# For blitting, the init_func should return a sequence of modified
# artists.
if self._init_func is None:
self._draw_frame(next(self.new_frame_seq()))
else:
self._drawn_artists = self._init_func()
if self._blit:
if self._drawn_artists is None:
raise RuntimeError('The init_func must return a '
'sequence of Artist objects.')
for a in self._drawn_artists:
a.set_animated(self._blit)
self._save_seq = []
def _draw_frame(self, framedata):
# Save the data for potential saving of movies.
self._save_seq.append(framedata)
# Make sure to respect save_count (keep only the last save_count
# around)
self._save_seq = self._save_seq[-self.save_count:]
# Call the func with framedata and args. If blitting is desired,
# func needs to return a sequence of any artists that were modified.
self._drawn_artists = self._func(framedata, *self._args)
if self._blit:
if self._drawn_artists is None:
raise RuntimeError('The animation function must return a '
'sequence of Artist objects.')
for a in self._drawn_artists:
a.set_animated(self._blit)
| mit |
sf-wind/caffe2 | setup.py | 1 | 6678 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from distutils.spawn import find_executable
from distutils import sysconfig, log
import setuptools
import setuptools.command.build_py
import setuptools.command.develop
import setuptools.command.build_ext
from collections import namedtuple
import os
import shlex
import subprocess
import sys
from textwrap import dedent
TOP_DIR = os.path.realpath(os.path.dirname(__file__))
SRC_DIR = os.path.join(TOP_DIR, 'caffe2')
install_requires = set()
setup_requires = set()
test_requires = set()
################################################################################
# Pre Check
################################################################################
assert find_executable('cmake'), 'Could not find "cmake" executable!'
assert find_executable('make'), 'Could not find "make" executable!'
################################################################################
# Version
################################################################################
try:
git_version = subprocess.check_output(['git', 'describe', '--tags', 'HEAD'],
cwd=TOP_DIR).decode('ascii').strip()
except subprocess.CalledProcessError:
git_version = None
with open(os.path.join(TOP_DIR, 'VERSION_NUMBER')) as version_file:
VersionInfo = namedtuple('VersionInfo', ['version', 'git_version'])(
version=version_file.read().strip(),
git_version=git_version
)
################################################################################
# Customized commands
################################################################################
class Caffe2Command(setuptools.Command):
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
class create_version(Caffe2Command):
def run(self):
with open(os.path.join(SRC_DIR, 'version.py'), 'w') as f:
f.write(dedent('''
version = '{version}'
git_version = '{git_version}'
'''.format(**dict(VersionInfo._asdict()))))
class build_py(setuptools.command.build_py.build_py):
def run(self):
self.run_command('create_version')
setuptools.command.build_py.build_py.run(self)
class develop(setuptools.command.develop.develop):
def run(self):
raise RuntimeError('develop mode is not supported!')
class build_ext(setuptools.command.build_ext.build_ext):
def _build_with_cmake(self):
build_temp = os.path.realpath(self.build_temp)
build_lib = os.path.realpath(self.build_lib)
if 'CMAKE_INSTALL_DIR' not in os.environ:
cmake_install_dir = os.path.join(build_temp, 'cmake_install')
py_exe = sys.executable
py_inc = sysconfig.get_python_inc()
if 'CMAKE_ARGS' in os.environ:
cmake_args = shlex.split(os.environ['CMAKE_ARGS'])
# prevent crossfire with downstream scripts
del os.environ['CMAKE_ARGS']
else:
cmake_args = []
log.info('CMAKE_ARGS: {}'.format(cmake_args))
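            # For example (a hypothetical invocation, not enforced here):
            #   CMAKE_ARGS="-DUSE_CUDA=OFF" python setup.py install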
self.compiler.spawn([
os.path.join(TOP_DIR, 'scripts', 'build_local.sh'),
'-DBUILD_SHARED_LIBS=OFF',
# TODO: Investigate why BUILD_SHARED_LIBS=OFF USE_GLOO=ON
# will cause error 'target "gloo" that is not in the
# export set' in cmake.
'-DUSE_GLOO=OFF',
'-DCMAKE_INSTALL_PREFIX:PATH={}'.format(cmake_install_dir),
'-DPYTHON_EXECUTABLE:FILEPATH={}'.format(py_exe),
'-DPYTHON_INCLUDE_DIR={}'.format(py_inc),
'-DBUILD_TEST=OFF',
            '-DBUILD_BENCHMARK=OFF',
'-DBUILD_BINARY=OFF',
TOP_DIR
] + cmake_args)
# This is assuming build_local.sh will use TOP_DIR/build
# as the cmake build directory
self.compiler.spawn([
'make',
'-C', os.path.join(TOP_DIR, 'build'),
'install'
])
else:
cmake_install_dir = os.environ['CMAKE_INSTALL_DIR']
for d in ['caffe', 'caffe2']:
self.copy_tree(os.path.join(cmake_install_dir, d),
os.path.join(build_lib, d))
def get_outputs(self):
return [os.path.join(self.build_lib, d)
for d in ['caffe', 'caffe2']]
def build_extensions(self):
assert len(self.extensions) == 1
self._build_with_cmake()
cmdclass = {
'create_version': create_version,
'build_py': build_py,
'develop': develop,
'build_ext': build_ext,
}
################################################################################
# Extensions
################################################################################
ext_modules = [setuptools.Extension(str('caffe2-ext'), [])]
################################################################################
# Packages
################################################################################
packages = []
install_requires.update(['protobuf',
'numpy',
'flask',
'future',
'graphviz',
'hypothesis',
'jupyter',
'matplotlib',
'pydot',
'python-nvd3',
'pyyaml',
'requests',
'scikit-image',
'scipy',
'setuptools',
'six',
'tornado'])
################################################################################
# Test
################################################################################
setup_requires.add('pytest-runner')
test_requires.update(['pytest-cov', 'hypothesis'])
################################################################################
# Final
################################################################################
setuptools.setup(
name='caffe2',
version=VersionInfo.version,
description='Caffe2',
ext_modules=ext_modules,
cmdclass=cmdclass,
packages=packages,
install_requires=install_requires,
setup_requires=setup_requires,
tests_require=test_requires,
author='jiayq',
author_email='[email protected]',
url='https://caffe2.ai',
)
| apache-2.0 |
zhreshold/mxnet | example/gluon/house_prices/kaggle_k_fold_cross_validation.py | 26 | 6854 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# This example provides an end-to-end pipeline for a common Kaggle competition.
# The entire pipeline includes common utilities such as k-fold cross validation
# and data pre-processing.
#
# Specifically, the example studies the `House Prices: Advanced Regression
# Techniques` challenge as a case study.
#
# The link to the problem on Kaggle:
# https://www.kaggle.com/c/house-prices-advanced-regression-techniques
import numpy as np
import pandas as pd
from mxnet import autograd
from mxnet import gluon
from mxnet import ndarray as nd
# After logging in www.kaggle.com, the training and testing data sets can be downloaded at:
# https://www.kaggle.com/c/house-prices-advanced-regression-techniques/download/train.csv
# https://www.kaggle.com/c/house-prices-advanced-regression-techniques/download/test.csv
train = pd.read_csv("train.csv")
test = pd.read_csv("test.csv")
all_X = pd.concat((train.loc[:, 'MSSubClass':'SaleCondition'],
test.loc[:, 'MSSubClass':'SaleCondition']))
# Get all the numerical features and apply standardization.
numeric_feas = all_X.dtypes[all_X.dtypes != "object"].index
all_X[numeric_feas] = all_X[numeric_feas].apply(lambda x:
(x - x.mean()) / (x.std()))
# Convert categorical feature values to numerical (including N/A).
all_X = pd.get_dummies(all_X, dummy_na=True)
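# For instance, a hypothetical categorical column 'MSZoning' with values
# ['RL', 'RM', NaN] becomes indicator columns 'MSZoning_RL', 'MSZoning_RM'
# and 'MSZoning_nan' after get_dummies(..., dummy_na=True).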
# Approximate N/A feature value by the mean value of the current feature.
all_X = all_X.fillna(all_X.mean())
num_train = train.shape[0]
# Convert data formats to NDArrays to feed into gluon.
X_train = all_X[:num_train].as_matrix()
X_test = all_X[num_train:].as_matrix()
y_train = train.SalePrice.as_matrix()
X_train = nd.array(X_train)
y_train = nd.array(y_train)
y_train = y_train.reshape((num_train, 1))
X_test = nd.array(X_test)
square_loss = gluon.loss.L2Loss()
def get_rmse_log(net, X_train, y_train):
"""Gets root mse between the logarithms of the prediction and the truth."""
num_train = X_train.shape[0]
clipped_preds = nd.clip(net(X_train), 1, float('inf'))
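    # gluon's L2Loss is 0.5 * (pred - label)^2, so multiplying the summed loss
    # by 2 below recovers the plain squared error before the square root.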
return np.sqrt(2 * nd.sum(square_loss(
nd.log(clipped_preds), nd.log(y_train))).asscalar() / num_train)
def get_net():
"""Gets a neural network. Better results are obtained with modifications."""
net = gluon.nn.Sequential()
with net.name_scope():
net.add(gluon.nn.Dense(50, activation="relu"))
net.add(gluon.nn.Dense(1))
net.initialize()
return net
def train(net, X_train, y_train, epochs, verbose_epoch, learning_rate,
weight_decay, batch_size):
"""Trains the model."""
dataset_train = gluon.data.ArrayDataset(X_train, y_train)
data_iter_train = gluon.data.DataLoader(dataset_train, batch_size,
shuffle=True)
trainer = gluon.Trainer(net.collect_params(), 'adam',
{'learning_rate': learning_rate,
'wd': weight_decay})
net.initialize(force_reinit=True)
for epoch in range(epochs):
for data, label in data_iter_train:
with autograd.record():
output = net(data)
loss = square_loss(output, label)
loss.backward()
trainer.step(batch_size)
avg_loss = get_rmse_log(net, X_train, y_train)
if epoch > verbose_epoch:
print("Epoch %d, train loss: %f" % (epoch, avg_loss))
return avg_loss
def k_fold_cross_valid(k, epochs, verbose_epoch, X_train, y_train,
learning_rate, weight_decay, batch_size):
"""Conducts k-fold cross validation for the model."""
assert k > 1
fold_size = X_train.shape[0] // k
train_loss_sum = 0.0
test_loss_sum = 0.0
for test_idx in range(k):
X_val_test = X_train[test_idx * fold_size: (test_idx + 1) *
fold_size, :]
y_val_test = y_train[test_idx * fold_size: (test_idx + 1) * fold_size]
val_train_defined = False
for i in range(k):
if i != test_idx:
X_cur_fold = X_train[i * fold_size: (i + 1) * fold_size, :]
y_cur_fold = y_train[i * fold_size: (i + 1) * fold_size]
if not val_train_defined:
X_val_train = X_cur_fold
y_val_train = y_cur_fold
val_train_defined = True
else:
X_val_train = nd.concat(X_val_train, X_cur_fold, dim=0)
y_val_train = nd.concat(y_val_train, y_cur_fold, dim=0)
net = get_net()
train_loss = train(net, X_val_train, y_val_train, epochs, verbose_epoch,
learning_rate, weight_decay, batch_size)
train_loss_sum += train_loss
test_loss = get_rmse_log(net, X_val_test, y_val_test)
print("Test loss: %f" % test_loss)
test_loss_sum += test_loss
return train_loss_sum / k, test_loss_sum / k
# The sets of parameters. Better results are obtained with modifications.
# These parameters can be fine-tuned with k-fold cross-validation.
k = 5
epochs = 100
verbose_epoch = 95
learning_rate = 0.3
weight_decay = 100
batch_size = 100
train_loss, test_loss = \
k_fold_cross_valid(k, epochs, verbose_epoch, X_train, y_train,
learning_rate, weight_decay, batch_size)
print("%d-fold validation: Avg train loss: %f, Avg test loss: %f" %
(k, train_loss, test_loss))
def learn(epochs, verbose_epoch, X_train, y_train, test, learning_rate,
weight_decay, batch_size):
"""Trains the model and predicts on the test data set."""
net = get_net()
_ = train(net, X_train, y_train, epochs, verbose_epoch, learning_rate,
weight_decay, batch_size)
preds = net(X_test).asnumpy()
test['SalePrice'] = pd.Series(preds.reshape(1, -1)[0])
submission = pd.concat([test['Id'], test['SalePrice']], axis=1)
submission.to_csv('submission.csv', index=False)
learn(epochs, verbose_epoch, X_train, y_train, test, learning_rate,
weight_decay, batch_size)
| apache-2.0 |
ZENGXH/scikit-learn | examples/plot_kernel_approximation.py | 262 | 8004 | """
==================================================
Explicit feature map approximation for RBF kernels
==================================================
An example illustrating the approximation of the feature map
of an RBF kernel.
.. currentmodule:: sklearn.kernel_approximation
It shows how to use :class:`RBFSampler` and :class:`Nystroem` to
approximate the feature map of an RBF kernel for classification with an SVM on
the digits dataset. Results using a linear SVM in the original space, a linear
SVM using the approximate mappings and using a kernelized SVM are compared.
Timings and accuracy for varying amounts of Monte Carlo samplings (in the case
of :class:`RBFSampler`, which uses random Fourier features) and different sized
subsets of the training set (for :class:`Nystroem`) for the approximate mapping
are shown.
Please note that the dataset here is not large enough to show the benefits
of kernel approximation, as the exact SVM is still reasonably fast.
Sampling more dimensions clearly leads to better classification results, but
comes at a greater cost. This means there is a tradeoff between runtime and
accuracy, given by the parameter n_components. Note that solving the Linear
SVM and also the approximate kernel SVM could be greatly accelerated by using
stochastic gradient descent via :class:`sklearn.linear_model.SGDClassifier`.
This is not easily possible for the case of the kernelized SVM.
The second plot visualizes the decision surfaces of the RBF kernel SVM and
the linear SVM with approximate kernel maps.
The plot shows decision surfaces of the classifiers projected onto
the first two principal components of the data. This visualization should
be taken with a grain of salt since it is just an interesting slice through
the decision surface in 64 dimensions. In particular note that
a datapoint (represented as a dot) is not necessarily classified
into the region it appears to lie in, since it does not lie exactly on the
plane spanned by the first two principal components.
The usage of :class:`RBFSampler` and :class:`Nystroem` is described in detail
in :ref:`kernel_approximation`.
"""
print(__doc__)
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# Andreas Mueller <[email protected]>
# License: BSD 3 clause
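# As the docstring notes, both linear pipelines could be accelerated further
# with stochastic gradient descent; a hedged sketch (not run in this example):
#
#     from sklearn.linear_model import SGDClassifier
#     sgd_fourier = pipeline.Pipeline([("feature_map", RBFSampler(gamma=.2)),
#                                      ("svm", SGDClassifier())])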
# Standard scientific Python imports
import matplotlib.pyplot as plt
import numpy as np
from time import time
# Import datasets, classifiers and performance metrics
from sklearn import datasets, svm, pipeline
from sklearn.kernel_approximation import (RBFSampler,
Nystroem)
from sklearn.decomposition import PCA
# The digits dataset
digits = datasets.load_digits(n_class=9)
# To apply a classifier on this data, we need to flatten the image, to
# turn the data in a (samples, feature) matrix:
n_samples = len(digits.data)
data = digits.data / 16.
data -= data.mean(axis=0)
# We learn the digits on the first half of the digits
data_train, targets_train = data[:n_samples // 2], digits.target[:n_samples // 2]
# Now predict the value of the digit on the second half:
data_test, targets_test = data[n_samples // 2:], digits.target[n_samples // 2:]
#data_test = scaler.transform(data_test)
# Create a classifier: a support vector classifier
kernel_svm = svm.SVC(gamma=.2)
linear_svm = svm.LinearSVC()
# create pipeline from kernel approximation
# and linear svm
feature_map_fourier = RBFSampler(gamma=.2, random_state=1)
feature_map_nystroem = Nystroem(gamma=.2, random_state=1)
fourier_approx_svm = pipeline.Pipeline([("feature_map", feature_map_fourier),
("svm", svm.LinearSVC())])
nystroem_approx_svm = pipeline.Pipeline([("feature_map", feature_map_nystroem),
("svm", svm.LinearSVC())])
# fit and predict using linear and kernel svm:
kernel_svm_time = time()
kernel_svm.fit(data_train, targets_train)
kernel_svm_score = kernel_svm.score(data_test, targets_test)
kernel_svm_time = time() - kernel_svm_time
linear_svm_time = time()
linear_svm.fit(data_train, targets_train)
linear_svm_score = linear_svm.score(data_test, targets_test)
linear_svm_time = time() - linear_svm_time
sample_sizes = 30 * np.arange(1, 10)
fourier_scores = []
nystroem_scores = []
fourier_times = []
nystroem_times = []
for D in sample_sizes:
fourier_approx_svm.set_params(feature_map__n_components=D)
nystroem_approx_svm.set_params(feature_map__n_components=D)
start = time()
nystroem_approx_svm.fit(data_train, targets_train)
nystroem_times.append(time() - start)
start = time()
fourier_approx_svm.fit(data_train, targets_train)
fourier_times.append(time() - start)
fourier_score = fourier_approx_svm.score(data_test, targets_test)
nystroem_score = nystroem_approx_svm.score(data_test, targets_test)
nystroem_scores.append(nystroem_score)
fourier_scores.append(fourier_score)
# plot the results:
plt.figure(figsize=(8, 8))
accuracy = plt.subplot(211)
# second y axis for timings
timescale = plt.subplot(212)
accuracy.plot(sample_sizes, nystroem_scores, label="Nystroem approx. kernel")
timescale.plot(sample_sizes, nystroem_times, '--',
label='Nystroem approx. kernel')
accuracy.plot(sample_sizes, fourier_scores, label="Fourier approx. kernel")
timescale.plot(sample_sizes, fourier_times, '--',
label='Fourier approx. kernel')
# horizontal lines for exact rbf and linear kernels:
accuracy.plot([sample_sizes[0], sample_sizes[-1]],
[linear_svm_score, linear_svm_score], label="linear svm")
timescale.plot([sample_sizes[0], sample_sizes[-1]],
[linear_svm_time, linear_svm_time], '--', label='linear svm')
accuracy.plot([sample_sizes[0], sample_sizes[-1]],
[kernel_svm_score, kernel_svm_score], label="rbf svm")
timescale.plot([sample_sizes[0], sample_sizes[-1]],
[kernel_svm_time, kernel_svm_time], '--', label='rbf svm')
# vertical line for dataset dimensionality = 64
accuracy.plot([64, 64], [0.7, 1], label="n_features")
# legends and labels
accuracy.set_title("Classification accuracy")
timescale.set_title("Training times")
accuracy.set_xlim(sample_sizes[0], sample_sizes[-1])
accuracy.set_xticks(())
accuracy.set_ylim(np.min(fourier_scores), 1)
timescale.set_xlabel("Sampling steps = transformed feature dimension")
accuracy.set_ylabel("Classification accuracy")
timescale.set_ylabel("Training time in seconds")
accuracy.legend(loc='best')
timescale.legend(loc='best')
# visualize the decision surface, projected down to the first
# two principal components of the dataset
pca = PCA(n_components=8).fit(data_train)
X = pca.transform(data_train)
# Generate grid along first two principal components
multiples = np.arange(-2, 2, 0.1)
# steps along first component
first = multiples[:, np.newaxis] * pca.components_[0, :]
# steps along second component
second = multiples[:, np.newaxis] * pca.components_[1, :]
# combine
grid = first[np.newaxis, :, :] + second[:, np.newaxis, :]
flat_grid = grid.reshape(-1, data.shape[1])
# title for the plots
titles = ['SVC with rbf kernel',
'SVC (linear kernel)\n with Fourier rbf feature map\n'
'n_components=100',
'SVC (linear kernel)\n with Nystroem rbf feature map\n'
'n_components=100']
plt.tight_layout()
plt.figure(figsize=(12, 5))
# predict and plot
for i, clf in enumerate((kernel_svm, nystroem_approx_svm,
fourier_approx_svm)):
# Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max]x[y_min, y_max].
plt.subplot(1, 3, i + 1)
Z = clf.predict(flat_grid)
# Put the result into a color plot
Z = Z.reshape(grid.shape[:-1])
plt.contourf(multiples, multiples, Z, cmap=plt.cm.Paired)
plt.axis('off')
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=targets_train, cmap=plt.cm.Paired)
plt.title(titles[i])
plt.tight_layout()
plt.show()
| bsd-3-clause |
gotomypc/scikit-learn | sklearn/datasets/tests/test_mldata.py | 384 | 5221 | """Test functionality of mldata fetching utilities."""
import os
import shutil
import tempfile
import scipy as sp
from sklearn import datasets
from sklearn.datasets import mldata_filename, fetch_mldata
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_not_in
from sklearn.utils.testing import mock_mldata_urlopen
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import with_setup
from sklearn.utils.testing import assert_array_equal
tmpdir = None
def setup_tmpdata():
# create temporary dir
global tmpdir
tmpdir = tempfile.mkdtemp()
os.makedirs(os.path.join(tmpdir, 'mldata'))
def teardown_tmpdata():
# remove temporary dir
if tmpdir is not None:
shutil.rmtree(tmpdir)
def test_mldata_filename():
cases = [('datasets-UCI iris', 'datasets-uci-iris'),
('news20.binary', 'news20binary'),
('book-crossing-ratings-1.0', 'book-crossing-ratings-10'),
('Nile Water Level', 'nile-water-level'),
('MNIST (original)', 'mnist-original')]
for name, desired in cases:
assert_equal(mldata_filename(name), desired)
@with_setup(setup_tmpdata, teardown_tmpdata)
def test_download():
"""Test that fetch_mldata is able to download and cache a data set."""
_urlopen_ref = datasets.mldata.urlopen
datasets.mldata.urlopen = mock_mldata_urlopen({
'mock': {
'label': sp.ones((150,)),
'data': sp.ones((150, 4)),
},
})
try:
mock = fetch_mldata('mock', data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data"]:
assert_in(n, mock)
assert_equal(mock.target.shape, (150,))
assert_equal(mock.data.shape, (150, 4))
assert_raises(datasets.mldata.HTTPError,
fetch_mldata, 'not_existing_name')
finally:
datasets.mldata.urlopen = _urlopen_ref
@with_setup(setup_tmpdata, teardown_tmpdata)
def test_fetch_one_column():
_urlopen_ref = datasets.mldata.urlopen
try:
dataname = 'onecol'
# create fake data set in cache
x = sp.arange(6).reshape(2, 3)
datasets.mldata.urlopen = mock_mldata_urlopen({dataname: {'x': x}})
dset = fetch_mldata(dataname, data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "data"]:
assert_in(n, dset)
assert_not_in("target", dset)
assert_equal(dset.data.shape, (2, 3))
assert_array_equal(dset.data, x)
# transposing the data array
dset = fetch_mldata(dataname, transpose_data=False, data_home=tmpdir)
assert_equal(dset.data.shape, (3, 2))
finally:
datasets.mldata.urlopen = _urlopen_ref
@with_setup(setup_tmpdata, teardown_tmpdata)
def test_fetch_multiple_column():
_urlopen_ref = datasets.mldata.urlopen
try:
# create fake data set in cache
x = sp.arange(6).reshape(2, 3)
y = sp.array([1, -1])
z = sp.arange(12).reshape(4, 3)
# by default
dataname = 'threecol-default'
datasets.mldata.urlopen = mock_mldata_urlopen({
dataname: (
{
'label': y,
'data': x,
'z': z,
},
['z', 'data', 'label'],
),
})
dset = fetch_mldata(dataname, data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data", "z"]:
assert_in(n, dset)
assert_not_in("x", dset)
assert_not_in("y", dset)
assert_array_equal(dset.data, x)
assert_array_equal(dset.target, y)
assert_array_equal(dset.z, z.T)
# by order
dataname = 'threecol-order'
datasets.mldata.urlopen = mock_mldata_urlopen({
dataname: ({'y': y, 'x': x, 'z': z},
['y', 'x', 'z']), })
dset = fetch_mldata(dataname, data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data", "z"]:
assert_in(n, dset)
assert_not_in("x", dset)
assert_not_in("y", dset)
assert_array_equal(dset.data, x)
assert_array_equal(dset.target, y)
assert_array_equal(dset.z, z.T)
# by number
dataname = 'threecol-number'
datasets.mldata.urlopen = mock_mldata_urlopen({
dataname: ({'y': y, 'x': x, 'z': z},
['z', 'x', 'y']),
})
dset = fetch_mldata(dataname, target_name=2, data_name=0,
data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data", "x"]:
assert_in(n, dset)
assert_not_in("y", dset)
assert_not_in("z", dset)
assert_array_equal(dset.data, z)
assert_array_equal(dset.target, y)
# by name
dset = fetch_mldata(dataname, target_name='y', data_name='z',
data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data", "x"]:
assert_in(n, dset)
assert_not_in("y", dset)
assert_not_in("z", dset)
finally:
datasets.mldata.urlopen = _urlopen_ref
| bsd-3-clause |
pompiduskus/scikit-learn | sklearn/utils/testing.py | 84 | 24860 | """Testing utilities."""
# Copyright (c) 2011, 2012
# Authors: Pietro Berkes,
# Andreas Muller
# Mathieu Blondel
# Olivier Grisel
# Arnaud Joly
# Denis Engemann
# License: BSD 3 clause
import os
import inspect
import pkgutil
import warnings
import sys
import re
import platform
import scipy as sp
import scipy.io
from functools import wraps
try:
# Python 2
from urllib2 import urlopen
from urllib2 import HTTPError
except ImportError:
# Python 3+
from urllib.request import urlopen
from urllib.error import HTTPError
import tempfile
import shutil
import os.path as op
import atexit
# WindowsError only exist on Windows
try:
WindowsError
except NameError:
WindowsError = None
import sklearn
from sklearn.base import BaseEstimator
from sklearn.externals import joblib
# Conveniently import all assertions in one place.
from nose.tools import assert_equal
from nose.tools import assert_not_equal
from nose.tools import assert_true
from nose.tools import assert_false
from nose.tools import assert_raises
from nose.tools import raises
from nose import SkipTest
from nose import with_setup
from numpy.testing import assert_almost_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_less
import numpy as np
from sklearn.base import (ClassifierMixin, RegressorMixin, TransformerMixin,
ClusterMixin)
__all__ = ["assert_equal", "assert_not_equal", "assert_raises",
"assert_raises_regexp", "raises", "with_setup", "assert_true",
"assert_false", "assert_almost_equal", "assert_array_equal",
"assert_array_almost_equal", "assert_array_less",
"assert_less", "assert_less_equal",
"assert_greater", "assert_greater_equal"]
try:
from nose.tools import assert_in, assert_not_in
except ImportError:
# Nose < 1.0.0
def assert_in(x, container):
assert_true(x in container, msg="%r in %r" % (x, container))
def assert_not_in(x, container):
assert_false(x in container, msg="%r in %r" % (x, container))
try:
from nose.tools import assert_raises_regex
except ImportError:
# for Python 2
def assert_raises_regex(expected_exception, expected_regexp,
callable_obj=None, *args, **kwargs):
"""Helper function to check for message patterns in exceptions"""
not_raised = False
try:
callable_obj(*args, **kwargs)
not_raised = True
except expected_exception as e:
error_message = str(e)
if not re.compile(expected_regexp).search(error_message):
raise AssertionError("Error message should match pattern "
"%r. %r does not." %
(expected_regexp, error_message))
if not_raised:
raise AssertionError("%s not raised by %s" %
(expected_exception.__name__,
callable_obj.__name__))
# assert_raises_regexp is deprecated in Python 3.4 in favor of
# assert_raises_regex but lets keep the bacward compat in scikit-learn with
# the old name for now
assert_raises_regexp = assert_raises_regex
def _assert_less(a, b, msg=None):
message = "%r is not lower than %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a < b, message
def _assert_greater(a, b, msg=None):
message = "%r is not greater than %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a > b, message
def assert_less_equal(a, b, msg=None):
message = "%r is not lower than or equal to %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a <= b, message
def assert_greater_equal(a, b, msg=None):
message = "%r is not greater than or equal to %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a >= b, message
def assert_warns(warning_class, func, *args, **kw):
"""Test that a certain warning occurs.
Parameters
----------
warning_class : the warning class
The class to test for, e.g. UserWarning.
func : callable
        Callable object to trigger warnings.
*args : the positional arguments to `func`.
**kw : the keyword arguments to `func`
Returns
-------
result : the return value of `func`
"""
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# Trigger a warning.
result = func(*args, **kw)
if hasattr(np, 'VisibleDeprecationWarning'):
# Filter out numpy-specific warnings in numpy >= 1.9
w = [e for e in w
if e.category is not np.VisibleDeprecationWarning]
# Verify some things
if not len(w) > 0:
raise AssertionError("No warning raised when calling %s"
% func.__name__)
found = any(warning.category is warning_class for warning in w)
if not found:
raise AssertionError("%s did not give warning: %s( is %s)"
% (func.__name__, warning_class, w))
return result
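# A usage sketch (names are hypothetical, for illustration only):
#
#     def deprecated_add(a, b):
#         warnings.warn("use add() instead", DeprecationWarning)
#         return a + b
#
#     assert_warns(DeprecationWarning, deprecated_add, 1, 2)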
def assert_warns_message(warning_class, message, func, *args, **kw):
# very important to avoid uncontrolled state propagation
"""Test that a certain warning occurs and with a certain message.
Parameters
----------
warning_class : the warning class
The class to test for, e.g. UserWarning.
message : str | callable
The entire message or a substring to test for. If callable,
it takes a string as argument and will trigger an assertion error
if it returns `False`.
func : callable
        Callable object to trigger warnings.
*args : the positional arguments to `func`.
**kw : the keyword arguments to `func`.
Returns
-------
result : the return value of `func`
"""
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
if hasattr(np, 'VisibleDeprecationWarning'):
# Let's not catch the numpy internal DeprecationWarnings
warnings.simplefilter('ignore', np.VisibleDeprecationWarning)
# Trigger a warning.
result = func(*args, **kw)
# Verify some things
if not len(w) > 0:
raise AssertionError("No warning raised when calling %s"
% func.__name__)
found = [issubclass(warning.category, warning_class) for warning in w]
if not any(found):
raise AssertionError("No warning raised for %s with class "
"%s"
% (func.__name__, warning_class))
message_found = False
# Checks the message of all warnings belong to warning_class
for index in [i for i, x in enumerate(found) if x]:
# substring will match, the entire message with typo won't
msg = w[index].message # For Python 3 compatibility
msg = str(msg.args[0] if hasattr(msg, 'args') else msg)
if callable(message): # add support for certain tests
check_in_message = message
else:
check_in_message = lambda msg: message in msg
if check_in_message(msg):
message_found = True
break
if not message_found:
raise AssertionError("Did not receive the message you expected "
"('%s') for <%s>, got: '%s'"
% (message, func.__name__, msg))
return result
# To remove when we support numpy 1.7
def assert_no_warnings(func, *args, **kw):
# XXX: once we may depend on python >= 2.6, this can be replaced by the
# warnings module context manager.
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
result = func(*args, **kw)
if hasattr(np, 'VisibleDeprecationWarning'):
# Filter out numpy-specific warnings in numpy >= 1.9
w = [e for e in w
if e.category is not np.VisibleDeprecationWarning]
if len(w) > 0:
raise AssertionError("Got warnings when calling %s: %s"
% (func.__name__, w))
return result
def ignore_warnings(obj=None):
""" Context manager and decorator to ignore warnings
Note. Using this (in both variants) will clear all warnings
from all python modules loaded. In case you need to test
cross-module-warning-logging this is not your tool of choice.
Examples
--------
>>> with ignore_warnings():
... warnings.warn('buhuhuhu')
>>> def nasty_warn():
... warnings.warn('buhuhuhu')
... print(42)
>>> ignore_warnings(nasty_warn)()
42
"""
if callable(obj):
return _ignore_warnings(obj)
else:
return _IgnoreWarnings()
def _ignore_warnings(fn):
"""Decorator to catch and hide warnings without visual nesting"""
@wraps(fn)
def wrapper(*args, **kwargs):
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
return fn(*args, **kwargs)
w[:] = []
return wrapper
class _IgnoreWarnings(object):
"""Improved and simplified Python warnings context manager
Copied from Python 2.7.5 and modified as required.
"""
def __init__(self):
"""
Parameters
==========
category : warning class
The category to filter. Defaults to Warning. If None,
all categories will be muted.
"""
self._record = True
self._module = sys.modules['warnings']
self._entered = False
self.log = []
def __repr__(self):
args = []
if self._record:
args.append("record=True")
if self._module is not sys.modules['warnings']:
args.append("module=%r" % self._module)
name = type(self).__name__
return "%s(%s)" % (name, ", ".join(args))
def __enter__(self):
clean_warning_registry() # be safe and not propagate state + chaos
warnings.simplefilter('always')
if self._entered:
raise RuntimeError("Cannot enter %r twice" % self)
self._entered = True
self._filters = self._module.filters
self._module.filters = self._filters[:]
self._showwarning = self._module.showwarning
if self._record:
self.log = []
def showwarning(*args, **kwargs):
self.log.append(warnings.WarningMessage(*args, **kwargs))
self._module.showwarning = showwarning
return self.log
else:
return None
def __exit__(self, *exc_info):
if not self._entered:
raise RuntimeError("Cannot exit %r without entering first" % self)
self._module.filters = self._filters
self._module.showwarning = self._showwarning
self.log[:] = []
clean_warning_registry() # be safe and not propagate state + chaos
try:
from nose.tools import assert_less
except ImportError:
assert_less = _assert_less
try:
from nose.tools import assert_greater
except ImportError:
assert_greater = _assert_greater
def _assert_allclose(actual, desired, rtol=1e-7, atol=0,
err_msg='', verbose=True):
actual, desired = np.asanyarray(actual), np.asanyarray(desired)
if np.allclose(actual, desired, rtol=rtol, atol=atol):
return
msg = ('Array not equal to tolerance rtol=%g, atol=%g: '
'actual %s, desired %s') % (rtol, atol, actual, desired)
raise AssertionError(msg)
if hasattr(np.testing, 'assert_allclose'):
assert_allclose = np.testing.assert_allclose
else:
assert_allclose = _assert_allclose
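# Illustrative sketch (added, not part of the original module) of how
# assert_allclose behaves with the default tolerances; the numbers are made up:
#     assert_allclose([1.0, 2.0], [1.0, 2.0 + 1e-9])  # within rtol, passes
#     assert_allclose([1.0, 2.0], [1.0, 2.1])         # raises AssertionError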
def assert_raise_message(exceptions, message, function, *args, **kwargs):
"""Helper function to test error messages in exceptions
Parameters
----------
exceptions : exception or tuple of exception
Exception type(s) expected to be raised.
message : str
Substring expected to appear in the raised error message.
function : callable
Callable object expected to raise the error.
*args : the positional arguments to `function`.
**kwargs : the keyword arguments to `function`
"""
try:
function(*args, **kwargs)
except exceptions as e:
error_message = str(e)
if message not in error_message:
raise AssertionError("Error message does not include the expected"
" string: %r. Observed error message: %r" %
(message, error_message))
else:
# concatenate exception names
if isinstance(exceptions, tuple):
names = " or ".join(e.__name__ for e in exceptions)
else:
names = exceptions.__name__
raise AssertionError("%s not raised by %s" %
(names, function.__name__))
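# Illustrative usage sketch (added; `divide` is a hypothetical helper, not
# part of scikit-learn):
#     def divide(a, b):
#         if b == 0:
#             raise ValueError("division by zero is not allowed")
#         return a / b
#     assert_raise_message(ValueError, "division by zero", divide, 1, 0)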
def fake_mldata(columns_dict, dataname, matfile, ordering=None):
"""Create a fake mldata data set.
Parameters
----------
columns_dict : dict, keys=str, values=ndarray
Contains data as columns_dict[column_name] = array of data.
dataname : string
Name of data set.
matfile : string or file object
The file name string or the file-like object of the output file.
ordering : list, default None
List of column_names, determines the ordering in the data set.
Notes
-----
This function transposes all arrays, while fetch_mldata only transposes
'data', take that into account in the tests.
"""
datasets = dict(columns_dict)
# transpose all variables
for name in datasets:
datasets[name] = datasets[name].T
if ordering is None:
ordering = sorted(list(datasets.keys()))
# NOTE: setting up this array is tricky, because of the way Matlab
# re-packages 1D arrays
datasets['mldata_descr_ordering'] = sp.empty((1, len(ordering)),
dtype='object')
for i, name in enumerate(ordering):
datasets['mldata_descr_ordering'][0, i] = name
scipy.io.savemat(matfile, datasets, oned_as='column')
class mock_mldata_urlopen(object):
def __init__(self, mock_datasets):
"""Object that mocks the urlopen function to fake requests to mldata.
`mock_datasets` is a dictionary of {dataset_name: data_dict}, or
{dataset_name: (data_dict, ordering).
`data_dict` itself is a dictionary of {column_name: data_array},
and `ordering` is a list of column_names to determine the ordering
in the data set (see `fake_mldata` for details).
When requesting a dataset with a name that is in mock_datasets,
this object creates a fake dataset in a StringIO object and
returns it. Otherwise, it raises an HTTPError.
"""
self.mock_datasets = mock_datasets
def __call__(self, urlname):
dataset_name = urlname.split('/')[-1]
if dataset_name in self.mock_datasets:
resource_name = '_' + dataset_name
from io import BytesIO
matfile = BytesIO()
dataset = self.mock_datasets[dataset_name]
ordering = None
if isinstance(dataset, tuple):
dataset, ordering = dataset
fake_mldata(dataset, resource_name, matfile, ordering)
matfile.seek(0)
return matfile
else:
raise HTTPError(urlname, 404, dataset_name + " is not available",
[], None)
def install_mldata_mock(mock_datasets):
# Lazy import to avoid mutually recursive imports
from sklearn import datasets
datasets.mldata.urlopen = mock_mldata_urlopen(mock_datasets)
def uninstall_mldata_mock():
# Lazy import to avoid mutually recursive imports
from sklearn import datasets
datasets.mldata.urlopen = urlopen
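# Illustrative sketch (added) of the intended mocking workflow; the dataset
# name and arrays below are made up and must match what the code under test
# requests via fetch_mldata:
#     install_mldata_mock({'some_dataset': {'data': np.zeros((4, 3)),
#                                           'label': np.arange(4)}})
#     try:
#         pass  # code under test that calls datasets.fetch_mldata('some_dataset')
#     finally:
#         uninstall_mldata_mock()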
# Meta estimators need another estimator to be instantiated.
META_ESTIMATORS = ["OneVsOneClassifier",
"OutputCodeClassifier", "OneVsRestClassifier", "RFE",
"RFECV", "BaseEnsemble"]
# estimators that there is no way to default-construct sensibly
OTHER = ["Pipeline", "FeatureUnion", "GridSearchCV",
"RandomizedSearchCV"]
# some strange ones
DONT_TEST = ['SparseCoder', 'EllipticEnvelope', 'DictVectorizer',
'LabelBinarizer', 'LabelEncoder',
'MultiLabelBinarizer', 'TfidfTransformer',
'TfidfVectorizer', 'IsotonicRegression',
'OneHotEncoder', 'RandomTreesEmbedding',
'FeatureHasher', 'DummyClassifier', 'DummyRegressor',
'TruncatedSVD', 'PolynomialFeatures',
'GaussianRandomProjectionHash', 'HashingVectorizer',
'CheckingClassifier', 'PatchExtractor', 'CountVectorizer',
# GradientBoosting base estimators, maybe should
# exclude them in another way
'ZeroEstimator', 'ScaledLogOddsEstimator',
'QuantileEstimator', 'MeanEstimator',
'LogOddsEstimator', 'PriorProbabilityEstimator',
'_SigmoidCalibration', 'VotingClassifier']
def all_estimators(include_meta_estimators=False,
include_other=False, type_filter=None,
include_dont_test=False):
"""Get a list of all estimators from sklearn.
This function crawls the module and gets all classes that inherit
from BaseEstimator. Classes that are defined in test-modules are not
included.
By default meta_estimators such as GridSearchCV are also not included.
Parameters
----------
include_meta_estimators : boolean, default=False
Whether to include meta-estimators that can be constructed using
an estimator as their first argument. These are currently
BaseEnsemble, OneVsOneClassifier, OutputCodeClassifier,
OneVsRestClassifier, RFE, RFECV.
include_other : boolean, default=False
Whether to include meta-estimators that are somehow special and can
not be default-constructed sensibly. These are currently
Pipeline, FeatureUnion and GridSearchCV
include_dont_test : boolean, default=False
Whether to include "special" label estimator or test processors.
type_filter : string, list of string, or None, default=None
Which kind of estimators should be returned. If None, no filter is
applied and all estimators are returned. Possible values are
'classifier', 'regressor', 'cluster' and 'transformer' to get
estimators only of these specific types, or a list of these to
get the estimators that fit at least one of the types.
Returns
-------
estimators : list of tuples
List of (name, class), where ``name`` is the class name as string
and ``class`` is the actual type of the class.
"""
def is_abstract(c):
if not(hasattr(c, '__abstractmethods__')):
return False
if not len(c.__abstractmethods__):
return False
return True
all_classes = []
# get parent folder
path = sklearn.__path__
for importer, modname, ispkg in pkgutil.walk_packages(
path=path, prefix='sklearn.', onerror=lambda x: None):
if ".tests." in modname:
continue
module = __import__(modname, fromlist="dummy")
classes = inspect.getmembers(module, inspect.isclass)
all_classes.extend(classes)
all_classes = set(all_classes)
estimators = [c for c in all_classes
if (issubclass(c[1], BaseEstimator)
and c[0] != 'BaseEstimator')]
# get rid of abstract base classes
estimators = [c for c in estimators if not is_abstract(c[1])]
if not include_dont_test:
estimators = [c for c in estimators if not c[0] in DONT_TEST]
if not include_other:
estimators = [c for c in estimators if not c[0] in OTHER]
# possibly get rid of meta estimators
if not include_meta_estimators:
estimators = [c for c in estimators if not c[0] in META_ESTIMATORS]
if type_filter is not None:
if not isinstance(type_filter, list):
type_filter = [type_filter]
else:
type_filter = list(type_filter) # copy
filtered_estimators = []
filters = {'classifier': ClassifierMixin,
'regressor': RegressorMixin,
'transformer': TransformerMixin,
'cluster': ClusterMixin}
for name, mixin in filters.items():
if name in type_filter:
type_filter.remove(name)
filtered_estimators.extend([est for est in estimators
if issubclass(est[1], mixin)])
estimators = filtered_estimators
if type_filter:
raise ValueError("Parameter type_filter must be 'classifier', "
"'regressor', 'transformer', 'cluster' or None, got"
" %s." % repr(type_filter))
# drop duplicates, sort for reproducibility
return sorted(set(estimators))
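# Illustrative usage sketch (added): iterate over all classifier classes.
#     for name, Estimator in all_estimators(type_filter='classifier'):
#         clf = Estimator()  # some estimators may also need set_random_state(clf)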
def set_random_state(estimator, random_state=0):
if "random_state" in estimator.get_params().keys():
estimator.set_params(random_state=random_state)
def if_matplotlib(func):
"""Test decorator that skips test if matplotlib not installed. """
@wraps(func)
def run_test(*args, **kwargs):
try:
import matplotlib
matplotlib.use('Agg', warn=False)
# this fails if no $DISPLAY specified
import matplotlib.pyplot as plt
plt.figure()
except ImportError:
raise SkipTest('Matplotlib not available.')
else:
return func(*args, **kwargs)
return run_test
def if_not_mac_os(versions=('10.7', '10.8', '10.9'),
message='Multi-process bug in Mac OS X >= 10.7 '
'(see issue #636)'):
"""Test decorator that skips test if OS is Mac OS X and its
major version is one of ``versions``.
"""
mac_version, _, _ = platform.mac_ver()
skip = '.'.join(mac_version.split('.')[:2]) in versions
def decorator(func):
if skip:
@wraps(func)
def func(*args, **kwargs):
raise SkipTest(message)
return func
return decorator
def clean_warning_registry():
"""Safe way to reset warnings """
warnings.resetwarnings()
reg = "__warningregistry__"
for mod_name, mod in list(sys.modules.items()):
if 'six.moves' in mod_name:
continue
if hasattr(mod, reg):
getattr(mod, reg).clear()
def check_skip_network():
if int(os.environ.get('SKLEARN_SKIP_NETWORK_TESTS', 0)):
raise SkipTest("Text tutorial requires large dataset download")
def check_skip_travis():
"""Skip test if being run on Travis."""
if os.environ.get('TRAVIS') == "true":
raise SkipTest("This test needs to be skipped on Travis")
def _delete_folder(folder_path, warn=False):
"""Utility function to cleanup a temporary folder if still existing.
Copied from joblib.pool (for independence)."""
try:
if os.path.exists(folder_path):
# This can fail under windows,
# but will succeed when called by atexit
shutil.rmtree(folder_path)
except WindowsError:
if warn:
warnings.warn("Could not delete temporary folder %s" % folder_path)
class TempMemmap(object):
def __init__(self, data, mmap_mode='r'):
self.temp_folder = tempfile.mkdtemp(prefix='sklearn_testing_')
self.mmap_mode = mmap_mode
self.data = data
def __enter__(self):
fpath = op.join(self.temp_folder, 'data.pkl')
joblib.dump(self.data, fpath)
data_read_only = joblib.load(fpath, mmap_mode=self.mmap_mode)
atexit.register(lambda: _delete_folder(self.temp_folder, warn=True))
return data_read_only
def __exit__(self, exc_type, exc_val, exc_tb):
_delete_folder(self.temp_folder)
with_network = with_setup(check_skip_network)
with_travis = with_setup(check_skip_travis)
| bsd-3-clause |
nddsg/TreeDecomps | xplodnTree/infimir.py | 1 | 20792 | #!/usr/bin/env python
# make the other metrics work
# generate the txt files, then work on the pdf output
# TODO:load_edgelist
__version__ = "0.1.0"
import matplotlib
import numpy as np
import pandas as pd
matplotlib.use('pdf')
import matplotlib.pyplot as plt
import sys
import os
import re
import cPickle
import time
import networkx as nx
from core import PHRG as phrg
from core import tree_decomposition as td
from core import probabilistic_cfg as pcfg
from core import net_metrics as metrics
import pprint as pp
import argparse, traceback
from core import graph_sampler as gs
from multiprocessing import Process
# from core import graph_name
import explodingTree as xt
from core.file_utils import graph_name
import multiprocessing as mp
DBG = False
results = []
# ~#~#~#~#~##~#~#~#~#~##~#~#~#~#~##~#~#~#~#~##~#~#~#~#~##~#~#~#~#~##~#~#~#~#~##~#~#~#~#~##~#~#~#~#~##~#~#~#~100
def get_parser():
parser = argparse.ArgumentParser(description='Infer a model given a graph (derive a model)')
parser.add_argument('--orig', required=True, nargs=1, help='Filename of edgelist graph')
parser.add_argument('--chunglu', help='Generate chunglu graphs', action='store_true')
parser.add_argument('--kron', help='Generate Kronecker product graphs', action='store_true')
parser.add_argument('--samp', help='Sample sg>dur>gg2targetN', action='store_true')
parser.add_argument('-tw', action='store_true', default=0, required=0, help="print xphrg mcs tw")
parser.add_argument('--nstats', action='store_true', default=0, required=0, help="Compute stats?")
parser.add_argument('-prs', action='store_true', default=0, required=0, help="stop at prs")
parser.add_argument('--rnbr', action='store_true', default=0, required=0, help="stop at prs")
parser.add_argument('--version', action='version', version=__version__)
return parser
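# Illustrative command-line sketch (added; the edgelist path is hypothetical):
#     python infimir.py --orig datasets/example_graph.txt --rnbr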
def collect_results(result):
# results.extend(result)
results.append(result)
def Hstar_Graphs_Control(G, graph_name, axs=None):
# Derive the prod rules in a naive way, where
prod_rules = phrg.probabilistic_hrg_learning(G)
pp.pprint(prod_rules)
g = pcfg.Grammar('S')
for (id, lhs, rhs, prob) in prod_rules:
g.add_rule(pcfg.Rule(id, lhs, rhs, prob))
num_nodes = G.number_of_nodes()
print "Starting max size", 'n=', num_nodes
g.set_max_size(num_nodes)
print "Done with max size"
Hstars = []
num_samples = 20
print '*' * 40
for i in range(0, num_samples):
rule_list = g.sample(num_nodes)
hstar = phrg.grow(rule_list, g)[0]
Hstars.append(hstar)
# if 0:
# g = nx.from_pandas_dataframe(df, 'src', 'trg', edge_attr=['ts'])
# draw_degree_whole_graph(g,axs)
# draw_degree(Hstars, axs=axs, col='r')
# #axs.set_title('Rules derived by ignoring time')
# axs.set_ylabel('Frequency')
# axs.set_xlabel('degree')
if 0:
# metricx = [ 'degree','hops', 'clust', 'assort', 'kcore','eigen','gcd']
metricx = ['clust']
# g = nx.from_pandas_dataframe(df, 'src', 'trg',edge_attr=['ts'])
# graph_name = os.path.basename(f_path).rstrip('.tel')
if DBG: print ">", graph_name
metrics.network_properties([G], metricx, Hstars, name=graph_name, out_tsv=True)
def pandas_dataframes_from_edgelists(el_files):
if (el_files is None): return
list_of_dataframes = []
for f in el_files:
print '~' * 80
print f
temporal_graph = False
with open(f, 'r') as ifile:
line = ifile.readline()
while (not temporal_graph):
if ("%" in line):
line = ifile.readline()
elif len(line.split()) > 3:
temporal_graph = True
if (temporal_graph):
dat = np.genfromtxt(f, dtype=np.int64, comments='%', delimiter="\t", usecols=[0, 1, 3],
autostrip=True)
df = pd.DataFrame(dat, columns=['src', 'trg', 'ts'])
else:
dat = np.genfromtxt(f, dtype=np.int64, comments='%', delimiter="\t", usecols=[0, 1],
autostrip=True)
df = pd.DataFrame(dat, columns=['src', 'trg'])
df = df.drop_duplicates()
list_of_dataframes.append(df)
list_of_dataframes.append(df)
return list_of_dataframes
def grow_exact_size_hrg_graphs_from_prod_rules(prod_rules, gname, n, runs=1):
"""
Args:
rules: production rules (model)
gname: graph name
n: target graph order (number of nodes)
runs: how many graphs to generate
Returns: list of synthetic graphs
"""
DBG = True
if n <= 0: sys.exit(1)
g = pcfg.Grammar('S')
for (id, lhs, rhs, prob) in prod_rules:
g.add_rule(pcfg.Rule(id, lhs, rhs, prob))
print
print "Added rules HRG (pr", len(prod_rules), ", n,", n, ")"
num_nodes = n
if DBG: print "Starting max size ..."
t_start = time.time()
g.set_max_size(num_nodes)
print "Done with max size, took %s seconds" % (time.time() - t_start)
hstars_lst = []
print " ",
for i in range(0, runs):
print '>',
rule_list = g.sample(num_nodes)
hstar = phrg.grow(rule_list, g)[0]
hstars_lst.append(hstar)
return hstars_lst
def grow_hrg_graphs_with_infinity(prod_rules, gname, n, runs=1,rnbr = 1, ):
"""
Args:
rules: production rules (model)
gname: graph name
n: target graph order (number of nodes)
runs: how many graphs to generate
Returns: list of synthetic graphs
"""
DBG = True
if n <= 0: sys.exit(1)
g = pcfg.Grammar('S')
for (id, lhs, rhs, prob) in prod_rules:
g.add_rule(pcfg.Rule(id, lhs, rhs, prob))
print
print "Added rules HRG (pr", len(prod_rules), ", n,", n, ")"
num_nodes = n
if DBG: print "Starting max size ..."
t_start = time.time()
g.set_max_size(num_nodes)
print "Done with max size, took %s seconds" % (time.time() - t_start)
hstars_lst = []
print " ",
for i in range(0, runs):
rule_list = g.sample(num_nodes)
hstar = phrg.grow(rule_list, g)[0]
hstars_lst.append(hstar)
return hstars_lst
def pwrlaw_plot(xdata, ydata, yerr):
from scipy import log10, optimize, sqrt
powerlaw = lambda x, amp, index: amp * (x ** index)
logx = log10(xdata)
logy = log10(ydata)
logyerr = yerr / ydata
# define our (line) fitting function
fitfunc = lambda p, x: p[0] + p[1] * x
errfunc = lambda p, x, y, err: (y - fitfunc(p, x)) / err
pinit = [1.0, -1.0]
out = optimize.leastsq(errfunc, pinit,
args=(logx, logy, logyerr), full_output=1)
pfinal = out[0]
covar = out[1]
print pfinal
print covar
index = pfinal[1]
amp = 10.0 ** pfinal[0]
indexErr = sqrt(covar[0][0])
ampErr = sqrt(covar[1][1]) * amp
print index
# ########
# plotting
# ########
# ax.plot(ydata)
# ax.plot(pl_sequence)
fig, axs = plt.subplots(2, 1)
axs[0].plot(xdata, powerlaw(xdata, amp, index)) # Fit
axs[0].errorbar(xdata, ydata, yerr=yerr, fmt='k.') # Data
(yh1, yh2) = (axs[0].get_ylim()[1] * .9, axs[0].get_ylim()[1] * .8)
xh = axs[0].get_xlim()[0] * 1.1
print axs[0].get_ylim()
print (yh1, yh2)
axs[0].text(xh, yh1, 'Ampli = %5.2f +/- %5.2f' % (amp, ampErr))
axs[0].text(xh, yh2, 'Index = %5.2f +/- %5.2f' % (index, indexErr))
axs[0].set_title('Best Fit Power Law')
axs[0].set_xlabel('X')
axs[0].set_ylabel('Y')
# xlim(1, 11)
#
# subplot(2, 1, 2)
axs[1].loglog(xdata, powerlaw(xdata, amp, index))
axs[1].errorbar(xdata, ydata, yerr=yerr, fmt='k.') # Data
axs[1].set_xlabel('X (log scale)')
axs[1].set_ylabel('Y (log scale)')
import datetime
figfname = datetime.datetime.now().strftime("%d%b%y") + "_pl"
plt.savefig(figfname, bbox_inches='tight')
return figfname
def deg_vcnt_to_disk(orig_graph, synthetic_graphs):
df = pd.DataFrame(orig_graph.degree().items())
gb = df.groupby([1]).count()
# gb.to_csv("Results/deg_orig_"+orig_graph.name+".tsv", sep='\t', header=True)
gb.index.rename('k', inplace=True)
gb.columns = ['vcnt']
gb.to_csv("Results/deg_orig_" + orig_graph.name + ".tsv", sep='\t', header=True)
# ## - group of synth graphs -
deg_df = pd.DataFrame()
for g in synthetic_graphs:
d = g.degree()
df = pd.DataFrame.from_dict(d.items())
gb = df.groupby(by=[1]).count()
# Degree vs cnt
deg_df = pd.concat([deg_df, gb], axis=1) # Appends to bottom new DFs
# print gb
deg_df['mean'] = deg_df.mean(axis=1)
deg_df.index.rename('k', inplace=True)
deg_df['mean'].to_csv("Results/deg_xphrg_" + orig_graph.name + ".tsv", sep='\t', header=True)
def plot_g_hstars(orig_graph, synthetic_graphs):
df = pd.DataFrame(orig_graph.degree().items())
gb = df.groupby([1]).count()
# gb.to_csv("Results/deg_orig_"+orig_graph.name+".tsv", sep='\t', header=True)
gb.index.rename('k', inplace=True)
gb.columns = ['vcnt']
# k_cnt = [(x.tolist(),y.values[0]) for x,y in gb.iterrows()]
xdata = np.array([x.tolist() for x, y in gb.iterrows()])
ydata = np.array([y.values[0] for x, y in gb.iterrows()])
yerr = ydata * 0.000001
fig, ax = plt.subplots()
ax.plot(gb.index.values, gb['vcnt'].values, '-o', markersize=8, markerfacecolor='w',
markeredgecolor=[0, 0, 1], alpha=0.5, label="orig")
ofname = pwrlaw_plot(xdata, ydata, yerr)
if os.path.exists(ofname): print '... Plot save to:', ofname
deg_df = pd.DataFrame()
for g in synthetic_graphs:
d = g.degree()
df = pd.DataFrame.from_dict(d.items())
gb = df.groupby(by=[1]).count()
# Degree vs cnt
deg_df = pd.concat([deg_df, gb], axis=1) # Appends to bottom new DFs
# print gb
deg_df['mean'] = deg_df.mean(axis=1)
deg_df.index.rename('k', inplace=True)
# ax.plot(y=deg_df.mean(axis=1))
# ax.plot(y=deg_df.median(axis=1))
# ax.plot()
# orig
deg_df.mean(axis=1).plot(ax=ax, label='mean', color='r')
deg_df.median(axis=1).plot(ax=ax, label='median', color='g')
ax.fill_between(deg_df.index, deg_df.mean(axis=1) - deg_df.sem(axis=1),
deg_df.mean(axis=1) + deg_df.sem(axis=1), alpha=0.2, label="se")
# ax.plot(k_cnt)
# deg_df.plot(ax=ax)
# for x,y in k_cnt:
# if DBG: print "{}\t{}".format(x,y)
#
#
# for g in synths:
# df = pd.DataFrame(g.degree().items())
# gb = df.groupby([1]).count()
# # gb.plot(ax=ax)
# for x,y in k_cnt:
# if DBG: print "{}\t{}".format(x,y)
#
# # Curve-fit
#
plt.savefig('tmpfig', bbox_inches='tight')
def treewidth(parent, children, twlst):
twlst.append(parent)
for x in children:
if isinstance(x, (tuple, list)):
treewidth(x[0], x[1], twlst)
else:
print type(x), len(x)
def print_treewidth(tree):
root, children = tree
print " computing tree width"
twdth = []
treewidth(root, children, twdth)
print ' Treewidth:', np.max([len(x) - 1 for x in twdth])
def get_hrg_production_rules(edgelist_data_frame, graph_name, tw=False,
n_subg=2, n_nodes=300, nstats=False):
t_start = time.time()
df = edgelist_data_frame
if df.shape[1] == 4:
G = nx.from_pandas_dataframe(df, 'src', 'trg', edge_attr=True) # whole graph
elif df.shape[1] == 3:
G = nx.from_pandas_dataframe(df, 'src', 'trg', ['ts']) # whole graph
else:
G = nx.from_pandas_dataframe(df, 'src', 'trg')
G.name = graph_name
print "==> read in graph took: {} seconds".format(time.time() - t_start)
G.remove_edges_from(G.selfloop_edges())
giant_nodes = max(nx.connected_component_subgraphs(G), key=len)
G = nx.subgraph(G, giant_nodes)
num_nodes = G.number_of_nodes()
phrg.graph_checks(G)
if DBG: print
if DBG: print "--------------------"
if not DBG: print "-Tree Decomposition-"
if DBG: print "--------------------"
prod_rules = {}
K = n_subg
n = n_nodes
if num_nodes >= 500:
print 'Grande'
t_start = time.time()
for Gprime in gs.rwr_sample(G, K, n):
T = td.quickbb(Gprime)
root = list(T)[0]
T = td.make_rooted(T, root)
T = phrg.binarize(T)
root = list(T)[0]
root, children = T
# td.new_visit(T, G, prod_rules, TD)
td.new_visit(T, G, prod_rules)
Process(target=td.new_visit, args=(T, G, prod_rules,)).start()
else:
T = td.quickbb(G)
root = list(T)[0]
T = td.make_rooted(T, root)
T = phrg.binarize(T)
root = list(T)[0]
root, children = T
# td.new_visit(T, G, prod_rules, TD)
td.new_visit(T, G, prod_rules)
# print_treewidth(T)
exit()
if DBG: print
if DBG: print "--------------------"
if DBG: print "- Production Rules -"
if DBG: print "--------------------"
for k in prod_rules.iterkeys():
if DBG: print k
s = 0
for d in prod_rules[k]:
s += prod_rules[k][d]
for d in prod_rules[k]:
prod_rules[k][d] = float(prod_rules[k][d]) / float(
s) # normalization step to create probs not counts.
if DBG: print '\t -> ', d, prod_rules[k][d]
rules = []
id = 0
for k, v in prod_rules.iteritems():
sid = 0
for x in prod_rules[k]:
rhs = re.findall("[^()]+", x)
rules.append(("r%d.%d" % (id, sid), "%s" % re.findall("[^()]+", k)[0], rhs, prod_rules[k][x]))
if DBG: print ("r%d.%d" % (id, sid), "%s" % re.findall("[^()]+", k)[0], rhs, prod_rules[k][x])
sid += 1
id += 1
df = pd.DataFrame(rules)
'''print "++++++++++"
df.to_csv('ProdRules/{}_prs.tsv'.format(G.name), header=False, index=False, sep="\t")
if os.path.exists('ProdRules/{}_prs.tsv'.format(G.name)):
print 'Saved', 'ProdRules/{}_prs.tsv'.format(G.name)
else:
print "Trouble saving"
print "-----------"
print [type(x) for x in rules[0]] '''
'''
Graph Generation of Synthetic Graphs
Grow graphs using the union of rules from sampled subgraphs to predict the target order of the
original graph
'''
hStars = grow_exact_size_hrg_graphs_from_prod_rules(rules, graph_name, G.number_of_nodes(), 10)
print '... hStart graphs:', len(hStars)
d = {graph_name + "_hstars": hStars}
with open(r"Results/{}_hstars.pickle".format(graph_name), "wb") as output_file:
cPickle.dump(d, output_file)
if os.path.exists(r"Results/{}_hstars.pickle".format(graph_name)): print "File saved"
'''if nstats:
metricx = ['clust']
metrics.network_properties([G], metricx, hStars, name=graph_name, out_tsv=False)
'''
def get_hrg_production_rules_given(G, tw=False, n_subg=2, n_nodes=300, nstats=False):
G.remove_edges_from(G.selfloop_edges())
giant_nodes = max(nx.connected_component_subgraphs(G), key=len)
G = nx.subgraph(G, giant_nodes)
num_nodes = G.number_of_nodes()
phrg.graph_checks(G)
if DBG: print
if DBG: print "--------------------"
if not DBG: print "-Tree Decomposition-"
if DBG: print "--------------------"
prod_rules = {}
K = n_subg
n = n_nodes
if num_nodes >= 500:
print 'Grande'
t_start = time.time()
for Gprime in gs.rwr_sample(G, K, n):
T = td.quickbb(Gprime)
root = list(T)[0]
T = td.make_rooted(T, root)
T = phrg.binarize(T)
root = list(T)[0]
root, children = T
# td.new_visit(T, G, prod_rules, TD)
td.new_visit(T, G, prod_rules)
Process(target=td.new_visit, args=(T, G, prod_rules,)).start()
else:
T = td.quickbb(G)
root = list(T)[0]
T = td.make_rooted(T, root)
T = phrg.binarize(T)
root = list(T)[0]
root, children = T
# td.new_visit(T, G, prod_rules, TD)
td.new_visit(T, G, prod_rules)
# print_treewidth(T)
exit()
if DBG: print
if DBG: print "--------------------"
if DBG: print "- Production Rules -"
if DBG: print "--------------------"
for k in prod_rules.iterkeys():
if DBG: print k
s = 0
for d in prod_rules[k]:
s += prod_rules[k][d]
for d in prod_rules[k]:
prod_rules[k][d] = float(prod_rules[k][d]) / float(
s) # normalization step to create probs not counts.
if DBG: print '\t -> ', d, prod_rules[k][d]
rules = []
id = 0
for k, v in prod_rules.iteritems():
sid = 0
for x in prod_rules[k]:
rhs = re.findall("[^()]+", x)
rules.append(("r%d.%d" % (id, sid), "%s" % re.findall("[^()]+", k)[0], rhs, prod_rules[k][x]))
if DBG: print ("r%d.%d" % (id, sid), "%s" % re.findall("[^()]+", k)[0], rhs, prod_rules[k][x])
sid += 1
id += 1
df = pd.DataFrame(rules)
'''
Graph Generation of Synthetic Graphs
Grow graphs using the union of rules from sampled subgraphs to predict the target order of the
original graph
exact change to fixed
'''
# hStars = grow_exact_size_hrg_graphs_from_prod_rules(rules, graph_name, G.number_of_nodes(), 10)
hStars = grow_hrg_graphs_with_infinity(rules, graph_name, G.number_of_nodes(), 10,rnbr = 1)
print '... hStart graphs:', len(hStars)
d = {graph_name + "_hstars": hStars}
with open(r"Results/{}_hstars.pickle".format(graph_name), "wb") as output_file:
cPickle.dump(d, output_file)
if os.path.exists(r"Results/{}_hstars.pickle".format(graph_name)): print "File saved"
'''if nstats:
metricx = ['clust']
metrics.network_properties([G], metricx, hStars, name=graph_name, out_tsv=False)
'''
def compute_net_stats_on_read_hrg_pickle(orig_df, gn, metricx):
with open(r"Results/{}_hstars.pickle".format(gn), "rb") as in_file:
c = cPickle.load(in_file)
print " ==> pickle file loaded"
if isinstance(c, dict):
if len(c.keys()) == 1:
c = c.values()[0] # we have k nx gobjects
else:
print c.keys()
if len(orig_df.columns) >= 3:
orig = nx.from_pandas_dataframe(orig_df, 'src', 'trg', edge_attr=['ts'])
else:
orig = nx.from_pandas_dataframe(orig_df, 'src', 'trg')
# metrics.network_properties([orig], metricx, c, name=gn, out_tsv=False)
## --
print 'mp.pool'
p = mp.Pool(processes=20)
for j, gnx in enumerate(c):
if isinstance(gnx, list):
gnx = gnx[0]
p.apply_async(metrics.network_properties, args=([orig], ['clust'], gnx, gn,), callback=collect_results)
print ("p.apply_async(...")
p.close()
p.join()
print (results)
def compute_net_statistics_on(orig_df, gn): # gn = graph name
if gn is "": return
print "%" * 4, gn, "| compute nstats", "%" * 4
hrg_pickle_exists = os.path.exists("Results/{}_hstars.pickle".format(gn))
if hrg_pickle_exists:
metricx = ['clust']
compute_net_stats_on_read_hrg_pickle(orig_df, gn, metricx)
else:
print 'To gen the hrg pickle:', gn
exit()
"""
def main_network_stats(args):
if os.path.exists("datasets/{}.pickle".format(gname)):
print (" ==>","reading from pickle")
orig_pickle = "datasets/{}.pickle".format(gname)
with open(orig_pickle, "rb") as in_file:
G= cPickle.load(in_file)
else:
import vador.datasets_graphs_edgelist as sal
print (" ==>","reading from edgelist")
G = sal.load_edgelist(args['orig'][0])
G.name = gname
nx.write_gpickle(G, "datasets/{}.pickle".format(gname))
from vador.datasets_graphs_edgelist import load_graphs_nxobjects
hrg_p_fname = "Results/{}_hstars.pickle".format(gname)
graphs_d = load_graphs_nxobjects(hrg_p_fname)
graphs_lst = graphs_d.values()[0]
p = mp.Pool(processes=10)
for g in graphs_lst:
# p.apply_async(metrics.network_properties, args=([G], ['clust'], g, gname,True, ), callback=collect_results)
p.apply_async(metrics.clustering_coefficients_single, args=(g, ), callback=collect_results)
p.close()
p.join()
print ("done with metrics")
for j,x in enumerate(results):
if j==0:
df = x
continue
df = pd.concat([df,x])
if 0: print j, "shape", df.shape
gb =df.groupby(['k'])
print (gb['cc'].mean().to_string())
print "-"*10
orig__clust_coef = metrics.clustering_coefficients_single(G)
gb = orig__clust_coef.groupby(['k'])
print (gb['cc'].mean().to_string())
#synth_clust_coef = results
"""
def main_inf_mirr(gFname, gName, rnbr=1):
retVal = None
G = xt.load_edgelist(gFname)
# rnbr = 1
# for j in range(1, rnbr + 1):
# prs = get_hrg_production_rules_given(G)
Hstars = [] # synthetic (stochastically generated) graphs using the graph grammars
ProdRulesKth = []
# Note that there might not be convergence, b/c the graph may degenerate early
for j in range(0, 10): # nbr of times to do Inf. Mirr. tst
for k in range(0, 1): # nbr of times to feedback the resulting graph
# print ("\tGraph #:",k+1)
prdrls = {}
prod_rules = phrg.probabilistic_hrg_deriving_prod_rules(G)
# print len(prod_rules)
# initialize the Grammar g
g = pcfg.Grammar('S')
for (id, lhs, rhs, prob) in prod_rules:
g.add_rule(pcfg.Rule(id, lhs, rhs, prob))
num_nodes = G.number_of_nodes()
g.set_max_size(num_nodes)
print "Done initializing the grammar data-structure"
# Generate a synthetic graph using HRGs
try:
rule_list = g.sample(num_nodes)
except Exception, e:
print str(e)
rule_list = g.sample(num_nodes)
break
hstar = phrg.grow(rule_list, g)[0]
G = hstar # feed back the newly created graph
# store the last synth graph & restart
Hstars.append(hstar) #
# Warning
# If the rules are not able to generate a graph rerun this step or add a try/catch to retry or continue
print ("~..~" * 10)
nx.write_gpickle(Hstars[0], "Results/inf_mir_{}_{}_{}.p".format(gName, k, j))
if os.path.exists("Results/inf_mir_{}_{}_{}.p".format(gName, k, j)):
print ("Results/inf_mir_{}_{}_{}.p written".format(gName, k, j))
return retVal
if __name__ == '__main__':
parser = get_parser()
args = vars(parser.parse_args())
gname = graph_name(args['orig'][0])
rnbr_arg = args['rnbr']
print rnbr_arg
try:
main_inf_mirr(args['orig'][0], gname, rnbr=rnbr_arg)
except Exception, e:
print 'ERROR, UNEXPECTED SAVE PLOT EXCEPTION'
print str(e)
traceback.print_exc()
os._exit(1)
sys.exit(0)
| mit |
glouppe/scikit-learn | sklearn/metrics/cluster/bicluster.py | 359 | 2797 | from __future__ import division
import numpy as np
from sklearn.utils.linear_assignment_ import linear_assignment
from sklearn.utils.validation import check_consistent_length, check_array
__all__ = ["consensus_score"]
def _check_rows_and_columns(a, b):
"""Unpacks the row and column arrays and checks their shape."""
check_consistent_length(*a)
check_consistent_length(*b)
checks = lambda x: check_array(x, ensure_2d=False)
a_rows, a_cols = map(checks, a)
b_rows, b_cols = map(checks, b)
return a_rows, a_cols, b_rows, b_cols
def _jaccard(a_rows, a_cols, b_rows, b_cols):
"""Jaccard coefficient on the elements of the two biclusters."""
intersection = ((a_rows * b_rows).sum() *
(a_cols * b_cols).sum())
a_size = a_rows.sum() * a_cols.sum()
b_size = b_rows.sum() * b_cols.sum()
return intersection / (a_size + b_size - intersection)
def _pairwise_similarity(a, b, similarity):
"""Computes pairwise similarity matrix.
result[i, j] is the Jaccard coefficient of a's bicluster i and b's
bicluster j.
"""
a_rows, a_cols, b_rows, b_cols = _check_rows_and_columns(a, b)
n_a = a_rows.shape[0]
n_b = b_rows.shape[0]
result = np.array(list(list(similarity(a_rows[i], a_cols[i],
b_rows[j], b_cols[j])
for j in range(n_b))
for i in range(n_a)))
return result
def consensus_score(a, b, similarity="jaccard"):
"""The similarity of two sets of biclusters.
Similarity between individual biclusters is computed. Then the
best matching between sets is found using the Hungarian algorithm.
The final score is the sum of similarities divided by the size of
the larger set.
Read more in the :ref:`User Guide <biclustering>`.
Parameters
----------
a : (rows, columns)
Tuple of row and column indicators for a set of biclusters.
b : (rows, columns)
Another set of biclusters like ``a``.
similarity : string or function, optional, default: "jaccard"
May be the string "jaccard" to use the Jaccard coefficient, or
any function that takes four arguments, each of which is a 1d
indicator vector: (a_rows, a_columns, b_rows, b_columns).
References
----------
* Hochreiter, Bodenhofer, et. al., 2010. `FABIA: factor analysis
for bicluster acquisition
<https://www.ncbi.nlm.nih.gov/pmc/articles/PMC2881408/>`__.
"""
if similarity == "jaccard":
similarity = _jaccard
matrix = _pairwise_similarity(a, b, similarity)
indices = linear_assignment(1. - matrix)
n_a = len(a[0])
n_b = len(b[0])
return matrix[indices[:, 0], indices[:, 1]].sum() / max(n_a, n_b)
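# Illustrative usage sketch (added, not part of the original module); the
# indicator arrays below are made up:
#     rows = np.array([[True, True, False], [False, False, True]])
#     cols = np.array([[True, False], [False, True]])
#     consensus_score((rows, cols), (rows, cols))  # identical sets -> 1.0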
| bsd-3-clause |
nilmtk/nilm_metadata | nilm_metadata/convert_yaml_to_hdf5.py | 1 | 7610 | from __future__ import print_function, division
import yaml
import pandas as pd
from os.path import isdir, isfile, join, splitext
from os import listdir
from sys import stderr
from copy import deepcopy
from six import iteritems
from .object_concatenation import get_appliance_types
class NilmMetadataError(Exception):
pass
def convert_yaml_to_hdf5(yaml_dir, hdf_filename):
"""Converts a NILM Metadata YAML instance to HDF5.
Also does a set of sanity checks on the metadata.
Parameters
----------
yaml_dir : str
Directory path of all *.YAML files describing this dataset.
hdf_filename : str
Filename and path of output HDF5 file. If file exists then will
attempt to append metadata to file. If file does not exist then
will create it.
"""
assert isdir(yaml_dir)
store = pd.HDFStore(hdf_filename, 'a')
# Load Dataset and MeterDevice metadata
metadata = _load_file(yaml_dir, 'dataset.yaml')
meter_devices = _load_file(yaml_dir, 'meter_devices.yaml')
metadata['meter_devices'] = meter_devices
store.root._v_attrs.metadata = metadata
# Load buildings
building_filenames = [fname for fname in listdir(yaml_dir)
if fname.startswith('building')
and fname.endswith('.yaml')]
for fname in building_filenames:
building = splitext(fname)[0] # e.g. 'building1'
try:
group = store._handle.create_group('/', building)
except:
group = store._handle.get_node('/' + building)
building_metadata = _load_file(yaml_dir, fname)
elec_meters = building_metadata['elec_meters']
_deep_copy_meters(elec_meters)
_set_data_location(elec_meters, building)
_sanity_check_meters(elec_meters, meter_devices)
_sanity_check_appliances(building_metadata)
group._f_setattr('metadata', building_metadata)
store.close()
print("Done converting YAML metadata to HDF5!")
def save_yaml_to_datastore(yaml_dir, store):
"""Saves a NILM Metadata YAML instance to a NILMTK datastore.
Parameters
----------
yaml_dir : str
Directory path of all *.YAML files describing this dataset.
store : DataStore
DataStore object
"""
assert isdir(yaml_dir)
# Load Dataset and MeterDevice metadata
metadata = _load_file(yaml_dir, 'dataset.yaml')
print("Loaded metadata")
meter_devices = _load_file(yaml_dir, 'meter_devices.yaml')
metadata['meter_devices'] = meter_devices
store.save_metadata('/', metadata)
# Load buildings
building_filenames = [fname for fname in listdir(yaml_dir)
if fname.startswith('building')
and fname.endswith('.yaml')]
for fname in building_filenames:
building = splitext(fname)[0] # e.g. 'building1'
building_metadata = _load_file(yaml_dir, fname)
elec_meters = building_metadata['elec_meters']
_deep_copy_meters(elec_meters)
_set_data_location(elec_meters, building)
_sanity_check_meters(elec_meters, meter_devices)
_sanity_check_appliances(building_metadata)
store.save_metadata('/'+building, building_metadata)
store.close()
print("Done converting YAML metadata to HDF5!")
def _load_file(yaml_dir, yaml_filename):
yaml_full_filename = join(yaml_dir, yaml_filename)
if isfile(yaml_full_filename):
with open(yaml_full_filename, 'rb') as fh:
return yaml.safe_load(fh)
else:
print(yaml_full_filename, "not found.", file=stderr)
def _deep_copy_meters(elec_meters):
for meter_instance, meter in iteritems(elec_meters):
elec_meters[meter_instance] = deepcopy(meter)
def _set_data_location(elec_meters, building):
"""Goes through each ElecMeter in elec_meters and sets `data_location`.
Modifies `elec_meters` in place.
Parameters
----------
elec_meters : dict of dicts
building : string e.g. 'building1'
"""
for meter_instance in elec_meters:
data_location = '/{:s}/elec/meter{:d}'.format(building, meter_instance)
elec_meters[meter_instance]['data_location'] = data_location
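# Illustrative sketch (added) of the effect; the meter dict is made up:
#     meters = {1: {}, 2: {}}
#     _set_data_location(meters, 'building1')
#     meters[1]['data_location']  # -> '/building1/elec/meter1'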
def _sanity_check_meters(meters, meter_devices):
"""
Checks:
* Make sure all meter devices map to meter_device keys
* Makes sure all IDs are unique
"""
if len(meters) != len(set(meters)):
raise NilmMetadataError("elec_meters not unique")
for meter_instance, meter in iteritems(meters):
assert meter['device_model'] in meter_devices
def _sanity_check_appliances(building_metadata):
"""
Checks:
* Make sure we use proper NILM Metadata names.
* Make sure there aren't multiple appliance types with same instance
"""
appliances = building_metadata['appliances']
appliance_types = get_appliance_types()
building_instance = building_metadata['instance']
REQUIRED_KEYS = ['type', 'instance', 'meters']
for appliance in appliances:
if not isinstance(appliance, dict):
raise NilmMetadataError(
"Appliance '{}' is {} when it should be a dict."
.format(appliance, type(appliance)))
# Generate string for specifying which is the problematic
# appliance for error messages:
appl_string = ("ApplianceType '{}', instance '{}', in building {:d}"
.format(appliance.get('type'),
appliance.get('instance'),
building_instance))
# Check required keys are all present
for key in REQUIRED_KEYS:
if key not in appliance:
raise NilmMetadataError("key '{}' missing for {}"
.format(key, appl_string))
appl_type = appliance['type']
# check all appliance names are valid
if appl_type not in appliance_types:
raise NilmMetadataError(
appl_string + " not in appliance_types."
" In other words, '{}' is not a recognised appliance type."
.format(appl_type))
# Check appliance references valid meters
meters = appliance['meters']
if len(meters) != len(set(meters)):
msg = "In {}, meters '{}' not unique.".format(appl_string, meters)
raise NilmMetadataError(msg)
for meter in meters:
if meter != 0 and meter not in building_metadata['elec_meters']:
msg = ("In ({}), meter '{:d}' is not in"
" this building's 'elec_meters'"
.format(appl_string, meter))
raise NilmMetadataError(msg)
# Check list of instances for each appliance is valid.
appliance_instances = {}
for appliance in appliances:
appl_type = appliance['type']
instances = appliance_instances.setdefault(appl_type, [])
instances.append(appliance['instance'])
for appliance_type, instances in iteritems(appliance_instances):
instances.sort()
correct_instances = list(range(1, len(instances)+1))
if instances != correct_instances:
msg = ("In building {:d}, appliance '{}' appears {:d} time(s)."
" Yet the list of instances is '{}'. The list of instances"
" should be '{}'."
.format(building_metadata['instance'], appliance_type,
len(instances), instances, correct_instances))
raise NilmMetadataError(msg)
| apache-2.0 |
vgupta6/Project-2 | modules/s3_update_check.py | 6 | 7811 | # -*- coding: utf-8 -*-
import os
import sys
try:
from gluon import current
except ImportError:
print >> sys.stderr, """
The installed version of Web2py is too old -- it does not define current.
Please upgrade Web2py to a more recent version.
"""
# Version of 000_config.py
# Increment this if the user should update their running instance
VERSION = 1
#def update_check(environment, template="default"):
def update_check():
"""
Check whether the dependencies are sufficient to run Eden
@ToDo: Integrate into WebSetup:
http://eden.sahanafoundation.org/wiki/DeveloperGuidelines/WebSetup
"""
# Get Web2py environment into our globals.
#globals().update(**environment)
request = current.request
# Fatal errors
errors = []
# Non-fatal warnings
warnings = []
# -------------------------------------------------------------------------
# Check Python libraries
try:
import dateutil
except(ImportError):
errors.append("S3 unresolved dependency: dateutil required for Sahana to run")
try:
import lxml
except(ImportError):
errors.append("S3XML unresolved dependency: lxml required for Sahana to run")
try:
import shapely
except(ImportError):
warnings.append("S3GIS unresolved dependency: shapely required for GIS support")
try:
import xlrd
except(ImportError):
warnings.append("S3XLS unresolved dependency: xlrd required for XLS import")
try:
import xlwt
except(ImportError):
warnings.append("S3XLS unresolved dependency: xlwt required for XLS export")
try:
from PIL import Image
except(ImportError):
try:
import Image
except(ImportError):
warnings.append("S3PDF unresolved dependency: Python Imaging required for PDF export")
try:
import reportlab
except(ImportError):
warnings.append("S3PDF unresolved dependency: reportlab required for PDF export")
try:
import matplotlib
except(ImportError):
warnings.append("S3Chart unresolved dependency: matplotlib required for charting")
try:
import numpy
except(ImportError):
warnings.append("S3Report unresolved dependency: numpy required for pivot table reports")
try:
import tweepy
except(ImportError):
warnings.append("S3Msg unresolved dependency: tweepy required for non-Tropo Twitter support")
try:
import PyRTF
except(ImportError):
warnings.append("Survey unresolved dependency: PyRTF required if you want to export assessment templates as a Word document")
# -------------------------------------------------------------------------
# Check Web2Py version
#
# Currently, the minimum usable Web2py is determined by whether the
# Scheduler is available
web2py_minimum_version = "Version 1.99.3 (2011-10-27 13:23:13)"
web2py_version_ok = True
try:
from gluon.fileutils import parse_version
except ImportError:
web2py_version_ok = False
if web2py_version_ok:
web2py_minimum_datetime = parse_version(web2py_minimum_version)[3]
web2py_installed_datetime = request.global_settings.web2py_version[3]
web2py_version_ok = web2py_installed_datetime >= web2py_minimum_datetime
if not web2py_version_ok:
warnings.append(
"The installed version of Web2py is too old to provide the Scheduler,"
"\nso scheduled tasks will not be available. If you need scheduled tasks,"
"\nplease upgrade Web2py to at least version: %s" % \
web2py_minimum_version)
# -------------------------------------------------------------------------
# Create required directories if needed
app_path = request.folder
databases_dir = os.path.join(app_path, "databases")
try:
os.stat(databases_dir)
except OSError:
# not found, create it
os.mkdir(databases_dir)
# -------------------------------------------------------------------------
# Copy in Templates
# - 000_config.py (machine-specific settings)
# - rest are run in-place
#
template_folder = os.path.join(app_path, "private", "templates")
template_files = {
# source : destination
"000_config.py" : os.path.join("models", "000_config.py"),
}
copied_from_template = []
for t in template_files:
src_path = os.path.join(template_folder, t)
dst_path = os.path.join(app_path, template_files[t])
try:
os.stat(dst_path)
except OSError:
# Not found, copy from template
if t == "000_config.py":
input = open(src_path)
output = open(dst_path, "w")
for line in input:
if "akeytochange" in line:
# Generate a random hmac_key to secure the passwords in case
# the database is compromised
import uuid
hmac_key = uuid.uuid4()
line = 'deployment_settings.auth.hmac_key = "%s"' % hmac_key
output.write(line)
output.close()
input.close()
else:
import shutil
shutil.copy(src_path, dst_path)
copied_from_template.append(template_files[t])
else:
# Found the file in the destination
# Check if it has been edited
import re
edited_pattern = r"FINISHED_EDITING_\w*\s*=\s*(True|False)"
edited_matcher = re.compile(edited_pattern).match
has_edited = False
with open(dst_path) as f:
for line in f:
edited_result = edited_matcher(line)
if edited_result:
has_edited = True
edited = edited_result.group(1)
break
if has_edited and (edited != "True"):
errors.append("Please edit %s before starting the system." % t)
# Check if it's up to date (i.e. a critical update requirement)
version_pattern = r"VERSION =\s*([0-9]+)"
version_matcher = re.compile(version_pattern).match
has_version = False
with open(dst_path) as f:
for line in f:
version_result = version_matcher(line)
if version_result:
has_version = True
version = version_result.group(1)
break
if not has_version:
error = "Your %s is using settings from the old templates system. Please switch to the new templates system: http://eden.sahanafoundation.org/wiki/DeveloperGuidelines/Templates" % t
errors.append(error)
elif int(version) != VERSION:
error = "Your %s is using settings from template version %s. Please update with new settings from template version %s before starting the system." % \
(t, version, VERSION)
errors.append(error)
if copied_from_template:
errors.append(
"The following files were copied from templates and should be edited: %s" %
", ".join(copied_from_template))
return {"error_messages": errors, "warning_messages": warnings}
# END =========================================================================
| mit |
sgenoud/scikit-learn | examples/plot_iris_classifiers.py | 2 | 1865 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Classifiers Comparison
=========================================================
A Comparison of a K-nearest-neighbours, Logistic Regression and a Linear SVC
classifying the `iris <http://en.wikipedia.org/wiki/Iris_flower_data_set>`_
dataset.
"""
print __doc__
# Code source: Gael Varoquaux
# Modified for Documentation merge by Jaques Grobler
# License: BSD
import numpy as np
import pylab as pl
from sklearn import neighbors, datasets, linear_model, svm
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
Y = iris.target
h = .02 # step size in the mesh
classifiers = dict(
knn=neighbors.KNeighborsClassifier(),
logistic=linear_model.LogisticRegression(C=1e5),
svm=svm.LinearSVC(C=1e5, loss='l1'),
)
fignum = 1
# we fit each classifier to the data and plot its decision boundary
for name, clf in classifiers.iteritems():
clf.fit(X, Y)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
pl.figure(fignum, figsize=(4, 3))
pl.pcolormesh(xx, yy, Z, cmap=pl.cm.Paired)
# Plot also the training points
pl.scatter(X[:, 0], X[:, 1], c=Y, cmap=pl.cm.Paired)
pl.xlabel('Sepal length')
pl.ylabel('Sepal width')
pl.xlim(xx.min(), xx.max())
pl.ylim(yy.min(), yy.max())
pl.xticks(())
pl.yticks(())
fignum += 1
pl.show()
| bsd-3-clause |
dashmoment/facerecognition | py/apps/scripts/fisherfaces_example.py | 2 | 2253 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) Philipp Wagner. All rights reserved.
# Licensed under the BSD license. See LICENSE file in the project root for full license information.
import sys
# append facerec to module search path
sys.path.append("../..")
# import facerec stuff
from facerec.dataset import DataSet
from facerec.feature import Fisherfaces
from facerec.distance import EuclideanDistance, CosineDistance
from facerec.classifier import NearestNeighbor
from facerec.classifier import SVM
from facerec.model import PredictableModel
from facerec.validation import KFoldCrossValidation
from facerec.visual import subplot
from facerec.util import minmax_normalize
# import numpy
import numpy as np
# import matplotlib colormaps
import matplotlib.cm as cm
# import for logging
import logging,sys
# set up a handler for logging
handler = logging.StreamHandler(sys.stdout)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
# add handler to facerec modules
logger = logging.getLogger("facerec")
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
# load a dataset (e.g. AT&T Facedatabase)
dataSet = DataSet("/home/philipp/facerec/data/yalefaces_recognition")
# define Fisherfaces as feature extraction method
feature = Fisherfaces()
# define a 1-NN classifier with Euclidean Distance
classifier = NearestNeighbor(dist_metric=EuclideanDistance(), k=1)
# define the model as the combination
model = PredictableModel(feature=feature, classifier=classifier)
# show fisherfaces
model.compute(dataSet.data, dataSet.labels)
# turn the first (at most) 16 eigenvectors into grayscale
# images (note: eigenvectors are stored by column!)
E = []
for i in xrange(min(model.feature.eigenvectors.shape[1], 16)):
e = model.feature.eigenvectors[:,i].reshape(dataSet.data[0].shape)
E.append(minmax_normalize(e,0,255, dtype=np.uint8))
# plot them and store the plot to "python_fisherfaces_fisherfaces.pdf"
subplot(title="Fisherfaces", images=E, rows=4, cols=4, sptitle="Fisherface", colormap=cm.jet, filename="fisherfaces.pdf")
# perform a 10-fold cross validation
cv = KFoldCrossValidation(model, k=10)
cv.validate(dataSet.data, dataSet.labels)
cv.print_results()
| bsd-3-clause |
poryfly/scikit-learn | examples/cluster/plot_kmeans_assumptions.py | 270 | 2040 | """
====================================
Demonstration of k-means assumptions
====================================
This example is meant to illustrate situations where k-means will produce
unintuitive and possibly unexpected clusters. In the first three plots, the
input data does not conform to some implicit assumption that k-means makes and
undesirable clusters are produced as a result. In the last plot, k-means
returns intuitive clusters despite unevenly sized blobs.
"""
print(__doc__)
# Author: Phil Roth <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs
plt.figure(figsize=(12, 12))
n_samples = 1500
random_state = 170
X, y = make_blobs(n_samples=n_samples, random_state=random_state)
# Incorrect number of clusters
y_pred = KMeans(n_clusters=2, random_state=random_state).fit_predict(X)
plt.subplot(221)
plt.scatter(X[:, 0], X[:, 1], c=y_pred)
plt.title("Incorrect Number of Blobs")
# Anisotropically distributed data
transformation = [[ 0.60834549, -0.63667341], [-0.40887718, 0.85253229]]
X_aniso = np.dot(X, transformation)
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_aniso)
plt.subplot(222)
plt.scatter(X_aniso[:, 0], X_aniso[:, 1], c=y_pred)
plt.title("Anisotropicly Distributed Blobs")
# Different variance
X_varied, y_varied = make_blobs(n_samples=n_samples,
cluster_std=[1.0, 2.5, 0.5],
random_state=random_state)
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_varied)
plt.subplot(223)
plt.scatter(X_varied[:, 0], X_varied[:, 1], c=y_pred)
plt.title("Unequal Variance")
# Unevenly sized blobs
X_filtered = np.vstack((X[y == 0][:500], X[y == 1][:100], X[y == 2][:10]))
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_filtered)
plt.subplot(224)
plt.scatter(X_filtered[:, 0], X_filtered[:, 1], c=y_pred)
plt.title("Unevenly Sized Blobs")
plt.show()
| bsd-3-clause |
ScottSnapperLab/coordinator_data_tasks | coordinator_data_tasks/utils/loaders.py | 1 | 1783 | #!/usr/bin/env python
"""Provide functions for custom loading of files."""
from pathlib import Path
import gzip
import pandas as pd
from logzero import logger as log
from coordinator_data_tasks.utils import errors as e
def is_csv(path):
"""Return True if the path is probably a csv."""
return 'csv' in Path(path).name.split('.')
def is_excel(path):
"""Return True if the path is probably an excel."""
parts = Path(path).name.split('.')
return any(["xls" in parts, "xlsx" in parts])
def load_table(path: str, **kwargs) -> pd.DataFrame:
"""Smartly load the table whether it's a csv or excel file."""
if is_csv(path):
return load_csv(str(path), **kwargs)
elif is_excel(path):
return load_xls(str(path), **kwargs)
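# Illustrative usage sketch (added; file names are hypothetical, and extra
# keyword arguments are passed straight through to pandas):
#     df_csv = load_table('results.csv.gz')
#     df_xls = load_table('results.xlsx', sheet_name=0)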
def load_csv(path: str, **kwargs) -> pd.DataFrame:
"""Smartly load a csv whether it's gzipped or not."""
fn = Path(path).name
try:
df = pd.read_csv(gzip.open(path), **kwargs)
except (pd.io.common.CParserError, OSError):
df = pd.read_csv(path, **kwargs)
except pd.io.common.EmptyDataError:
msg = f"File appears to be empty: {fn}."
log.error(msg)
raise
if df.empty:
msg = f"File appears to be empty: {fn}."
log.error(msg)
raise e.ValidationError(msg)
return df
def load_xls(path: str, **kwargs) -> pd.DataFrame:
"""Load an excel table as DataFrame."""
fn = Path(path).name
try:
df = pd.read_excel(path, **kwargs)
except (UnicodeDecodeError, pd.errors.ParserError):
msg = f"File appears to be empty: {fn}."
log.error(msg)
raise
if df.empty:
msg = f"File appears to be empty: {fn}."
log.error(msg)
raise e.ValidationError(msg)
return df
| mit |
cl4rke/scikit-learn | examples/cluster/plot_lena_ward_segmentation.py | 271 | 1998 | """
===============================================================
A demo of structured Ward hierarchical clustering on Lena image
===============================================================
Compute the segmentation of a 2D image with Ward hierarchical
clustering. The clustering is spatially constrained in order
for each segmented region to be in one piece.
"""
# Author : Vincent Michel, 2010
# Alexandre Gramfort, 2011
# License: BSD 3 clause
print(__doc__)
import time as time
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from sklearn.feature_extraction.image import grid_to_graph
from sklearn.cluster import AgglomerativeClustering
###############################################################################
# Generate data
lena = sp.misc.lena()
# Downsample the image by a factor of 4
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
X = np.reshape(lena, (-1, 1))
###############################################################################
# Define the structure A of the data. Pixels connected to their neighbors.
connectivity = grid_to_graph(*lena.shape)
###############################################################################
# Compute clustering
print("Compute structured hierarchical clustering...")
st = time.time()
n_clusters = 15 # number of regions
ward = AgglomerativeClustering(n_clusters=n_clusters,
linkage='ward', connectivity=connectivity).fit(X)
label = np.reshape(ward.labels_, lena.shape)
print("Elapsed time: ", time.time() - st)
print("Number of pixels: ", label.size)
print("Number of clusters: ", np.unique(label).size)
###############################################################################
# Plot the results on an image
plt.figure(figsize=(5, 5))
plt.imshow(lena, cmap=plt.cm.gray)
for l in range(n_clusters):
plt.contour(label == l, contours=1,
colors=[plt.cm.spectral(l / float(n_clusters)), ])
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
mkraemer67/pylearn2 | pylearn2/utils/image.py | 39 | 18841 | """
Utility functions for working with images.
"""
import logging
import numpy as np
plt = None
axes = None
from theano.compat.six.moves import xrange
from theano.compat.six import string_types
import warnings
try:
import matplotlib.pyplot as plt
import matplotlib.axes
except (RuntimeError, ImportError, TypeError) as matplotlib_exception:
warnings.warn("Unable to import matplotlib. Some features unavailable. "
"Original exception: " + str(matplotlib_exception))
import os
try:
from PIL import Image
except ImportError:
Image = None
from pylearn2.utils import string_utils as string
from pylearn2.utils.exc import reraise_as
from tempfile import mkstemp
from multiprocessing import Process
import subprocess
logger = logging.getLogger(__name__)
def ensure_Image():
"""Makes sure Image has been imported from PIL"""
global Image
if Image is None:
raise RuntimeError("You are trying to use PIL-dependent functionality"
" but don't have PIL installed.")
def imview(*args, **kwargs):
"""
A matplotlib-based image viewer command,
wrapping `matplotlib.pyplot.imshow` but behaving more
sensibly.
Parameters
----------
figure : TODO
TODO: write parameters section using decorators to inherit
the matplotlib docstring
Notes
-----
Parameters are identical to `matplotlib.pyplot.imshow`
but this behaves somewhat differently:
* By default, it creates a new figure (unless a
`figure` keyword argument is supplied.
* It modifies the axes of that figure to use the
full frame, without ticks or tick labels.
* It turns on `nearest` interpolation by default
(i.e., it does not antialias pixel data). This
can be overridden with the `interpolation`
argument as in `imshow`.
All other arguments and keyword arguments are passed
on to `imshow`.`
"""
if 'figure' not in kwargs:
f = plt.figure()
else:
f = kwargs['figure']
new_ax = matplotlib.axes.Axes(f,
[0, 0, 1, 1],
xticks=[],
yticks=[],
frame_on=False)
f.delaxes(f.gca())
f.add_axes(new_ax)
if len(args) < 5 and 'interpolation' not in kwargs:
kwargs['interpolation'] = 'nearest'
plt.imshow(*args, **kwargs)
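# Illustrative usage sketch for imview (kept as comments; the array is an
# assumption standing in for any image):
#
#   data = np.random.rand(32, 32)
#   imview(data)    # new full-frame figure, nearest-neighbour interpolation
#   plt.show()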
def imview_async(*args, **kwargs):
"""
A version of `imview` that forks a separate process and
immediately shows the image.
Parameters
----------
window_title : str
TODO: writeme with decorators to inherit the other imviews'
docstrings
Notes
-----
Supports the `window_title` keyword argument to cope with
the title always being 'Figure 1'.
Returns the `multiprocessing.Process` handle.
"""
if 'figure' in kwargs:
raise ValueError("passing a figure argument not supported")
def fork_image_viewer():
f = plt.figure()
kwargs['figure'] = f
imview(*args, **kwargs)
if 'window_title' in kwargs:
f.set_window_title(kwargs['window_title'])
plt.show()
p = Process(None, fork_image_viewer)
p.start()
return p
def show(image):
"""
.. todo::
WRITEME
Parameters
----------
image : PIL Image object or ndarray
If ndarray, integer formats are assumed to use 0-255
and float formats are assumed to use 0-1
"""
viewer_command = string.preprocess('${PYLEARN2_VIEWER_COMMAND}')
if viewer_command == 'inline':
return imview(image)
if hasattr(image, '__array__'):
# do some shape checking because PIL just raises a tuple indexing error
# that doesn't make it very clear what the problem is
if len(image.shape) < 2 or len(image.shape) > 3:
raise ValueError('image must have either 2 or 3 dimensions but its'
' shape is ' + str(image.shape))
# The below is a temporary workaround that prevents us from crashing
# 3rd party image viewers such as eog by writing out overly large
# images.
# In the long run we should determine if this is a bug in PIL when
# producing
# such images or a bug in eog and determine a proper fix.
# Since this is hopefully just a short term workaround the
# constants below are not included in the interface to the
# function, so that 3rd party code won't start passing them.
max_height = 4096
max_width = 4096
# Display separate warnings for each direction, since it's
# common to crop only one.
if image.shape[0] > max_height:
image = image[0:max_height, :, :]
warnings.warn("Cropping image to smaller height to avoid crashing "
"the viewer program.")
        if image.shape[1] > max_width:
image = image[:, 0:max_width, :]
warnings.warn("Cropping the image to a smaller width to avoid "
"crashing the viewer program.")
# This ends the workaround
if image.dtype == 'int8':
image = np.cast['uint8'](image)
elif str(image.dtype).startswith('float'):
# don't use *=, we don't want to modify the input array
image = image * 255.
image = np.cast['uint8'](image)
# PIL is too stupid to handle single-channel arrays
if len(image.shape) == 3 and image.shape[2] == 1:
image = image[:, :, 0]
try:
ensure_Image()
image = Image.fromarray(image)
except TypeError:
reraise_as(TypeError("PIL issued TypeError on ndarray of shape " +
str(image.shape) + " and dtype " +
str(image.dtype)))
# Create a temporary file with the suffix '.png'.
fd, name = mkstemp(suffix='.png')
os.close(fd)
# Note:
# Although we can use tempfile.NamedTemporaryFile() to create
# a temporary file, the function should be used with care.
#
# In Python earlier than 2.7, a temporary file created by the
# function will be deleted just after the file is closed.
# We can re-use the name of the temporary file, but there is an
# instant where a file with the name does not exist in the file
# system before we re-use the name. This may cause a race
# condition.
#
# In Python 2.7 or later, tempfile.NamedTemporaryFile() has
# the 'delete' argument which can control whether a temporary
# file will be automatically deleted or not. With the argument,
# the above race condition can be avoided.
#
image.save(name)
if os.name == 'nt':
subprocess.Popen(viewer_command + ' ' + name + ' && del ' + name,
shell=True)
else:
subprocess.Popen(viewer_command + ' ' + name + ' ; rm ' + name,
shell=True)
def pil_from_ndarray(ndarray):
"""
Converts an ndarray to a PIL image.
Parameters
----------
ndarray : ndarray
An ndarray containing an image.
Returns
-------
pil : PIL Image
A PIL Image containing the image.
"""
try:
if ndarray.dtype == 'float32' or ndarray.dtype == 'float64':
assert ndarray.min() >= 0.0
assert ndarray.max() <= 1.0
ndarray = np.cast['uint8'](ndarray * 255)
if len(ndarray.shape) == 3 and ndarray.shape[2] == 1:
ndarray = ndarray[:, :, 0]
ensure_Image()
rval = Image.fromarray(ndarray)
return rval
except Exception as e:
logger.exception('original exception: ')
logger.exception(e)
logger.exception('ndarray.dtype: {0}'.format(ndarray.dtype))
logger.exception('ndarray.shape: {0}'.format(ndarray.shape))
raise
assert False
def ndarray_from_pil(pil, dtype='uint8'):
"""
Converts a PIL Image to an ndarray.
Parameters
----------
pil : PIL Image
An image represented as a PIL Image object
dtype : str
The dtype of ndarray to create
Returns
-------
ndarray : ndarray
The image as an ndarray.
"""
rval = np.asarray(pil)
if dtype != rval.dtype:
rval = np.cast[dtype](rval)
if str(dtype).startswith('float'):
rval /= 255.
if len(rval.shape) == 2:
rval = rval.reshape(rval.shape[0], rval.shape[1], 1)
return rval
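# Illustrative round-trip sketch for the two converters above (array values
# are assumptions): a float image in [0, 1] becomes a uint8 PIL image and can
# be recovered as a float array again.
#
#   arr = np.random.rand(8, 8, 3).astype('float32')
#   pil = pil_from_ndarray(arr)                       # uint8 PIL Image
#   back = ndarray_from_pil(pil, dtype='float32')     # rescaled to [0, 1]
#   assert back.shape == (8, 8, 3)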
def rescale(image, shape):
"""
Scales image to be no larger than shape. PIL might give you
unexpected results beyond that.
Parameters
----------
image : WRITEME
shape : WRITEME
Returns
-------
WRITEME
"""
assert len(image.shape) == 3 # rows, cols, channels
assert len(shape) == 2 # rows, cols
i = pil_from_ndarray(image)
ensure_Image()
i.thumbnail([shape[1], shape[0]], Image.ANTIALIAS)
rval = ndarray_from_pil(i, dtype=image.dtype)
return rval
resize = rescale
def fit_inside(image, shape):
"""
    Scales image down to fit inside shape; preserves the proportions of the image.
Parameters
----------
image : WRITEME
shape : WRITEME
Returns
-------
WRITEME
"""
assert len(image.shape) == 3 # rows, cols, channels
assert len(shape) == 2 # rows, cols
if image.shape[0] <= shape[0] and image.shape[1] <= shape[1]:
return image.copy()
row_ratio = float(image.shape[0]) / float(shape[0])
col_ratio = float(image.shape[1]) / float(shape[1])
if row_ratio > col_ratio:
target_shape = [shape[0], min(image.shape[1] / row_ratio, shape[1])]
else:
target_shape = [min(image.shape[0] / col_ratio, shape[0]), shape[1]]
assert target_shape[0] <= shape[0]
assert target_shape[1] <= shape[1]
assert target_shape[0] == shape[0] or target_shape[1] == shape[1]
rval = rescale(image, target_shape)
return rval
def letterbox(image, shape):
"""
Pads image with black letterboxing to bring image.shape up to shape
Parameters
----------
image : WRITEME
shape : WRITEME
Returns
-------
WRITEME
"""
assert len(image.shape) == 3 # rows, cols, channels
assert len(shape) == 2 # rows, cols
assert image.shape[0] <= shape[0]
assert image.shape[1] <= shape[1]
if image.shape[0] == shape[0] and image.shape[1] == shape[1]:
return image.copy()
rval = np.zeros((shape[0], shape[1], image.shape[2]), dtype=image.dtype)
rstart = (shape[0] - image.shape[0]) / 2
cstart = (shape[1] - image.shape[1]) / 2
rend = rstart + image.shape[0]
cend = cstart + image.shape[1]
rval[rstart:rend, cstart:cend] = image
return rval
def make_letterboxed_thumbnail(image, shape):
"""
Scales image down to shape. Preserves proportions of image, introduces
black letterboxing if necessary.
Parameters
----------
image : WRITEME
shape : WRITEME
Returns
-------
WRITEME
"""
assert len(image.shape) == 3
assert len(shape) == 2
shrunk = fit_inside(image, shape)
letterboxed = letterbox(shrunk, shape)
return letterboxed
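# Illustrative sketch (shapes are assumptions, and the integer division in
# letterbox assumes the Python 2 semantics this module targets): a wide image
# shrunk into a 32x32 thumbnail keeps its proportions and is padded with
# black rows.
#
#   img = np.random.rand(48, 96, 3).astype('float32')
#   thumb = make_letterboxed_thumbnail(img, (32, 32))
#   assert thumb.shape[:2] == (32, 32)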
def load(filepath, rescale_image=True, dtype='float64'):
"""
Load an image from a file.
Parameters
----------
filepath : str
Path to the image file to load
rescale_image : bool
Default value: True
If True, returned images have pixel values in [0, 1]. Otherwise,
values are in [0, 255].
dtype: str
The dtype to use for the returned value
Returns
-------
img : numpy ndarray
An array containing the image that was in the file.
"""
assert isinstance(filepath, string_types)
if not rescale_image and dtype == 'uint8':
ensure_Image()
rval = np.asarray(Image.open(filepath))
assert rval.dtype == 'uint8'
return rval
s = 1.0
if rescale_image:
s = 255.
try:
ensure_Image()
rval = Image.open(filepath)
except Exception:
reraise_as(Exception("Could not open " + filepath))
numpy_rval = np.array(rval)
msg = ("Tried to load an image, got an array with %d"
" dimensions. Expected 2 or 3."
"This may indicate a mildly corrupted image file. Try "
"converting it to a different image format with a different "
"editor like gimp or imagemagic. Sometimes these programs are "
"more robust to minor corruption than PIL and will emit a "
"correctly formatted image in the new format.")
if numpy_rval.ndim not in [2, 3]:
logger.error(dir(rval))
logger.error(rval)
logger.error(rval.size)
rval.show()
raise AssertionError(msg % numpy_rval.ndim)
rval = numpy_rval
rval = np.cast[dtype](rval) / s
if rval.ndim == 2:
rval = rval.reshape(rval.shape[0], rval.shape[1], 1)
if rval.ndim != 3:
raise AssertionError("Something went wrong opening " +
filepath + '. Resulting shape is ' +
str(rval.shape) +
" (it's meant to have 3 dimensions by now)")
return rval
def save(filepath, ndarray):
"""
Saves an image to a file.
Parameters
----------
filepath : str
The path to write the file to.
ndarray : ndarray
An array containing the image to be saved.
"""
pil_from_ndarray(ndarray).save(filepath)
def scale_to_unit_interval(ndar, eps=1e-8):
"""
Scales all values in the ndarray ndar to be between 0 and 1
Parameters
----------
ndar : WRITEME
eps : WRITEME
Returns
-------
WRITEME
"""
ndar = ndar.copy()
ndar -= ndar.min()
ndar *= 1.0 / (ndar.max() + eps)
return ndar
def tile_raster_images(X, img_shape, tile_shape, tile_spacing=(0, 0),
scale_rows_to_unit_interval=True,
output_pixel_vals=True):
"""
Transform an array with one flattened image per row, into an array in
which images are reshaped and layed out like tiles on a floor.
This function is useful for visualizing datasets whose rows are images,
and also columns of matrices for transforming those rows
(such as the first layer of a neural net).
Parameters
----------
    X : numpy.ndarray
        2-d ndarray or 4 tuple of 2-d ndarrays or None for channels,
        in which every row is a flattened image.
    img_shape : 2-tuple of ints
        The first component is the height of each image,
        the second component is the width.
    tile_shape : 2-tuple of ints
        The number of images to tile in (rows, columns) form.
    scale_rows_to_unit_interval : bool
        Whether or not the values need to be scaled to [0, 1] before
        being plotted.
output_pixel_vals : bool
Whether or not the output should be pixel values (int8) or floats.
Returns
-------
y : 2d-ndarray
The return value has the same dtype as X, and is suitable for
viewing as an image with PIL.Image.fromarray.
"""
assert len(img_shape) == 2
assert len(tile_shape) == 2
assert len(tile_spacing) == 2
# The expression below can be re-written in a more C style as
# follows :
#
# out_shape = [0,0]
# out_shape[0] = (img_shape[0]+tile_spacing[0])*tile_shape[0] -
# tile_spacing[0]
# out_shape[1] = (img_shape[1]+tile_spacing[1])*tile_shape[1] -
# tile_spacing[1]
out_shape = [(ishp + tsp) * tshp - tsp for ishp, tshp, tsp
in zip(img_shape, tile_shape, tile_spacing)]
if isinstance(X, tuple):
assert len(X) == 4
# Create an output np ndarray to store the image
if output_pixel_vals:
out_array = np.zeros((out_shape[0], out_shape[1], 4),
dtype='uint8')
else:
out_array = np.zeros((out_shape[0], out_shape[1], 4),
dtype=X.dtype)
# colors default to 0, alpha defaults to 1 (opaque)
if output_pixel_vals:
channel_defaults = [0, 0, 0, 255]
else:
channel_defaults = [0., 0., 0., 1.]
for i in xrange(4):
if X[i] is None:
# if channel is None, fill it with zeros of the correct
# dtype
dt = out_array.dtype
if output_pixel_vals:
dt = 'uint8'
out_array[:, :, i] = np.zeros(out_shape, dtype=dt) + \
channel_defaults[i]
else:
# use a recurrent call to compute the channel and store it
# in the output
out_array[:, :, i] = tile_raster_images(
X[i], img_shape, tile_shape, tile_spacing,
scale_rows_to_unit_interval, output_pixel_vals)
return out_array
else:
# if we are dealing with only one channel
H, W = img_shape
Hs, Ws = tile_spacing
# generate a matrix to store the output
dt = X.dtype
if output_pixel_vals:
dt = 'uint8'
out_array = np.zeros(out_shape, dtype=dt)
for tile_row in xrange(tile_shape[0]):
for tile_col in xrange(tile_shape[1]):
if tile_row * tile_shape[1] + tile_col < X.shape[0]:
this_x = X[tile_row * tile_shape[1] + tile_col]
if scale_rows_to_unit_interval:
# if we should scale values to be between 0 and 1
# do this by calling the `scale_to_unit_interval`
# function
this_img = scale_to_unit_interval(
this_x.reshape(img_shape))
else:
this_img = this_x.reshape(img_shape)
# add the slice to the corresponding position in the
# output array
c = 1
if output_pixel_vals:
c = 255
out_array[
tile_row * (H + Hs): tile_row * (H + Hs) + H,
tile_col * (W + Ws): tile_col * (W + Ws) + W
] = this_img * c
return out_array
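# Illustrative usage sketch for tile_raster_images (the weight matrix is an
# assumption): 20 flattened 8x8 "filters" tiled on a 4x5 grid with 1-pixel
# spacing yield a single (35, 44) uint8 image.
#
#   W = np.random.rand(20, 64)
#   tiled = tile_raster_images(W, img_shape=(8, 8), tile_shape=(4, 5),
#                              tile_spacing=(1, 1))
#   assert tiled.shape == ((8 + 1) * 4 - 1, (8 + 1) * 5 - 1)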
if __name__ == '__main__':
black = np.zeros((50, 50, 3), dtype='uint8')
red = black.copy()
red[:, :, 0] = 255
green = black.copy()
green[:, :, 1] = 255
show(black)
show(green)
show(red)
| bsd-3-clause |
basnijholt/holoviews | holoviews/plotting/mpl/util.py | 1 | 12442 | from __future__ import absolute_import, division, unicode_literals
import re
import warnings
import numpy as np
import matplotlib
from matplotlib import units as munits
from matplotlib import ticker
from matplotlib.colors import cnames
from matplotlib.lines import Line2D
from matplotlib.markers import MarkerStyle
from matplotlib.patches import Path, PathPatch
from matplotlib.transforms import Bbox, TransformedBbox, Affine2D
from matplotlib.rcsetup import (
validate_capstyle, validate_fontsize, validate_fonttype, validate_hatch,
validate_joinstyle)
try:
from nc_time_axis import NetCDFTimeConverter, CalendarDateTime
nc_axis_available = True
except:
from matplotlib.dates import DateConverter
NetCDFTimeConverter = DateConverter
nc_axis_available = False
from ...core.util import (
LooseVersion, _getargspec, basestring, cftime_types, is_number)
from ...element import Raster, RGB, Polygons
from ..util import COLOR_ALIASES, RGB_HEX_REGEX
mpl_version = LooseVersion(matplotlib.__version__)
def is_color(color):
"""
Checks if supplied object is a valid color spec.
"""
if not isinstance(color, basestring):
return False
elif RGB_HEX_REGEX.match(color):
return True
elif color in COLOR_ALIASES:
return True
elif color in cnames:
return True
return False
validators = {
'alpha': lambda x: is_number(x) and (0 <= x <= 1),
'capstyle': validate_capstyle,
'color': is_color,
'fontsize': validate_fontsize,
'fonttype': validate_fonttype,
'hatch': validate_hatch,
'joinstyle': validate_joinstyle,
'marker': lambda x: (x in Line2D.markers or isinstance(x, MarkerStyle)
or isinstance(x, Path) or
(isinstance(x, basestring) and x.startswith('$')
and x.endswith('$'))),
's': lambda x: is_number(x) and (x >= 0)
}
def get_validator(style):
for k, v in validators.items():
if style.endswith(k) and (len(style) != 1 or style == k):
return v
def validate(style, value, vectorized=True):
"""
Validates a style and associated value.
Arguments
---------
style: str
The style to validate (e.g. 'color', 'size' or 'marker')
value:
The style value to validate
vectorized: bool
Whether validator should allow vectorized setting
Returns
-------
valid: boolean or None
If validation is supported returns boolean, otherwise None
"""
validator = get_validator(style)
if validator is None:
return None
if isinstance(value, (np.ndarray, list)) and vectorized:
return all(validator(v) for v in value)
try:
valid = validator(value)
return False if valid == False else True
except:
return False
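# Illustrative sketch of validate (example values are assumptions): known
# style names give True/False, unknown names give None.
#
#   validate('color', 'red')   # -> True
#   validate('alpha', 1.5)     # -> False (outside [0, 1])
#   validate('foo', 1)         # -> None (no validator registered)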
def filter_styles(style, group, other_groups, blacklist=[]):
"""
Filters styles which are specific to a particular artist, e.g.
for a GraphPlot this will filter options specific to the nodes and
edges.
Arguments
---------
style: dict
Dictionary of styles and values
group: str
Group within the styles to filter for
other_groups: list
Other groups to filter out
blacklist: list (optional)
List of options to filter out
Returns
-------
filtered: dict
Filtered dictionary of styles
"""
group = group+'_'
filtered = {}
for k, v in style.items():
if (any(k.startswith(p) for p in other_groups)
or k.startswith(group) or k in blacklist):
continue
filtered[k] = v
for k, v in style.items():
if not k.startswith(group) or k in blacklist:
continue
filtered[k[len(group):]] = v
return filtered
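# Illustrative sketch of filter_styles (style names are assumptions): keep
# only the options relevant to the 'node' artist, stripping the group prefix.
#
#   style = {'node_color': 'red', 'edge_color': 'black', 'linewidth': 2}
#   filter_styles(style, 'node', ['edge_'])
#   # -> {'linewidth': 2, 'color': 'red'}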
def wrap_formatter(formatter):
"""
Wraps formatting function or string in
appropriate matplotlib formatter type.
"""
if isinstance(formatter, ticker.Formatter):
return formatter
elif callable(formatter):
args = [arg for arg in _getargspec(formatter).args
if arg != 'self']
wrapped = formatter
if len(args) == 1:
def wrapped(val, pos=None):
return formatter(val)
return ticker.FuncFormatter(wrapped)
elif isinstance(formatter, basestring):
if re.findall(r"\{(\w+)\}", formatter):
return ticker.StrMethodFormatter(formatter)
else:
return ticker.FormatStrFormatter(formatter)
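# Illustrative sketch of wrap_formatter (inputs are assumptions):
#
#   wrap_formatter('%.2f')            # -> ticker.FormatStrFormatter
#   wrap_formatter('{x}')             # -> ticker.StrMethodFormatter
#   wrap_formatter(lambda v: str(v))  # -> ticker.FuncFormatter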
def unpack_adjoints(ratios):
new_ratios = {}
offset = 0
for k, (num, ratios) in sorted(ratios.items()):
unpacked = [[] for _ in range(num)]
for r in ratios:
nr = len(r)
for i in range(num):
unpacked[i].append(r[i] if i < nr else np.nan)
for i, r in enumerate(unpacked):
new_ratios[k+i+offset] = r
offset += num-1
return new_ratios
def normalize_ratios(ratios):
normalized = {}
for i, v in enumerate(zip(*ratios.values())):
arr = np.array(v)
normalized[i] = arr/float(np.nanmax(arr))
return normalized
def compute_ratios(ratios, normalized=True):
unpacked = unpack_adjoints(ratios)
with warnings.catch_warnings():
warnings.filterwarnings('ignore', r'All-NaN (slice|axis) encountered')
if normalized:
unpacked = normalize_ratios(unpacked)
sorted_ratios = sorted(unpacked.items())
return np.nanmax(np.vstack([v for _, v in sorted_ratios]), axis=0)
def axis_overlap(ax1, ax2):
"""
Tests whether two axes overlap vertically
"""
b1, t1 = ax1.get_position().intervaly
b2, t2 = ax2.get_position().intervaly
return t1 > b2 and b1 < t2
def resolve_rows(rows):
"""
Recursively iterate over lists of axes merging
them by their vertical overlap leaving a list
of rows.
"""
merged_rows = []
for row in rows:
overlap = False
for mrow in merged_rows:
if any(axis_overlap(ax1, ax2) for ax1 in row
for ax2 in mrow):
mrow += row
overlap = True
break
if not overlap:
merged_rows.append(row)
if rows == merged_rows:
return rows
else:
return resolve_rows(merged_rows)
def fix_aspect(fig, nrows, ncols, title=None, extra_artists=[],
vspace=0.2, hspace=0.2):
"""
Calculate heights and widths of axes and adjust
the size of the figure to match the aspect.
"""
fig.canvas.draw()
w, h = fig.get_size_inches()
# Compute maximum height and width of each row and columns
rows = resolve_rows([[ax] for ax in fig.axes])
rs, cs = len(rows), max([len(r) for r in rows])
heights = [[] for i in range(cs)]
widths = [[] for i in range(rs)]
for r, row in enumerate(rows):
for c, ax in enumerate(row):
bbox = ax.get_tightbbox(fig.canvas.get_renderer())
heights[c].append(bbox.height)
widths[r].append(bbox.width)
height = (max([sum(c) for c in heights])) + nrows*vspace*fig.dpi
width = (max([sum(r) for r in widths])) + ncols*hspace*fig.dpi
# Compute aspect and set new size (in inches)
aspect = height/width
offset = 0
if title and title.get_text():
offset = title.get_window_extent().height/fig.dpi
fig.set_size_inches(w, (w*aspect)+offset)
# Redraw and adjust title position if defined
fig.canvas.draw()
if title and title.get_text():
extra_artists = [a for a in extra_artists
if a is not title]
bbox = get_tight_bbox(fig, extra_artists)
top = bbox.intervaly[1]
if title and title.get_text():
title.set_y((top/(w*aspect)))
def get_tight_bbox(fig, bbox_extra_artists=[], pad=None):
"""
Compute a tight bounding box around all the artists in the figure.
"""
renderer = fig.canvas.get_renderer()
bbox_inches = fig.get_tightbbox(renderer)
bbox_artists = bbox_extra_artists[:]
bbox_artists += fig.get_default_bbox_extra_artists()
bbox_filtered = []
for a in bbox_artists:
bbox = a.get_window_extent(renderer)
if isinstance(bbox, tuple):
continue
if a.get_clip_on():
clip_box = a.get_clip_box()
if clip_box is not None:
bbox = Bbox.intersection(bbox, clip_box)
clip_path = a.get_clip_path()
if clip_path is not None and bbox is not None:
clip_path = clip_path.get_fully_transformed_path()
bbox = Bbox.intersection(bbox,
clip_path.get_extents())
if bbox is not None and (bbox.width != 0 or
bbox.height != 0):
bbox_filtered.append(bbox)
if bbox_filtered:
_bbox = Bbox.union(bbox_filtered)
trans = Affine2D().scale(1.0 / fig.dpi)
bbox_extra = TransformedBbox(_bbox, trans)
bbox_inches = Bbox.union([bbox_inches, bbox_extra])
return bbox_inches.padded(pad) if pad else bbox_inches
def get_raster_array(image):
"""
Return the array data from any Raster or Image type
"""
if isinstance(image, RGB):
rgb = image.rgb
data = np.dstack([np.flipud(rgb.dimension_values(d, flat=False))
for d in rgb.vdims])
else:
data = image.dimension_values(2, flat=False)
if type(image) is Raster:
data = data.T
else:
data = np.flipud(data)
return data
def ring_coding(array):
"""
Produces matplotlib Path codes for exterior and interior rings
of a polygon geometry.
"""
# The codes will be all "LINETO" commands, except for "MOVETO"s at the
# beginning of each subpath
n = len(array)
codes = np.ones(n, dtype=Path.code_type) * Path.LINETO
codes[0] = Path.MOVETO
codes[-1] = Path.CLOSEPOLY
return codes
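# Illustrative worked example (vertices are assumptions): a closed square
# given as five points yields one MOVETO, three LINETOs and a CLOSEPOLY.
#
#   square = np.array([[0, 0], [1, 0], [1, 1], [0, 1], [0, 0]])
#   ring_coding(square)   # -> array([1, 2, 2, 2, 79], dtype=uint8)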
def polygons_to_path_patches(element):
"""
Converts Polygons into list of lists of matplotlib.patches.PathPatch
objects including any specified holes. Each list represents one
(multi-)polygon.
"""
paths = element.split(datatype='array', dimensions=element.kdims)
has_holes = isinstance(element, Polygons) and element.interface.has_holes(element)
holes = element.interface.holes(element) if has_holes else None
mpl_paths = []
for i, path in enumerate(paths):
splits = np.where(np.isnan(path[:, :2].astype('float')).sum(axis=1))[0]
arrays = np.split(path, splits+1) if len(splits) else [path]
subpath = []
for j, array in enumerate(arrays):
if j != (len(arrays)-1):
array = array[:-1]
if (array[0] != array[-1]).any():
array = np.append(array, array[:1], axis=0)
interiors = []
for interior in (holes[i][j] if has_holes else []):
if (interior[0] != interior[-1]).any():
interior = np.append(interior, interior[:1], axis=0)
interiors.append(interior)
vertices = np.concatenate([array]+interiors)
codes = np.concatenate([ring_coding(array)]+
[ring_coding(h) for h in interiors])
subpath.append(PathPatch(Path(vertices, codes)))
mpl_paths.append(subpath)
return mpl_paths
class CFTimeConverter(NetCDFTimeConverter):
"""
Defines conversions for cftime types by extending nc_time_axis.
"""
@classmethod
def convert(cls, value, unit, axis):
if not nc_axis_available:
raise ValueError('In order to display cftime types with '
'matplotlib install the nc_time_axis '
'library using pip or from conda-forge '
'using:\n\tconda install -c conda-forge '
'nc_time_axis')
if isinstance(value, cftime_types):
value = CalendarDateTime(value.datetime, value.calendar)
elif isinstance(value, np.ndarray):
value = np.array([CalendarDateTime(v.datetime, v.calendar) for v in value])
return super(CFTimeConverter, cls).convert(value, unit, axis)
for cft in cftime_types:
munits.registry[cft] = CFTimeConverter()
| bsd-3-clause |
akhilaananthram/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/mathtext.py | 69 | 101723 | r"""
:mod:`~matplotlib.mathtext` is a module for parsing a subset of the
TeX math syntax and drawing them to a matplotlib backend.
For a tutorial of its usage see :ref:`mathtext-tutorial`. This
document is primarily concerned with implementation details.
The module uses pyparsing_ to parse the TeX expression.
.. _pyparsing: http://pyparsing.wikispaces.com/
The Bakoma distribution of the TeX Computer Modern fonts, and STIX
fonts are supported. There is experimental support for using
arbitrary fonts, but results may vary without proper tweaking and
metrics for those fonts.
If you find TeX expressions that don't parse or render properly,
please email [email protected], but please check KNOWN ISSUES below first.
"""
from __future__ import division
import os
from cStringIO import StringIO
from math import ceil
try:
set
except NameError:
from sets import Set as set
import unicodedata
from warnings import warn
from numpy import inf, isinf
import numpy as np
from matplotlib.pyparsing import Combine, Group, Optional, Forward, \
Literal, OneOrMore, ZeroOrMore, ParseException, Empty, \
ParseResults, Suppress, oneOf, StringEnd, ParseFatalException, \
FollowedBy, Regex, ParserElement
# Enable packrat parsing
ParserElement.enablePackrat()
from matplotlib.afm import AFM
from matplotlib.cbook import Bunch, get_realpath_and_stat, \
is_string_like, maxdict
from matplotlib.ft2font import FT2Font, FT2Image, KERNING_DEFAULT, LOAD_FORCE_AUTOHINT, LOAD_NO_HINTING
from matplotlib.font_manager import findfont, FontProperties
from matplotlib._mathtext_data import latex_to_bakoma, \
latex_to_standard, tex2uni, latex_to_cmex, stix_virtual_fonts
from matplotlib import get_data_path, rcParams
import matplotlib.colors as mcolors
import matplotlib._png as _png
####################
##############################################################################
# FONTS
def get_unicode_index(symbol):
"""get_unicode_index(symbol) -> integer
Return the integer index (from the Unicode table) of symbol. *symbol*
can be a single unicode character, a TeX command (i.e. r'\pi'), or a
Type1 symbol name (i.e. 'phi').
"""
# From UTF #25: U+2212 minus sign is the preferred
# representation of the unary and binary minus sign rather than
# the ASCII-derived U+002D hyphen-minus, because minus sign is
# unambiguous and because it is rendered with a more desirable
# length, usually longer than a hyphen.
if symbol == '-':
return 0x2212
try:# This will succeed if symbol is a single unicode char
return ord(symbol)
except TypeError:
pass
try:# Is symbol a TeX symbol (i.e. \alpha)
return tex2uni[symbol.strip("\\")]
except KeyError:
message = """'%(symbol)s' is not a valid Unicode character or
TeX/Type1 symbol"""%locals()
raise ValueError, message
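# Illustrative usage sketch for get_unicode_index:
#
#   get_unicode_index(r'\pi')   # -> 0x3c0 (GREEK SMALL LETTER PI)
#   get_unicode_index('x')      # -> 0x78, i.e. ord('x')
#   get_unicode_index('-')      # -> 0x2212, the Unicode minus sign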
class MathtextBackend(object):
"""
The base class for the mathtext backend-specific code. The
purpose of :class:`MathtextBackend` subclasses is to interface
between mathtext and a specific matplotlib graphics backend.
Subclasses need to override the following:
- :meth:`render_glyph`
- :meth:`render_filled_rect`
- :meth:`get_results`
And optionally, if you need to use a Freetype hinting style:
- :meth:`get_hinting_type`
"""
def __init__(self):
self.fonts_object = None
def set_canvas_size(self, w, h, d):
'Dimension the drawing canvas'
self.width = w
self.height = h
self.depth = d
def render_glyph(self, ox, oy, info):
"""
Draw a glyph described by *info* to the reference point (*ox*,
*oy*).
"""
raise NotImplementedError()
def render_filled_rect(self, x1, y1, x2, y2):
"""
Draw a filled black rectangle from (*x1*, *y1*) to (*x2*, *y2*).
"""
raise NotImplementedError()
def get_results(self, box):
"""
Return a backend-specific tuple to return to the backend after
all processing is done.
"""
raise NotImplementedError()
def get_hinting_type(self):
"""
Get the Freetype hinting type to use with this particular
backend.
"""
return LOAD_NO_HINTING
class MathtextBackendBbox(MathtextBackend):
"""
A backend whose only purpose is to get a precise bounding box.
Only required for the Agg backend.
"""
def __init__(self, real_backend):
MathtextBackend.__init__(self)
self.bbox = [0, 0, 0, 0]
self.real_backend = real_backend
def _update_bbox(self, x1, y1, x2, y2):
self.bbox = [min(self.bbox[0], x1),
min(self.bbox[1], y1),
max(self.bbox[2], x2),
max(self.bbox[3], y2)]
def render_glyph(self, ox, oy, info):
self._update_bbox(ox + info.metrics.xmin,
oy - info.metrics.ymax,
ox + info.metrics.xmax,
oy - info.metrics.ymin)
def render_rect_filled(self, x1, y1, x2, y2):
self._update_bbox(x1, y1, x2, y2)
def get_results(self, box):
orig_height = box.height
orig_depth = box.depth
ship(0, 0, box)
bbox = self.bbox
bbox = [bbox[0] - 1, bbox[1] - 1, bbox[2] + 1, bbox[3] + 1]
self._switch_to_real_backend()
self.fonts_object.set_canvas_size(
bbox[2] - bbox[0],
(bbox[3] - bbox[1]) - orig_depth,
(bbox[3] - bbox[1]) - orig_height)
ship(-bbox[0], -bbox[1], box)
return self.fonts_object.get_results(box)
def get_hinting_type(self):
return self.real_backend.get_hinting_type()
def _switch_to_real_backend(self):
self.fonts_object.mathtext_backend = self.real_backend
self.real_backend.fonts_object = self.fonts_object
self.real_backend.ox = self.bbox[0]
self.real_backend.oy = self.bbox[1]
class MathtextBackendAggRender(MathtextBackend):
"""
Render glyphs and rectangles to an FTImage buffer, which is later
transferred to the Agg image by the Agg backend.
"""
def __init__(self):
self.ox = 0
self.oy = 0
self.image = None
MathtextBackend.__init__(self)
def set_canvas_size(self, w, h, d):
MathtextBackend.set_canvas_size(self, w, h, d)
self.image = FT2Image(ceil(w), ceil(h + d))
def render_glyph(self, ox, oy, info):
info.font.draw_glyph_to_bitmap(
self.image, ox, oy - info.metrics.ymax, info.glyph)
def render_rect_filled(self, x1, y1, x2, y2):
height = max(int(y2 - y1) - 1, 0)
if height == 0:
center = (y2 + y1) / 2.0
y = int(center - (height + 1) / 2.0)
else:
y = int(y1)
self.image.draw_rect_filled(int(x1), y, ceil(x2), y + height)
def get_results(self, box):
return (self.ox,
self.oy,
self.width,
self.height + self.depth,
self.depth,
self.image,
self.fonts_object.get_used_characters())
def get_hinting_type(self):
return LOAD_FORCE_AUTOHINT
def MathtextBackendAgg():
return MathtextBackendBbox(MathtextBackendAggRender())
class MathtextBackendBitmapRender(MathtextBackendAggRender):
def get_results(self, box):
return self.image, self.depth
def MathtextBackendBitmap():
"""
A backend to generate standalone mathtext images. No additional
matplotlib backend is required.
"""
return MathtextBackendBbox(MathtextBackendBitmapRender())
class MathtextBackendPs(MathtextBackend):
"""
Store information to write a mathtext rendering to the PostScript
backend.
"""
def __init__(self):
self.pswriter = StringIO()
self.lastfont = None
def render_glyph(self, ox, oy, info):
oy = self.height - oy + info.offset
postscript_name = info.postscript_name
fontsize = info.fontsize
symbol_name = info.symbol_name
if (postscript_name, fontsize) != self.lastfont:
ps = """/%(postscript_name)s findfont
%(fontsize)s scalefont
setfont
""" % locals()
self.lastfont = postscript_name, fontsize
self.pswriter.write(ps)
ps = """%(ox)f %(oy)f moveto
/%(symbol_name)s glyphshow\n
""" % locals()
self.pswriter.write(ps)
def render_rect_filled(self, x1, y1, x2, y2):
ps = "%f %f %f %f rectfill\n" % (x1, self.height - y2, x2 - x1, y2 - y1)
self.pswriter.write(ps)
def get_results(self, box):
ship(0, -self.depth, box)
#print self.depth
return (self.width,
self.height + self.depth,
self.depth,
self.pswriter,
self.fonts_object.get_used_characters())
class MathtextBackendPdf(MathtextBackend):
"""
Store information to write a mathtext rendering to the PDF
backend.
"""
def __init__(self):
self.glyphs = []
self.rects = []
def render_glyph(self, ox, oy, info):
filename = info.font.fname
oy = self.height - oy + info.offset
self.glyphs.append(
(ox, oy, filename, info.fontsize,
info.num, info.symbol_name))
def render_rect_filled(self, x1, y1, x2, y2):
self.rects.append((x1, self.height - y2, x2 - x1, y2 - y1))
def get_results(self, box):
ship(0, -self.depth, box)
return (self.width,
self.height + self.depth,
self.depth,
self.glyphs,
self.rects,
self.fonts_object.get_used_characters())
class MathtextBackendSvg(MathtextBackend):
"""
Store information to write a mathtext rendering to the SVG
backend.
"""
def __init__(self):
self.svg_glyphs = []
self.svg_rects = []
def render_glyph(self, ox, oy, info):
oy = self.height - oy + info.offset
thetext = unichr(info.num)
self.svg_glyphs.append(
(info.font, info.fontsize, thetext, ox, oy, info.metrics))
def render_rect_filled(self, x1, y1, x2, y2):
self.svg_rects.append(
(x1, self.height - y1 + 1, x2 - x1, y2 - y1))
def get_results(self, box):
ship(0, -self.depth, box)
svg_elements = Bunch(svg_glyphs = self.svg_glyphs,
svg_rects = self.svg_rects)
return (self.width,
self.height + self.depth,
self.depth,
svg_elements,
self.fonts_object.get_used_characters())
class MathtextBackendCairo(MathtextBackend):
"""
Store information to write a mathtext rendering to the Cairo
backend.
"""
def __init__(self):
self.glyphs = []
self.rects = []
def render_glyph(self, ox, oy, info):
oy = oy - info.offset - self.height
thetext = unichr(info.num)
self.glyphs.append(
(info.font, info.fontsize, thetext, ox, oy))
def render_rect_filled(self, x1, y1, x2, y2):
self.rects.append(
(x1, y1 - self.height, x2 - x1, y2 - y1))
def get_results(self, box):
ship(0, -self.depth, box)
return (self.width,
self.height + self.depth,
self.depth,
self.glyphs,
self.rects)
class Fonts(object):
"""
An abstract base class for a system of fonts to use for mathtext.
The class must be able to take symbol keys and font file names and
return the character metrics. It also delegates to a backend class
to do the actual drawing.
"""
def __init__(self, default_font_prop, mathtext_backend):
"""
*default_font_prop*: A
:class:`~matplotlib.font_manager.FontProperties` object to use
for the default non-math font, or the base font for Unicode
(generic) font rendering.
*mathtext_backend*: A subclass of :class:`MathTextBackend`
used to delegate the actual rendering.
"""
self.default_font_prop = default_font_prop
self.mathtext_backend = mathtext_backend
# Make these classes doubly-linked
self.mathtext_backend.fonts_object = self
self.used_characters = {}
def destroy(self):
"""
Fix any cyclical references before the object is about
to be destroyed.
"""
self.used_characters = None
def get_kern(self, font1, fontclass1, sym1, fontsize1,
font2, fontclass2, sym2, fontsize2, dpi):
"""
Get the kerning distance for font between *sym1* and *sym2*.
*fontX*: one of the TeX font names::
tt, it, rm, cal, sf, bf or default (non-math)
*fontclassX*: TODO
*symX*: a symbol in raw TeX form. e.g. '1', 'x' or '\sigma'
*fontsizeX*: the fontsize in points
*dpi*: the current dots-per-inch
"""
return 0.
def get_metrics(self, font, font_class, sym, fontsize, dpi):
"""
*font*: one of the TeX font names::
tt, it, rm, cal, sf, bf or default (non-math)
*font_class*: TODO
*sym*: a symbol in raw TeX form. e.g. '1', 'x' or '\sigma'
*fontsize*: font size in points
*dpi*: current dots-per-inch
Returns an object with the following attributes:
- *advance*: The advance distance (in points) of the glyph.
- *height*: The height of the glyph in points.
- *width*: The width of the glyph in points.
- *xmin*, *xmax*, *ymin*, *ymax* - the ink rectangle of the glyph
- *iceberg* - the distance from the baseline to the top of
the glyph. This corresponds to TeX's definition of
"height".
"""
info = self._get_info(font, font_class, sym, fontsize, dpi)
return info.metrics
def set_canvas_size(self, w, h, d):
"""
Set the size of the buffer used to render the math expression.
Only really necessary for the bitmap backends.
"""
self.width, self.height, self.depth = ceil(w), ceil(h), ceil(d)
self.mathtext_backend.set_canvas_size(self.width, self.height, self.depth)
def render_glyph(self, ox, oy, facename, font_class, sym, fontsize, dpi):
"""
Draw a glyph at
- *ox*, *oy*: position
- *facename*: One of the TeX face names
- *font_class*:
- *sym*: TeX symbol name or single character
- *fontsize*: fontsize in points
- *dpi*: The dpi to draw at.
"""
info = self._get_info(facename, font_class, sym, fontsize, dpi)
realpath, stat_key = get_realpath_and_stat(info.font.fname)
used_characters = self.used_characters.setdefault(
stat_key, (realpath, set()))
used_characters[1].add(info.num)
self.mathtext_backend.render_glyph(ox, oy, info)
def render_rect_filled(self, x1, y1, x2, y2):
"""
Draw a filled rectangle from (*x1*, *y1*) to (*x2*, *y2*).
"""
self.mathtext_backend.render_rect_filled(x1, y1, x2, y2)
def get_xheight(self, font, fontsize, dpi):
"""
Get the xheight for the given *font* and *fontsize*.
"""
raise NotImplementedError()
def get_underline_thickness(self, font, fontsize, dpi):
"""
Get the line thickness that matches the given font. Used as a
base unit for drawing lines such as in a fraction or radical.
"""
raise NotImplementedError()
def get_used_characters(self):
"""
Get the set of characters that were used in the math
expression. Used by backends that need to subset fonts so
they know which glyphs to include.
"""
return self.used_characters
def get_results(self, box):
"""
Get the data needed by the backend to render the math
expression. The return value is backend-specific.
"""
return self.mathtext_backend.get_results(box)
def get_sized_alternatives_for_symbol(self, fontname, sym):
"""
Override if your font provides multiple sizes of the same
symbol. Should return a list of symbols matching *sym* in
various sizes. The expression renderer will select the most
appropriate size for a given situation from this list.
"""
return [(fontname, sym)]
class TruetypeFonts(Fonts):
"""
A generic base class for all font setups that use Truetype fonts
(through FT2Font).
"""
class CachedFont:
def __init__(self, font):
self.font = font
self.charmap = font.get_charmap()
self.glyphmap = dict(
[(glyphind, ccode) for ccode, glyphind in self.charmap.iteritems()])
def __repr__(self):
return repr(self.font)
def __init__(self, default_font_prop, mathtext_backend):
Fonts.__init__(self, default_font_prop, mathtext_backend)
self.glyphd = {}
self._fonts = {}
filename = findfont(default_font_prop)
default_font = self.CachedFont(FT2Font(str(filename)))
self._fonts['default'] = default_font
def destroy(self):
self.glyphd = None
Fonts.destroy(self)
def _get_font(self, font):
if font in self.fontmap:
basename = self.fontmap[font]
else:
basename = font
cached_font = self._fonts.get(basename)
if cached_font is None:
font = FT2Font(basename)
cached_font = self.CachedFont(font)
self._fonts[basename] = cached_font
self._fonts[font.postscript_name] = cached_font
self._fonts[font.postscript_name.lower()] = cached_font
return cached_font
def _get_offset(self, cached_font, glyph, fontsize, dpi):
if cached_font.font.postscript_name == 'Cmex10':
return glyph.height/64.0/2.0 + 256.0/64.0 * dpi/72.0
return 0.
def _get_info(self, fontname, font_class, sym, fontsize, dpi):
key = fontname, font_class, sym, fontsize, dpi
bunch = self.glyphd.get(key)
if bunch is not None:
return bunch
cached_font, num, symbol_name, fontsize, slanted = \
self._get_glyph(fontname, font_class, sym, fontsize)
font = cached_font.font
font.set_size(fontsize, dpi)
glyph = font.load_char(
num,
flags=self.mathtext_backend.get_hinting_type())
xmin, ymin, xmax, ymax = [val/64.0 for val in glyph.bbox]
offset = self._get_offset(cached_font, glyph, fontsize, dpi)
metrics = Bunch(
advance = glyph.linearHoriAdvance/65536.0,
height = glyph.height/64.0,
width = glyph.width/64.0,
xmin = xmin,
xmax = xmax,
ymin = ymin+offset,
ymax = ymax+offset,
# iceberg is the equivalent of TeX's "height"
iceberg = glyph.horiBearingY/64.0 + offset,
slanted = slanted
)
result = self.glyphd[key] = Bunch(
font = font,
fontsize = fontsize,
postscript_name = font.postscript_name,
metrics = metrics,
symbol_name = symbol_name,
num = num,
glyph = glyph,
offset = offset
)
return result
def get_xheight(self, font, fontsize, dpi):
cached_font = self._get_font(font)
cached_font.font.set_size(fontsize, dpi)
pclt = cached_font.font.get_sfnt_table('pclt')
if pclt is None:
# Some fonts don't store the xHeight, so we do a poor man's xHeight
metrics = self.get_metrics(font, 'it', 'x', fontsize, dpi)
return metrics.iceberg
xHeight = (pclt['xHeight'] / 64.0) * (fontsize / 12.0) * (dpi / 100.0)
return xHeight
def get_underline_thickness(self, font, fontsize, dpi):
# This function used to grab underline thickness from the font
# metrics, but that information is just too un-reliable, so it
# is now hardcoded.
return ((0.75 / 12.0) * fontsize * dpi) / 72.0
def get_kern(self, font1, fontclass1, sym1, fontsize1,
font2, fontclass2, sym2, fontsize2, dpi):
if font1 == font2 and fontsize1 == fontsize2:
info1 = self._get_info(font1, fontclass1, sym1, fontsize1, dpi)
info2 = self._get_info(font2, fontclass2, sym2, fontsize2, dpi)
font = info1.font
return font.get_kerning(info1.num, info2.num, KERNING_DEFAULT) / 64.0
return Fonts.get_kern(self, font1, fontclass1, sym1, fontsize1,
font2, fontclass2, sym2, fontsize2, dpi)
class BakomaFonts(TruetypeFonts):
"""
Use the Bakoma TrueType fonts for rendering.
Symbols are strewn about a number of font files, each of which has
its own proprietary 8-bit encoding.
"""
_fontmap = { 'cal' : 'cmsy10',
'rm' : 'cmr10',
'tt' : 'cmtt10',
'it' : 'cmmi10',
'bf' : 'cmb10',
'sf' : 'cmss10',
'ex' : 'cmex10'
}
fontmap = {}
def __init__(self, *args, **kwargs):
self._stix_fallback = StixFonts(*args, **kwargs)
TruetypeFonts.__init__(self, *args, **kwargs)
if not len(self.fontmap):
for key, val in self._fontmap.iteritems():
fullpath = findfont(val)
self.fontmap[key] = fullpath
self.fontmap[val] = fullpath
_slanted_symbols = set(r"\int \oint".split())
def _get_glyph(self, fontname, font_class, sym, fontsize):
symbol_name = None
if fontname in self.fontmap and sym in latex_to_bakoma:
basename, num = latex_to_bakoma[sym]
slanted = (basename == "cmmi10") or sym in self._slanted_symbols
try:
cached_font = self._get_font(basename)
except RuntimeError:
pass
else:
symbol_name = cached_font.font.get_glyph_name(num)
num = cached_font.glyphmap[num]
elif len(sym) == 1:
slanted = (fontname == "it")
try:
cached_font = self._get_font(fontname)
except RuntimeError:
pass
else:
num = ord(sym)
gid = cached_font.charmap.get(num)
if gid is not None:
symbol_name = cached_font.font.get_glyph_name(
cached_font.charmap[num])
if symbol_name is None:
return self._stix_fallback._get_glyph(
fontname, font_class, sym, fontsize)
return cached_font, num, symbol_name, fontsize, slanted
# The Bakoma fonts contain many pre-sized alternatives for the
# delimiters. The AutoSizedChar class will use these alternatives
# and select the best (closest sized) glyph.
_size_alternatives = {
'(' : [('rm', '('), ('ex', '\xa1'), ('ex', '\xb3'),
('ex', '\xb5'), ('ex', '\xc3')],
')' : [('rm', ')'), ('ex', '\xa2'), ('ex', '\xb4'),
('ex', '\xb6'), ('ex', '\x21')],
'{' : [('cal', '{'), ('ex', '\xa9'), ('ex', '\x6e'),
('ex', '\xbd'), ('ex', '\x28')],
'}' : [('cal', '}'), ('ex', '\xaa'), ('ex', '\x6f'),
('ex', '\xbe'), ('ex', '\x29')],
# The fourth size of '[' is mysteriously missing from the BaKoMa
        # font, so I've omitted it for both '[' and ']'
'[' : [('rm', '['), ('ex', '\xa3'), ('ex', '\x68'),
('ex', '\x22')],
']' : [('rm', ']'), ('ex', '\xa4'), ('ex', '\x69'),
('ex', '\x23')],
r'\lfloor' : [('ex', '\xa5'), ('ex', '\x6a'),
('ex', '\xb9'), ('ex', '\x24')],
r'\rfloor' : [('ex', '\xa6'), ('ex', '\x6b'),
('ex', '\xba'), ('ex', '\x25')],
r'\lceil' : [('ex', '\xa7'), ('ex', '\x6c'),
('ex', '\xbb'), ('ex', '\x26')],
r'\rceil' : [('ex', '\xa8'), ('ex', '\x6d'),
('ex', '\xbc'), ('ex', '\x27')],
r'\langle' : [('ex', '\xad'), ('ex', '\x44'),
('ex', '\xbf'), ('ex', '\x2a')],
r'\rangle' : [('ex', '\xae'), ('ex', '\x45'),
('ex', '\xc0'), ('ex', '\x2b')],
r'\__sqrt__' : [('ex', '\x70'), ('ex', '\x71'),
('ex', '\x72'), ('ex', '\x73')],
r'\backslash': [('ex', '\xb2'), ('ex', '\x2f'),
('ex', '\xc2'), ('ex', '\x2d')],
r'/' : [('rm', '/'), ('ex', '\xb1'), ('ex', '\x2e'),
('ex', '\xcb'), ('ex', '\x2c')],
r'\widehat' : [('rm', '\x5e'), ('ex', '\x62'), ('ex', '\x63'),
('ex', '\x64')],
r'\widetilde': [('rm', '\x7e'), ('ex', '\x65'), ('ex', '\x66'),
('ex', '\x67')],
r'<' : [('cal', 'h'), ('ex', 'D')],
r'>' : [('cal', 'i'), ('ex', 'E')]
}
for alias, target in [('\leftparen', '('),
                          ('\rightparen', ')'),
('\leftbrace', '{'),
('\rightbrace', '}'),
('\leftbracket', '['),
('\rightbracket', ']')]:
_size_alternatives[alias] = _size_alternatives[target]
def get_sized_alternatives_for_symbol(self, fontname, sym):
return self._size_alternatives.get(sym, [(fontname, sym)])
class UnicodeFonts(TruetypeFonts):
"""
An abstract base class for handling Unicode fonts.
While some reasonably complete Unicode fonts (such as DejaVu) may
work in some situations, the only Unicode font I'm aware of with a
complete set of math symbols is STIX.
This class will "fallback" on the Bakoma fonts when a required
symbol can not be found in the font.
"""
fontmap = {}
use_cmex = True
def __init__(self, *args, **kwargs):
# This must come first so the backend's owner is set correctly
if rcParams['mathtext.fallback_to_cm']:
self.cm_fallback = BakomaFonts(*args, **kwargs)
else:
self.cm_fallback = None
TruetypeFonts.__init__(self, *args, **kwargs)
if not len(self.fontmap):
for texfont in "cal rm tt it bf sf".split():
prop = rcParams['mathtext.' + texfont]
font = findfont(prop)
self.fontmap[texfont] = font
prop = FontProperties('cmex10')
font = findfont(prop)
self.fontmap['ex'] = font
_slanted_symbols = set(r"\int \oint".split())
def _map_virtual_font(self, fontname, font_class, uniindex):
return fontname, uniindex
def _get_glyph(self, fontname, font_class, sym, fontsize):
found_symbol = False
if self.use_cmex:
uniindex = latex_to_cmex.get(sym)
if uniindex is not None:
fontname = 'ex'
found_symbol = True
if not found_symbol:
try:
uniindex = get_unicode_index(sym)
found_symbol = True
except ValueError:
uniindex = ord('?')
warn("No TeX to unicode mapping for '%s'" %
sym.encode('ascii', 'backslashreplace'),
MathTextWarning)
fontname, uniindex = self._map_virtual_font(
fontname, font_class, uniindex)
# Only characters in the "Letter" class should be italicized in 'it'
# mode. Greek capital letters should be Roman.
if found_symbol:
new_fontname = fontname
if fontname == 'it':
if uniindex < 0x10000:
unistring = unichr(uniindex)
if (not unicodedata.category(unistring)[0] == "L"
or unicodedata.name(unistring).startswith("GREEK CAPITAL")):
new_fontname = 'rm'
slanted = (new_fontname == 'it') or sym in self._slanted_symbols
found_symbol = False
try:
cached_font = self._get_font(new_fontname)
except RuntimeError:
pass
else:
try:
glyphindex = cached_font.charmap[uniindex]
found_symbol = True
except KeyError:
pass
if not found_symbol:
if self.cm_fallback:
warn("Substituting with a symbol from Computer Modern.",
MathTextWarning)
return self.cm_fallback._get_glyph(
fontname, 'it', sym, fontsize)
else:
if fontname == 'it' and isinstance(self, StixFonts):
return self._get_glyph('rm', font_class, sym, fontsize)
warn("Font '%s' does not have a glyph for '%s'" %
(fontname, sym.encode('ascii', 'backslashreplace')),
MathTextWarning)
warn("Substituting with a dummy symbol.", MathTextWarning)
fontname = 'rm'
new_fontname = fontname
cached_font = self._get_font(fontname)
uniindex = 0xA4 # currency character, for lack of anything better
glyphindex = cached_font.charmap[uniindex]
slanted = False
symbol_name = cached_font.font.get_glyph_name(glyphindex)
return cached_font, uniindex, symbol_name, fontsize, slanted
def get_sized_alternatives_for_symbol(self, fontname, sym):
if self.cm_fallback:
return self.cm_fallback.get_sized_alternatives_for_symbol(
fontname, sym)
return [(fontname, sym)]
class StixFonts(UnicodeFonts):
"""
A font handling class for the STIX fonts.
In addition to what UnicodeFonts provides, this class:
- supports "virtual fonts" which are complete alpha numeric
character sets with different font styles at special Unicode
code points, such as "Blackboard".
- handles sized alternative characters for the STIXSizeX fonts.
"""
_fontmap = { 'rm' : 'STIXGeneral',
'it' : 'STIXGeneral:italic',
'bf' : 'STIXGeneral:weight=bold',
'nonunirm' : 'STIXNonUnicode',
'nonuniit' : 'STIXNonUnicode:italic',
'nonunibf' : 'STIXNonUnicode:weight=bold',
0 : 'STIXGeneral',
1 : 'STIXSize1',
2 : 'STIXSize2',
3 : 'STIXSize3',
4 : 'STIXSize4',
5 : 'STIXSize5'
}
fontmap = {}
use_cmex = False
cm_fallback = False
_sans = False
def __init__(self, *args, **kwargs):
TruetypeFonts.__init__(self, *args, **kwargs)
if not len(self.fontmap):
for key, name in self._fontmap.iteritems():
fullpath = findfont(name)
self.fontmap[key] = fullpath
self.fontmap[name] = fullpath
def _map_virtual_font(self, fontname, font_class, uniindex):
# Handle these "fonts" that are actually embedded in
# other fonts.
mapping = stix_virtual_fonts.get(fontname)
if self._sans and mapping is None:
mapping = stix_virtual_fonts['sf']
doing_sans_conversion = True
else:
doing_sans_conversion = False
if mapping is not None:
if isinstance(mapping, dict):
mapping = mapping[font_class]
# Binary search for the source glyph
lo = 0
hi = len(mapping)
while lo < hi:
mid = (lo+hi)//2
range = mapping[mid]
if uniindex < range[0]:
hi = mid
elif uniindex <= range[1]:
break
else:
lo = mid + 1
if uniindex >= range[0] and uniindex <= range[1]:
uniindex = uniindex - range[0] + range[3]
fontname = range[2]
elif not doing_sans_conversion:
# This will generate a dummy character
uniindex = 0x1
fontname = 'it'
# Handle private use area glyphs
if (fontname in ('it', 'rm', 'bf') and
uniindex >= 0xe000 and uniindex <= 0xf8ff):
fontname = 'nonuni' + fontname
return fontname, uniindex
_size_alternatives = {}
def get_sized_alternatives_for_symbol(self, fontname, sym):
alternatives = self._size_alternatives.get(sym)
if alternatives:
return alternatives
alternatives = []
try:
uniindex = get_unicode_index(sym)
except ValueError:
return [(fontname, sym)]
fix_ups = {
ord('<'): 0x27e8,
ord('>'): 0x27e9 }
uniindex = fix_ups.get(uniindex, uniindex)
for i in range(6):
cached_font = self._get_font(i)
glyphindex = cached_font.charmap.get(uniindex)
if glyphindex is not None:
alternatives.append((i, unichr(uniindex)))
self._size_alternatives[sym] = alternatives
return alternatives
class StixSansFonts(StixFonts):
"""
A font handling class for the STIX fonts (that uses sans-serif
characters by default).
"""
_sans = True
class StandardPsFonts(Fonts):
"""
Use the standard postscript fonts for rendering to backend_ps
Unlike the other font classes, BakomaFont and UnicodeFont, this
one requires the Ps backend.
"""
basepath = os.path.join( get_data_path(), 'fonts', 'afm' )
fontmap = { 'cal' : 'pzcmi8a', # Zapf Chancery
'rm' : 'pncr8a', # New Century Schoolbook
'tt' : 'pcrr8a', # Courier
'it' : 'pncri8a', # New Century Schoolbook Italic
'sf' : 'phvr8a', # Helvetica
'bf' : 'pncb8a', # New Century Schoolbook Bold
None : 'psyr' # Symbol
}
def __init__(self, default_font_prop):
Fonts.__init__(self, default_font_prop, MathtextBackendPs())
self.glyphd = {}
self.fonts = {}
filename = findfont(default_font_prop, fontext='afm')
default_font = AFM(file(filename, 'r'))
default_font.fname = filename
self.fonts['default'] = default_font
self.pswriter = StringIO()
def _get_font(self, font):
if font in self.fontmap:
basename = self.fontmap[font]
else:
basename = font
cached_font = self.fonts.get(basename)
if cached_font is None:
fname = os.path.join(self.basepath, basename + ".afm")
cached_font = AFM(file(fname, 'r'))
cached_font.fname = fname
self.fonts[basename] = cached_font
self.fonts[cached_font.get_fontname()] = cached_font
return cached_font
def _get_info (self, fontname, font_class, sym, fontsize, dpi):
'load the cmfont, metrics and glyph with caching'
key = fontname, sym, fontsize, dpi
tup = self.glyphd.get(key)
if tup is not None:
return tup
# Only characters in the "Letter" class should really be italicized.
# This class includes greek letters, so we're ok
if (fontname == 'it' and
(len(sym) > 1 or
not unicodedata.category(unicode(sym)).startswith("L"))):
fontname = 'rm'
found_symbol = False
if sym in latex_to_standard:
fontname, num = latex_to_standard[sym]
glyph = chr(num)
found_symbol = True
elif len(sym) == 1:
glyph = sym
num = ord(glyph)
found_symbol = True
else:
warn("No TeX to built-in Postscript mapping for '%s'" % sym,
MathTextWarning)
slanted = (fontname == 'it')
font = self._get_font(fontname)
if found_symbol:
try:
symbol_name = font.get_name_char(glyph)
except KeyError:
warn("No glyph in standard Postscript font '%s' for '%s'" %
(font.postscript_name, sym),
MathTextWarning)
found_symbol = False
if not found_symbol:
glyph = sym = '?'
num = ord(glyph)
symbol_name = font.get_name_char(glyph)
offset = 0
scale = 0.001 * fontsize
xmin, ymin, xmax, ymax = [val * scale
for val in font.get_bbox_char(glyph)]
metrics = Bunch(
advance = font.get_width_char(glyph) * scale,
width = font.get_width_char(glyph) * scale,
height = font.get_height_char(glyph) * scale,
xmin = xmin,
xmax = xmax,
ymin = ymin+offset,
ymax = ymax+offset,
# iceberg is the equivalent of TeX's "height"
iceberg = ymax + offset,
slanted = slanted
)
self.glyphd[key] = Bunch(
font = font,
fontsize = fontsize,
postscript_name = font.get_fontname(),
metrics = metrics,
symbol_name = symbol_name,
num = num,
glyph = glyph,
offset = offset
)
return self.glyphd[key]
def get_kern(self, font1, fontclass1, sym1, fontsize1,
font2, fontclass2, sym2, fontsize2, dpi):
if font1 == font2 and fontsize1 == fontsize2:
info1 = self._get_info(font1, fontclass1, sym1, fontsize1, dpi)
info2 = self._get_info(font2, fontclass2, sym2, fontsize2, dpi)
font = info1.font
return (font.get_kern_dist(info1.glyph, info2.glyph)
* 0.001 * fontsize1)
return Fonts.get_kern(self, font1, fontclass1, sym1, fontsize1,
font2, fontclass2, sym2, fontsize2, dpi)
def get_xheight(self, font, fontsize, dpi):
cached_font = self._get_font(font)
return cached_font.get_xheight() * 0.001 * fontsize
def get_underline_thickness(self, font, fontsize, dpi):
cached_font = self._get_font(font)
return cached_font.get_underline_thickness() * 0.001 * fontsize
##############################################################################
# TeX-LIKE BOX MODEL
# The following is based directly on the document 'woven' from the
# TeX82 source code. This information is also available in printed
# form:
#
# Knuth, Donald E.. 1986. Computers and Typesetting, Volume B:
# TeX: The Program. Addison-Wesley Professional.
#
# The most relevant "chapters" are:
# Data structures for boxes and their friends
# Shipping pages out (Ship class)
# Packaging (hpack and vpack)
# Data structures for math mode
# Subroutines for math mode
# Typesetting math formulas
#
# Many of the docstrings below refer to a numbered "node" in that
# book, e.g. node123
#
# Note that (as TeX) y increases downward, unlike many other parts of
# matplotlib.
# How much text shrinks when going to the next-smallest level. GROW_FACTOR
# must be the inverse of SHRINK_FACTOR.
SHRINK_FACTOR = 0.7
GROW_FACTOR = 1.0 / SHRINK_FACTOR
# The number of different sizes of chars to use, beyond which they will not
# get any smaller
NUM_SIZE_LEVELS = 4
# Percentage of x-height of additional horiz. space after sub/superscripts
SCRIPT_SPACE = 0.2
# Percentage of x-height that sub/superscripts drop below the baseline
SUBDROP = 0.3
# Percentage of x-height that superscripts drop below the baseline
SUP1 = 0.5
# Percentage of x-height that subscripts drop below the baseline
SUB1 = 0.0
# Percentage of x-height that superscripts are offset relative to the subscript
DELTA = 0.18
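# Worked example of the factors above: a 12 pt character shrunk through two
# size levels renders at 12 * 0.7 * 0.7 = 5.88 pt; growing twice restores the
# original size because GROW_FACTOR is exactly 1 / SHRINK_FACTOR.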
class MathTextWarning(Warning):
pass
class Node(object):
"""
A node in the TeX box model
"""
def __init__(self):
self.size = 0
def __repr__(self):
return self.__internal_repr__()
def __internal_repr__(self):
return self.__class__.__name__
def get_kerning(self, next):
return 0.0
def shrink(self):
"""
Shrinks one level smaller. There are only three levels of
sizes, after which things will no longer get smaller.
"""
self.size += 1
def grow(self):
"""
Grows one level larger. There is no limit to how big
something can get.
"""
self.size -= 1
def render(self, x, y):
pass
class Box(Node):
"""
Represents any node with a physical location.
"""
def __init__(self, width, height, depth):
Node.__init__(self)
self.width = width
self.height = height
self.depth = depth
def shrink(self):
Node.shrink(self)
if self.size < NUM_SIZE_LEVELS:
self.width *= SHRINK_FACTOR
self.height *= SHRINK_FACTOR
self.depth *= SHRINK_FACTOR
def grow(self):
Node.grow(self)
self.width *= GROW_FACTOR
self.height *= GROW_FACTOR
self.depth *= GROW_FACTOR
def render(self, x1, y1, x2, y2):
pass
class Vbox(Box):
"""
A box with only height (zero width).
"""
def __init__(self, height, depth):
Box.__init__(self, 0., height, depth)
class Hbox(Box):
"""
A box with only width (zero height and depth).
"""
def __init__(self, width):
Box.__init__(self, width, 0., 0.)
class Char(Node):
"""
Represents a single character. Unlike TeX, the font information
and metrics are stored with each :class:`Char` to make it easier
to lookup the font metrics when needed. Note that TeX boxes have
a width, height, and depth, unlike Type1 and Truetype which use a
full bounding box and an advance in the x-direction. The metrics
must be converted to the TeX way, and the advance (if different
from width) must be converted into a :class:`Kern` node when the
:class:`Char` is added to its parent :class:`Hlist`.
"""
def __init__(self, c, state):
Node.__init__(self)
self.c = c
self.font_output = state.font_output
assert isinstance(state.font, (str, unicode, int))
self.font = state.font
self.font_class = state.font_class
self.fontsize = state.fontsize
self.dpi = state.dpi
# The real width, height and depth will be set during the
# pack phase, after we know the real fontsize
self._update_metrics()
def __internal_repr__(self):
return '`%s`' % self.c
def _update_metrics(self):
metrics = self._metrics = self.font_output.get_metrics(
self.font, self.font_class, self.c, self.fontsize, self.dpi)
if self.c == ' ':
self.width = metrics.advance
else:
self.width = metrics.width
self.height = metrics.iceberg
self.depth = -(metrics.iceberg - metrics.height)
def is_slanted(self):
return self._metrics.slanted
def get_kerning(self, next):
"""
Return the amount of kerning between this and the given
character. Called when characters are strung together into
:class:`Hlist` to create :class:`Kern` nodes.
"""
advance = self._metrics.advance - self.width
kern = 0.
if isinstance(next, Char):
kern = self.font_output.get_kern(
self.font, self.font_class, self.c, self.fontsize,
next.font, next.font_class, next.c, next.fontsize,
self.dpi)
return advance + kern
def render(self, x, y):
"""
Render the character to the canvas
"""
self.font_output.render_glyph(
x, y,
self.font, self.font_class, self.c, self.fontsize, self.dpi)
def shrink(self):
Node.shrink(self)
if self.size < NUM_SIZE_LEVELS:
self.fontsize *= SHRINK_FACTOR
self.width *= SHRINK_FACTOR
self.height *= SHRINK_FACTOR
self.depth *= SHRINK_FACTOR
def grow(self):
Node.grow(self)
self.fontsize *= GROW_FACTOR
self.width *= GROW_FACTOR
self.height *= GROW_FACTOR
self.depth *= GROW_FACTOR
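# Illustrative arithmetic for the metrics-to-box conversion above (the
# numbers are made up): a glyph reporting width=10, iceberg=7, height=9 and
# advance=12 becomes a Char with width=10, height=7 (rise above the
# baseline) and depth=9-7=2 (drop below it).  When strung into an Hlist,
# get_kerning() returns (advance - width) + pair_kern, e.g. (12 - 10) + (-1)
# = 1, which Hlist.kern() materializes as a Kern node after the character.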
class Accent(Char):
"""
The font metrics need to be dealt with differently for accents,
since they are already offset correctly from the baseline in
TrueType fonts.
"""
def _update_metrics(self):
metrics = self._metrics = self.font_output.get_metrics(
self.font, self.font_class, self.c, self.fontsize, self.dpi)
self.width = metrics.xmax - metrics.xmin
self.height = metrics.ymax - metrics.ymin
self.depth = 0
def shrink(self):
Char.shrink(self)
self._update_metrics()
def grow(self):
Char.grow(self)
self._update_metrics()
def render(self, x, y):
"""
Render the character to the canvas.
"""
self.font_output.render_glyph(
x - self._metrics.xmin, y + self._metrics.ymin,
self.font, self.font_class, self.c, self.fontsize, self.dpi)
class List(Box):
"""
A list of nodes (either horizontal or vertical).
"""
def __init__(self, elements):
Box.__init__(self, 0., 0., 0.)
self.shift_amount = 0. # An arbitrary offset
self.children = elements # The child nodes of this list
# The following parameters are set in the vpack and hpack functions
self.glue_set = 0. # The glue setting of this list
self.glue_sign = 0 # 0: normal, -1: shrinking, 1: stretching
self.glue_order = 0 # The order of infinity (0 - 3) for the glue
def __repr__(self):
return '[%s <%.02f %.02f %.02f %.02f> %s]' % (
self.__internal_repr__(),
self.width, self.height,
self.depth, self.shift_amount,
' '.join([repr(x) for x in self.children]))
def _determine_order(self, totals):
"""
A helper function to determine the highest order of glue
used by the members of this list. Used by vpack and hpack.
"""
o = 0
for i in range(len(totals) - 1, 0, -1):
if totals[i] != 0.0:
o = i
break
return o
def _set_glue(self, x, sign, totals, error_type):
o = self._determine_order(totals)
self.glue_order = o
self.glue_sign = sign
if totals[o] != 0.:
self.glue_set = x / totals[o]
else:
self.glue_sign = 0
self.glue_ratio = 0.
if o == 0:
if len(self.children):
warn("%s %s: %r" % (error_type, self.__class__.__name__, self),
MathTextWarning)
def shrink(self):
for child in self.children:
child.shrink()
Box.shrink(self)
if self.size < NUM_SIZE_LEVELS:
self.shift_amount *= SHRINK_FACTOR
self.glue_set *= SHRINK_FACTOR
def grow(self):
for child in self.children:
child.grow()
Box.grow(self)
self.shift_amount *= GROW_FACTOR
self.glue_set *= GROW_FACTOR
class Hlist(List):
"""
A horizontal list of boxes.
"""
def __init__(self, elements, w=0., m='additional', do_kern=True):
List.__init__(self, elements)
if do_kern:
self.kern()
self.hpack()
def kern(self):
"""
Insert :class:`Kern` nodes between :class:`Char` nodes to set
kerning. The :class:`Char` nodes themselves determine the
amount of kerning they need (in :meth:`~Char.get_kerning`),
and this function just creates the linked list in the correct
way.
"""
new_children = []
num_children = len(self.children)
if num_children:
for i in range(num_children):
elem = self.children[i]
if i < num_children - 1:
next = self.children[i + 1]
else:
next = None
new_children.append(elem)
kerning_distance = elem.get_kerning(next)
if kerning_distance != 0.:
kern = Kern(kerning_distance)
new_children.append(kern)
self.children = new_children
# This is a failed experiment to fake cross-font kerning.
# def get_kerning(self, next):
# if len(self.children) >= 2 and isinstance(self.children[-2], Char):
# if isinstance(next, Char):
# print "CASE A"
# return self.children[-2].get_kerning(next)
# elif isinstance(next, Hlist) and len(next.children) and isinstance(next.children[0], Char):
# print "CASE B"
# result = self.children[-2].get_kerning(next.children[0])
# print result
# return result
# return 0.0
def hpack(self, w=0., m='additional'):
"""
The main duty of :meth:`hpack` is to compute the dimensions of
the resulting boxes, and to adjust the glue if one of those
dimensions is pre-specified. The computed sizes normally
enclose all of the material inside the new box; but some items
may stick out if negative glue is used, if the box is
overfull, or if a ``\\vbox`` includes other boxes that have
been shifted left.
- *w*: specifies a width
- *m*: is either 'exactly' or 'additional'.
Thus, ``hpack(w, 'exactly')`` produces a box whose width is
exactly *w*, while ``hpack(w, 'additional')`` yields a box
whose width is the natural width plus *w*. The default values
produce a box with the natural width.
"""
# I don't know why these get reset in TeX. Shift_amount is pretty
# much useless if we do.
#self.shift_amount = 0.
h = 0.
d = 0.
x = 0.
total_stretch = [0.] * 4
total_shrink = [0.] * 4
for p in self.children:
if isinstance(p, Char):
x += p.width
h = max(h, p.height)
d = max(d, p.depth)
elif isinstance(p, Box):
x += p.width
if not isinf(p.height) and not isinf(p.depth):
s = getattr(p, 'shift_amount', 0.)
h = max(h, p.height - s)
d = max(d, p.depth + s)
elif isinstance(p, Glue):
glue_spec = p.glue_spec
x += glue_spec.width
total_stretch[glue_spec.stretch_order] += glue_spec.stretch
total_shrink[glue_spec.shrink_order] += glue_spec.shrink
elif isinstance(p, Kern):
x += p.width
self.height = h
self.depth = d
if m == 'additional':
w += x
self.width = w
x = w - x
if x == 0.:
self.glue_sign = 0
self.glue_order = 0
self.glue_ratio = 0.
return
if x > 0.:
self._set_glue(x, 1, total_stretch, "Overfull")
else:
self._set_glue(x, -1, total_shrink, "Underfull")
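# A small sketch of hpack()'s two modes (illustrative only; Hbox, Kern and
# Fil are defined elsewhere in this module):
#
#     hl = Hlist([Hbox(10.), Kern(2.), Hbox(5.)])   # hpack() runs in __init__
#     # hl.width == 17.0: natural width (10 + 2 + 5) in 'additional' mode, w=0
#
#     hl = Hlist([Hbox(10.), Fil()])
#     hl.hpack(30., 'exactly')
#     # hl.width == 30.0, glue_sign == 1 (stretching), glue_set == 20.0:
#     # the fil glue absorbs the 20 units of extra space.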
class Vlist(List):
"""
A vertical list of boxes.
"""
def __init__(self, elements, h=0., m='additional'):
List.__init__(self, elements)
self.vpack()
def vpack(self, h=0., m='additional', l=float(inf)):
"""
The main duty of :meth:`vpack` is to compute the dimensions of
the resulting boxes, and to adjust the glue if one of those
dimensions is pre-specified.
- *h*: specifies a height
- *m*: is either 'exactly' or 'additional'.
- *l*: a maximum height
Thus, ``vpack(h, 'exactly')`` produces a box whose height is
exactly *h*, while ``vpack(h, 'additional')`` yields a box
whose height is the natural height plus *h*. The default
        values produce a box with the natural height.
"""
# I don't know why these get reset in TeX. Shift_amount is pretty
# much useless if we do.
# self.shift_amount = 0.
w = 0.
d = 0.
x = 0.
total_stretch = [0.] * 4
total_shrink = [0.] * 4
for p in self.children:
if isinstance(p, Box):
x += d + p.height
d = p.depth
if not isinf(p.width):
s = getattr(p, 'shift_amount', 0.)
w = max(w, p.width + s)
elif isinstance(p, Glue):
x += d
d = 0.
glue_spec = p.glue_spec
x += glue_spec.width
total_stretch[glue_spec.stretch_order] += glue_spec.stretch
total_shrink[glue_spec.shrink_order] += glue_spec.shrink
elif isinstance(p, Kern):
x += d + p.width
d = 0.
elif isinstance(p, Char):
raise RuntimeError("Internal mathtext error: Char node found in Vlist.")
self.width = w
if d > l:
x += d - l
self.depth = l
else:
self.depth = d
if m == 'additional':
h += x
self.height = h
x = h - x
if x == 0:
self.glue_sign = 0
self.glue_order = 0
self.glue_ratio = 0.
return
if x > 0.:
self._set_glue(x, 1, total_stretch, "Overfull")
else:
self._set_glue(x, -1, total_shrink, "Underfull")
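# A small sketch of vpack()'s bookkeeping (illustrative only): children's
# heights accumulate, each box's depth is carried into the gap before the
# next item, and only the depth of the last box survives as the list depth.
#
#     vl = Vlist([Vbox(3., 1.), Vbox(2., 0.)])   # vpack() runs in __init__
#     # vl.height == 6.0  (3 + the first box's depth of 1 + 2)
#     # vl.depth  == 0.0  (depth of the final box)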
class Rule(Box):
"""
A :class:`Rule` node stands for a solid black rectangle; it has
*width*, *depth*, and *height* fields just as in an
:class:`Hlist`. However, if any of these dimensions is inf, the
actual value will be determined by running the rule up to the
boundary of the innermost enclosing box. This is called a "running
dimension." The width is never running in an :class:`Hlist`; the
height and depth are never running in a :class:`Vlist`.
"""
def __init__(self, width, height, depth, state):
Box.__init__(self, width, height, depth)
self.font_output = state.font_output
def render(self, x, y, w, h):
self.font_output.render_rect_filled(x, y, x + w, y + h)
class Hrule(Rule):
"""
Convenience class to create a horizontal rule.
"""
def __init__(self, state):
thickness = state.font_output.get_underline_thickness(
state.font, state.fontsize, state.dpi)
height = depth = thickness * 0.5
Rule.__init__(self, inf, height, depth, state)
class Vrule(Rule):
"""
Convenience class to create a vertical rule.
"""
def __init__(self, state):
thickness = state.font_output.get_underline_thickness(
state.font, state.fontsize, state.dpi)
Rule.__init__(self, thickness, inf, inf, state)
class Glue(Node):
"""
Most of the information in this object is stored in the underlying
:class:`GlueSpec` class, which is shared between multiple glue objects. (This
is a memory optimization which probably doesn't matter anymore, but it's
easier to stick to what TeX does.)
"""
def __init__(self, glue_type, copy=False):
Node.__init__(self)
self.glue_subtype = 'normal'
if is_string_like(glue_type):
glue_spec = GlueSpec.factory(glue_type)
elif isinstance(glue_type, GlueSpec):
glue_spec = glue_type
else:
            raise ValueError("glue_type must be a glue spec name or instance.")
if copy:
glue_spec = glue_spec.copy()
self.glue_spec = glue_spec
def shrink(self):
Node.shrink(self)
if self.size < NUM_SIZE_LEVELS:
if self.glue_spec.width != 0.:
self.glue_spec = self.glue_spec.copy()
self.glue_spec.width *= SHRINK_FACTOR
def grow(self):
Node.grow(self)
if self.glue_spec.width != 0.:
self.glue_spec = self.glue_spec.copy()
self.glue_spec.width *= GROW_FACTOR
class GlueSpec(object):
"""
See :class:`Glue`.
"""
def __init__(self, width=0., stretch=0., stretch_order=0, shrink=0., shrink_order=0):
self.width = width
self.stretch = stretch
self.stretch_order = stretch_order
self.shrink = shrink
self.shrink_order = shrink_order
def copy(self):
return GlueSpec(
self.width,
self.stretch,
self.stretch_order,
self.shrink,
self.shrink_order)
def factory(cls, glue_type):
return cls._types[glue_type]
factory = classmethod(factory)
GlueSpec._types = {
'fil': GlueSpec(0., 1., 1, 0., 0),
'fill': GlueSpec(0., 1., 2, 0., 0),
'filll': GlueSpec(0., 1., 3, 0., 0),
'neg_fil': GlueSpec(0., 0., 0, 1., 1),
'neg_fill': GlueSpec(0., 0., 0, 1., 2),
'neg_filll': GlueSpec(0., 0., 0, 1., 3),
'empty': GlueSpec(0., 0., 0, 0., 0),
'ss': GlueSpec(0., 1., 1, -1., 1)
}
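# Quick reference for the table above (illustrative): the *_order fields are
# TeX's orders of infinity ('fil' < 'fill' < 'filll'), and only glue of the
# highest order present in a list is stretched or shrunk.  'ss' glue
# stretches or shrinks freely, which is how HCentered/VCentered float their
# contents to the middle:
#
#     hl = Hlist([SsGlue(), Hbox(10.), SsGlue()])
#     hl.hpack(30., 'exactly')
#     # glue_set == 10.0: each SsGlue expands by 10, centering the Hbox.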
# Some convenient ways to get common kinds of glue
class Fil(Glue):
def __init__(self):
Glue.__init__(self, 'fil')
class Fill(Glue):
def __init__(self):
Glue.__init__(self, 'fill')
class Filll(Glue):
def __init__(self):
Glue.__init__(self, 'filll')
class NegFil(Glue):
def __init__(self):
Glue.__init__(self, 'neg_fil')
class NegFill(Glue):
def __init__(self):
Glue.__init__(self, 'neg_fill')
class NegFilll(Glue):
def __init__(self):
Glue.__init__(self, 'neg_filll')
class SsGlue(Glue):
def __init__(self):
Glue.__init__(self, 'ss')
class HCentered(Hlist):
"""
A convenience class to create an :class:`Hlist` whose contents are
centered within its enclosing box.
"""
def __init__(self, elements):
Hlist.__init__(self, [SsGlue()] + elements + [SsGlue()],
do_kern=False)
class VCentered(Vlist):
"""
A convenience class to create a :class:`Vlist` whose contents are
centered within its enclosing box.
"""
def __init__(self, elements):
Vlist.__init__(self, [SsGlue()] + elements + [SsGlue()])
class Kern(Node):
"""
A :class:`Kern` node has a width field to specify a (normally
negative) amount of spacing. This spacing correction appears in
horizontal lists between letters like A and V when the font
designer said that it looks better to move them closer together or
further apart. A kern node can also appear in a vertical list,
when its *width* denotes additional spacing in the vertical
direction.
"""
def __init__(self, width):
Node.__init__(self)
self.width = width
def __repr__(self):
return "k%.02f" % self.width
def shrink(self):
Node.shrink(self)
if self.size < NUM_SIZE_LEVELS:
self.width *= SHRINK_FACTOR
def grow(self):
Node.grow(self)
self.width *= GROW_FACTOR
class SubSuperCluster(Hlist):
"""
    :class:`SubSuperCluster` is a sort of hack to get around the fact
    that this code does not do a two-pass parse like TeX.  This lets us store
enough information in the hlist itself, namely the nucleus, sub-
and super-script, such that if another script follows that needs
to be attached, it can be reconfigured on the fly.
"""
def __init__(self):
self.nucleus = None
self.sub = None
self.super = None
Hlist.__init__(self, [])
class AutoHeightChar(Hlist):
"""
:class:`AutoHeightChar` will create a character as close to the
given height and depth as possible. When using a font with
multiple height versions of some characters (such as the BaKoMa
fonts), the correct glyph will be selected, otherwise this will
always just return a scaled version of the glyph.
"""
def __init__(self, c, height, depth, state, always=False):
alternatives = state.font_output.get_sized_alternatives_for_symbol(
state.font, c)
state = state.copy()
target_total = height + depth
for fontname, sym in alternatives:
state.font = fontname
char = Char(sym, state)
if char.height + char.depth >= target_total:
break
factor = target_total / (char.height + char.depth)
state.fontsize *= factor
char = Char(sym, state)
shift = (depth - char.depth)
Hlist.__init__(self, [char])
self.shift_amount = shift
class AutoWidthChar(Hlist):
"""
:class:`AutoWidthChar` will create a character as close to the
given width as possible. When using a font with multiple width
versions of some characters (such as the BaKoMa fonts), the
correct glyph will be selected, otherwise this will always just
return a scaled version of the glyph.
"""
def __init__(self, c, width, state, always=False, char_class=Char):
alternatives = state.font_output.get_sized_alternatives_for_symbol(
state.font, c)
state = state.copy()
for fontname, sym in alternatives:
state.font = fontname
char = char_class(sym, state)
if char.width >= width:
break
factor = width / char.width
state.fontsize *= factor
char = char_class(sym, state)
Hlist.__init__(self, [char])
self.width = char.width
class Ship(object):
"""
Once the boxes have been set up, this sends them to output. Since
boxes can be inside of boxes inside of boxes, the main work of
:class:`Ship` is done by two mutually recursive routines,
:meth:`hlist_out` and :meth:`vlist_out`, which traverse the
:class:`Hlist` nodes and :class:`Vlist` nodes inside of horizontal
and vertical boxes. The global variables used in TeX to store
state as it processes have become member variables here.
"""
def __call__(self, ox, oy, box):
self.max_push = 0 # Deepest nesting of push commands so far
self.cur_s = 0
self.cur_v = 0.
self.cur_h = 0.
self.off_h = ox
self.off_v = oy + box.height
self.hlist_out(box)
def clamp(value):
if value < -1000000000.:
return -1000000000.
if value > 1000000000.:
return 1000000000.
return value
clamp = staticmethod(clamp)
def hlist_out(self, box):
cur_g = 0
cur_glue = 0.
glue_order = box.glue_order
glue_sign = box.glue_sign
base_line = self.cur_v
left_edge = self.cur_h
self.cur_s += 1
self.max_push = max(self.cur_s, self.max_push)
clamp = self.clamp
for p in box.children:
if isinstance(p, Char):
p.render(self.cur_h + self.off_h, self.cur_v + self.off_v)
self.cur_h += p.width
elif isinstance(p, Kern):
self.cur_h += p.width
elif isinstance(p, List):
# node623
if len(p.children) == 0:
self.cur_h += p.width
else:
edge = self.cur_h
self.cur_v = base_line + p.shift_amount
if isinstance(p, Hlist):
self.hlist_out(p)
else:
# p.vpack(box.height + box.depth, 'exactly')
self.vlist_out(p)
self.cur_h = edge + p.width
self.cur_v = base_line
elif isinstance(p, Box):
# node624
rule_height = p.height
rule_depth = p.depth
rule_width = p.width
if isinf(rule_height):
rule_height = box.height
if isinf(rule_depth):
rule_depth = box.depth
if rule_height > 0 and rule_width > 0:
                    self.cur_v = base_line + rule_depth
p.render(self.cur_h + self.off_h,
self.cur_v + self.off_v,
rule_width, rule_height)
                    self.cur_v = base_line
self.cur_h += rule_width
elif isinstance(p, Glue):
# node625
glue_spec = p.glue_spec
rule_width = glue_spec.width - cur_g
if glue_sign != 0: # normal
if glue_sign == 1: # stretching
if glue_spec.stretch_order == glue_order:
cur_glue += glue_spec.stretch
cur_g = round(clamp(float(box.glue_set) * cur_glue))
elif glue_spec.shrink_order == glue_order:
cur_glue += glue_spec.shrink
cur_g = round(clamp(float(box.glue_set) * cur_glue))
rule_width += cur_g
self.cur_h += rule_width
self.cur_s -= 1
def vlist_out(self, box):
cur_g = 0
cur_glue = 0.
glue_order = box.glue_order
glue_sign = box.glue_sign
self.cur_s += 1
self.max_push = max(self.max_push, self.cur_s)
left_edge = self.cur_h
self.cur_v -= box.height
top_edge = self.cur_v
clamp = self.clamp
for p in box.children:
if isinstance(p, Kern):
self.cur_v += p.width
elif isinstance(p, List):
if len(p.children) == 0:
self.cur_v += p.height + p.depth
else:
self.cur_v += p.height
self.cur_h = left_edge + p.shift_amount
save_v = self.cur_v
p.width = box.width
if isinstance(p, Hlist):
self.hlist_out(p)
else:
self.vlist_out(p)
self.cur_v = save_v + p.depth
self.cur_h = left_edge
elif isinstance(p, Box):
rule_height = p.height
rule_depth = p.depth
rule_width = p.width
if isinf(rule_width):
rule_width = box.width
rule_height += rule_depth
if rule_height > 0 and rule_depth > 0:
self.cur_v += rule_height
p.render(self.cur_h + self.off_h,
self.cur_v + self.off_v,
rule_width, rule_height)
elif isinstance(p, Glue):
glue_spec = p.glue_spec
rule_height = glue_spec.width - cur_g
if glue_sign != 0: # normal
if glue_sign == 1: # stretching
if glue_spec.stretch_order == glue_order:
cur_glue += glue_spec.stretch
cur_g = round(clamp(float(box.glue_set) * cur_glue))
elif glue_spec.shrink_order == glue_order: # shrinking
cur_glue += glue_spec.shrink
cur_g = round(clamp(float(box.glue_set) * cur_glue))
rule_height += cur_g
self.cur_v += rule_height
elif isinstance(p, Char):
raise RuntimeError("Internal mathtext error: Char node found in vlist")
self.cur_s -= 1
ship = Ship()
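# Coordinate convention, with illustrative numbers: __call__ sets
# off_v = oy + box.height, so for a box of height 10 shipped at (0, 0) a
# Char sitting at cur_v == 0 (the baseline) renders at device y == 10.
# The off_h/off_v offsets translate box-relative positions (y growing
# downward, as noted above) into absolute canvas coordinates:
#
#     ship(ox, oy, box)   # box is e.g. the top-level Hlist built by the parser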
##############################################################################
# PARSER
def Error(msg):
"""
Helper class to raise parser errors.
"""
def raise_error(s, loc, toks):
raise ParseFatalException(msg + "\n" + s)
empty = Empty()
empty.setParseAction(raise_error)
return empty
class Parser(object):
"""
This is the pyparsing-based parser for math expressions. It
actually parses full strings *containing* math expressions, in
that raw text may also appear outside of pairs of ``$``.
The grammar is based directly on that in TeX, though it cuts a few
corners.
"""
_binary_operators = set(r'''
+ *
\pm \sqcap \rhd
\mp \sqcup \unlhd
\times \vee \unrhd
\div \wedge \oplus
\ast \setminus \ominus
\star \wr \otimes
\circ \diamond \oslash
\bullet \bigtriangleup \odot
\cdot \bigtriangledown \bigcirc
\cap \triangleleft \dagger
\cup \triangleright \ddagger
\uplus \lhd \amalg'''.split())
_relation_symbols = set(r'''
= < > :
\leq \geq \equiv \models
\prec \succ \sim \perp
\preceq \succeq \simeq \mid
\ll \gg \asymp \parallel
\subset \supset \approx \bowtie
\subseteq \supseteq \cong \Join
\sqsubset \sqsupset \neq \smile
\sqsubseteq \sqsupseteq \doteq \frown
\in \ni \propto
\vdash \dashv'''.split())
_arrow_symbols = set(r'''
\leftarrow \longleftarrow \uparrow
\Leftarrow \Longleftarrow \Uparrow
\rightarrow \longrightarrow \downarrow
\Rightarrow \Longrightarrow \Downarrow
\leftrightarrow \longleftrightarrow \updownarrow
\Leftrightarrow \Longleftrightarrow \Updownarrow
\mapsto \longmapsto \nearrow
\hookleftarrow \hookrightarrow \searrow
\leftharpoonup \rightharpoonup \swarrow
\leftharpoondown \rightharpoondown \nwarrow
\rightleftharpoons \leadsto'''.split())
_spaced_symbols = _binary_operators | _relation_symbols | _arrow_symbols
_punctuation_symbols = set(r', ; . ! \ldotp \cdotp'.split())
_overunder_symbols = set(r'''
\sum \prod \coprod \bigcap \bigcup \bigsqcup \bigvee
\bigwedge \bigodot \bigotimes \bigoplus \biguplus
'''.split())
_overunder_functions = set(
r"lim liminf limsup sup max min".split())
_dropsub_symbols = set(r'''\int \oint'''.split())
_fontnames = set("rm cal it tt sf bf default bb frak circled scr".split())
_function_names = set("""
arccos csc ker min arcsin deg lg Pr arctan det lim sec arg dim
liminf sin cos exp limsup sinh cosh gcd ln sup cot hom log tan
coth inf max tanh""".split())
_ambiDelim = set(r"""
| \| / \backslash \uparrow \downarrow \updownarrow \Uparrow
\Downarrow \Updownarrow .""".split())
_leftDelim = set(r"( [ { < \lfloor \langle \lceil".split())
_rightDelim = set(r") ] } > \rfloor \rangle \rceil".split())
def __init__(self):
# All forward declarations are here
font = Forward().setParseAction(self.font).setName("font")
latexfont = Forward()
subsuper = Forward().setParseAction(self.subsuperscript).setName("subsuper")
placeable = Forward().setName("placeable")
simple = Forward().setName("simple")
autoDelim = Forward().setParseAction(self.auto_sized_delimiter)
self._expression = Forward().setParseAction(self.finish).setName("finish")
float = Regex(r"[-+]?([0-9]+\.?[0-9]*|\.[0-9]+)")
lbrace = Literal('{').suppress()
rbrace = Literal('}').suppress()
start_group = (Optional(latexfont) - lbrace)
start_group.setParseAction(self.start_group)
end_group = rbrace.copy()
end_group.setParseAction(self.end_group)
bslash = Literal('\\')
accent = oneOf(self._accent_map.keys() +
list(self._wide_accents))
function = oneOf(list(self._function_names))
fontname = oneOf(list(self._fontnames))
latex2efont = oneOf(['math' + x for x in self._fontnames])
space =(FollowedBy(bslash)
+ oneOf([r'\ ',
r'\/',
r'\,',
r'\;',
r'\quad',
r'\qquad',
r'\!'])
).setParseAction(self.space).setName('space')
customspace =(Literal(r'\hspace')
- (( lbrace
- float
- rbrace
) | Error(r"Expected \hspace{n}"))
).setParseAction(self.customspace).setName('customspace')
unicode_range = u"\U00000080-\U0001ffff"
symbol =(Regex(UR"([a-zA-Z0-9 +\-*/<>=:,.;!'@()\[\]|%s])|(\\[%%${}\[\]_|])" % unicode_range)
| (Combine(
bslash
+ oneOf(tex2uni.keys())
) + FollowedBy(Regex("[^a-zA-Z]")))
).setParseAction(self.symbol).leaveWhitespace()
c_over_c =(Suppress(bslash)
+ oneOf(self._char_over_chars.keys())
).setParseAction(self.char_over_chars)
accent = Group(
Suppress(bslash)
+ accent
- placeable
).setParseAction(self.accent).setName("accent")
function =(Suppress(bslash)
+ function
).setParseAction(self.function).setName("function")
group = Group(
start_group
+ ZeroOrMore(
autoDelim
^ simple)
- end_group
).setParseAction(self.group).setName("group")
font <<(Suppress(bslash)
+ fontname)
latexfont <<(Suppress(bslash)
+ latex2efont)
frac = Group(
Suppress(Literal(r"\frac"))
+ ((group + group)
| Error(r"Expected \frac{num}{den}"))
).setParseAction(self.frac).setName("frac")
sqrt = Group(
Suppress(Literal(r"\sqrt"))
+ Optional(
Suppress(Literal("["))
- Regex("[0-9]+")
- Suppress(Literal("]")),
default = None
)
            + (group | Error(r"Expected \sqrt{value}"))
).setParseAction(self.sqrt).setName("sqrt")
placeable <<(accent
^ function
^ (c_over_c | symbol)
^ group
^ frac
^ sqrt
)
simple <<(space
| customspace
| font
| subsuper
)
subsuperop = oneOf(["_", "^"])
subsuper << Group(
( Optional(placeable)
+ OneOrMore(
subsuperop
- placeable
)
)
| placeable
)
ambiDelim = oneOf(list(self._ambiDelim))
leftDelim = oneOf(list(self._leftDelim))
rightDelim = oneOf(list(self._rightDelim))
autoDelim <<(Suppress(Literal(r"\left"))
+ ((leftDelim | ambiDelim) | Error("Expected a delimiter"))
+ Group(
autoDelim
^ OneOrMore(simple))
+ Suppress(Literal(r"\right"))
+ ((rightDelim | ambiDelim) | Error("Expected a delimiter"))
)
math = OneOrMore(
autoDelim
^ simple
).setParseAction(self.math).setName("math")
math_delim = ~bslash + Literal('$')
non_math = Regex(r"(?:(?:\\[$])|[^$])*"
).setParseAction(self.non_math).setName("non_math").leaveWhitespace()
self._expression << (
non_math
+ ZeroOrMore(
Suppress(math_delim)
+ Optional(math)
+ (Suppress(math_delim)
| Error("Expected end of math '$'"))
+ non_math
)
) + StringEnd()
self.clear()
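    # A few strings the grammar above is built to accept (illustrative only;
    # math must be delimited by unescaped '$'):
    #
    #     'plain text $x_i^2$ more text'      # sub/superscripts
    #     r'$\frac{a + b}{2}$'                # \frac{num}{den}
    #     r'$\sqrt[3]{x}$'                    # \sqrt with an optional index
    #     r'$\left(\frac{a}{b}\right)$'       # auto-sized delimiters
    #     r'cost: \$5'                        # escaped dollar in non-math text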
def clear(self):
"""
Clear any state before parsing.
"""
self._expr = None
self._state_stack = None
self._em_width_cache = {}
def parse(self, s, fonts_object, fontsize, dpi):
"""
Parse expression *s* using the given *fonts_object* for
output, at the given *fontsize* and *dpi*.
Returns the parse tree of :class:`Node` instances.
"""
self._state_stack = [self.State(fonts_object, 'default', 'rm', fontsize, dpi)]
try:
self._expression.parseString(s)
except ParseException, err:
raise ValueError("\n".join([
"",
err.line,
" " * (err.column - 1) + "^",
str(err)]))
return self._expr
# The state of the parser is maintained in a stack. Upon
# entering and leaving a group { } or math/non-math, the stack
# is pushed and popped accordingly. The current state always
# exists in the top element of the stack.
class State(object):
"""
Stores the state of the parser.
States are pushed and popped from a stack as necessary, and
the "current" state is always at the top of the stack.
"""
def __init__(self, font_output, font, font_class, fontsize, dpi):
self.font_output = font_output
self._font = font
self.font_class = font_class
self.fontsize = fontsize
self.dpi = dpi
def copy(self):
return Parser.State(
self.font_output,
self.font,
self.font_class,
self.fontsize,
self.dpi)
def _get_font(self):
return self._font
def _set_font(self, name):
if name in ('it', 'rm', 'bf'):
self.font_class = name
self._font = name
font = property(_get_font, _set_font)
def get_state(self):
"""
Get the current :class:`State` of the parser.
"""
return self._state_stack[-1]
def pop_state(self):
"""
Pop a :class:`State` off of the stack.
"""
self._state_stack.pop()
def push_state(self):
"""
Push a new :class:`State` onto the stack which is just a copy
of the current state.
"""
self._state_stack.append(self.get_state().copy())
def finish(self, s, loc, toks):
#~ print "finish", toks
self._expr = Hlist(toks)
return [self._expr]
def math(self, s, loc, toks):
#~ print "math", toks
hlist = Hlist(toks)
self.pop_state()
return [hlist]
def non_math(self, s, loc, toks):
#~ print "non_math", toks
s = toks[0].replace(r'\$', '$')
symbols = [Char(c, self.get_state()) for c in s]
hlist = Hlist(symbols)
# We're going into math now, so set font to 'it'
self.push_state()
self.get_state().font = 'it'
return [hlist]
def _make_space(self, percentage):
# All spaces are relative to em width
state = self.get_state()
key = (state.font, state.fontsize, state.dpi)
width = self._em_width_cache.get(key)
if width is None:
metrics = state.font_output.get_metrics(
state.font, 'it', 'm', state.fontsize, state.dpi)
width = metrics.advance
self._em_width_cache[key] = width
return Kern(width * percentage)
_space_widths = { r'\ ' : 0.3,
r'\,' : 0.4,
r'\;' : 0.8,
r'\quad' : 1.6,
r'\qquad' : 3.2,
r'\!' : -0.4,
r'\/' : 0.4 }
def space(self, s, loc, toks):
assert(len(toks)==1)
num = self._space_widths[toks[0]]
box = self._make_space(num)
return [box]
def customspace(self, s, loc, toks):
return [self._make_space(float(toks[1]))]
def symbol(self, s, loc, toks):
# print "symbol", toks
c = toks[0]
try:
char = Char(c, self.get_state())
except ValueError:
raise ParseFatalException("Unknown symbol: %s" % c)
if c in self._spaced_symbols:
return [Hlist( [self._make_space(0.2),
char,
self._make_space(0.2)] ,
do_kern = False)]
elif c in self._punctuation_symbols:
return [Hlist( [char,
self._make_space(0.2)] ,
do_kern = False)]
return [char]
_char_over_chars = {
# The first 2 entires in the tuple are (font, char, sizescale) for
# the two symbols under and over. The third element is the space
# (in multiples of underline height)
        r'AA' : ( ('rm', 'A', 1.0), (None, r'\circ', 0.5), 0.0),
}
def char_over_chars(self, s, loc, toks):
sym = toks[0]
state = self.get_state()
thickness = state.font_output.get_underline_thickness(
state.font, state.fontsize, state.dpi)
under_desc, over_desc, space = \
self._char_over_chars.get(sym, (None, None, 0.0))
if under_desc is None:
raise ParseFatalException("Error parsing symbol")
over_state = state.copy()
if over_desc[0] is not None:
over_state.font = over_desc[0]
over_state.fontsize *= over_desc[2]
over = Accent(over_desc[1], over_state)
under_state = state.copy()
if under_desc[0] is not None:
under_state.font = under_desc[0]
under_state.fontsize *= under_desc[2]
under = Char(under_desc[1], under_state)
width = max(over.width, under.width)
over_centered = HCentered([over])
over_centered.hpack(width, 'exactly')
under_centered = HCentered([under])
under_centered.hpack(width, 'exactly')
return Vlist([
over_centered,
Vbox(0., thickness * space),
under_centered
])
_accent_map = {
r'hat' : r'\circumflexaccent',
r'breve' : r'\combiningbreve',
r'bar' : r'\combiningoverline',
r'grave' : r'\combininggraveaccent',
r'acute' : r'\combiningacuteaccent',
r'ddot' : r'\combiningdiaeresis',
r'tilde' : r'\combiningtilde',
r'dot' : r'\combiningdotabove',
r'vec' : r'\combiningrightarrowabove',
r'"' : r'\combiningdiaeresis',
r"`" : r'\combininggraveaccent',
r"'" : r'\combiningacuteaccent',
r'~' : r'\combiningtilde',
r'.' : r'\combiningdotabove',
r'^' : r'\circumflexaccent'
}
_wide_accents = set(r"widehat widetilde".split())
def accent(self, s, loc, toks):
assert(len(toks)==1)
state = self.get_state()
thickness = state.font_output.get_underline_thickness(
state.font, state.fontsize, state.dpi)
if len(toks[0]) != 2:
raise ParseFatalException("Error parsing accent")
accent, sym = toks[0]
if accent in self._wide_accents:
accent = AutoWidthChar(
'\\' + accent, sym.width, state, char_class=Accent)
else:
accent = Accent(self._accent_map[accent], state)
centered = HCentered([accent])
centered.hpack(sym.width, 'exactly')
return Vlist([
centered,
Vbox(0., thickness * 2.0),
Hlist([sym])
])
def function(self, s, loc, toks):
#~ print "function", toks
self.push_state()
state = self.get_state()
state.font = 'rm'
hlist = Hlist([Char(c, state) for c in toks[0]])
self.pop_state()
hlist.function_name = toks[0]
return hlist
def start_group(self, s, loc, toks):
self.push_state()
# Deal with LaTeX-style font tokens
if len(toks):
self.get_state().font = toks[0][4:]
return []
def group(self, s, loc, toks):
grp = Hlist(toks[0])
return [grp]
def end_group(self, s, loc, toks):
self.pop_state()
return []
def font(self, s, loc, toks):
assert(len(toks)==1)
name = toks[0]
self.get_state().font = name
return []
def is_overunder(self, nucleus):
if isinstance(nucleus, Char):
return nucleus.c in self._overunder_symbols
elif isinstance(nucleus, Hlist) and hasattr(nucleus, 'function_name'):
return nucleus.function_name in self._overunder_functions
return False
def is_dropsub(self, nucleus):
if isinstance(nucleus, Char):
return nucleus.c in self._dropsub_symbols
return False
def is_slanted(self, nucleus):
if isinstance(nucleus, Char):
return nucleus.is_slanted()
return False
def subsuperscript(self, s, loc, toks):
assert(len(toks)==1)
# print 'subsuperscript', toks
nucleus = None
sub = None
super = None
if len(toks[0]) == 1:
return toks[0].asList()
elif len(toks[0]) == 2:
op, next = toks[0]
nucleus = Hbox(0.0)
if op == '_':
sub = next
else:
super = next
elif len(toks[0]) == 3:
nucleus, op, next = toks[0]
if op == '_':
sub = next
else:
super = next
elif len(toks[0]) == 5:
nucleus, op1, next1, op2, next2 = toks[0]
if op1 == op2:
if op1 == '_':
raise ParseFatalException("Double subscript")
else:
raise ParseFatalException("Double superscript")
if op1 == '_':
sub = next1
super = next2
else:
super = next1
sub = next2
else:
raise ParseFatalException(
"Subscript/superscript sequence is too long. "
"Use braces { } to remove ambiguity.")
state = self.get_state()
rule_thickness = state.font_output.get_underline_thickness(
state.font, state.fontsize, state.dpi)
xHeight = state.font_output.get_xheight(
state.font, state.fontsize, state.dpi)
# Handle over/under symbols, such as sum or integral
if self.is_overunder(nucleus):
vlist = []
shift = 0.
width = nucleus.width
if super is not None:
super.shrink()
width = max(width, super.width)
if sub is not None:
sub.shrink()
width = max(width, sub.width)
if super is not None:
hlist = HCentered([super])
hlist.hpack(width, 'exactly')
vlist.extend([hlist, Kern(rule_thickness * 3.0)])
hlist = HCentered([nucleus])
hlist.hpack(width, 'exactly')
vlist.append(hlist)
if sub is not None:
hlist = HCentered([sub])
hlist.hpack(width, 'exactly')
vlist.extend([Kern(rule_thickness * 3.0), hlist])
shift = hlist.height + hlist.depth + rule_thickness * 2.0
vlist = Vlist(vlist)
vlist.shift_amount = shift + nucleus.depth * 0.5
result = Hlist([vlist])
return [result]
# Handle regular sub/superscripts
shift_up = nucleus.height - SUBDROP * xHeight
if self.is_dropsub(nucleus):
shift_down = nucleus.depth + SUBDROP * xHeight
else:
shift_down = SUBDROP * xHeight
if super is None:
# node757
sub.shrink()
x = Hlist([sub])
# x.width += SCRIPT_SPACE * xHeight
shift_down = max(shift_down, SUB1)
clr = x.height - (abs(xHeight * 4.0) / 5.0)
shift_down = max(shift_down, clr)
x.shift_amount = shift_down
else:
super.shrink()
x = Hlist([super, Kern(SCRIPT_SPACE * xHeight)])
# x.width += SCRIPT_SPACE * xHeight
clr = SUP1 * xHeight
shift_up = max(shift_up, clr)
clr = x.depth + (abs(xHeight) / 4.0)
shift_up = max(shift_up, clr)
if sub is None:
x.shift_amount = -shift_up
else: # Both sub and superscript
sub.shrink()
y = Hlist([sub])
# y.width += SCRIPT_SPACE * xHeight
shift_down = max(shift_down, SUB1 * xHeight)
clr = (2.0 * rule_thickness -
((shift_up - x.depth) - (y.height - shift_down)))
if clr > 0.:
shift_up += clr
shift_down += clr
if self.is_slanted(nucleus):
x.shift_amount = DELTA * (shift_up + shift_down)
x = Vlist([x,
Kern((shift_up - x.depth) - (y.height - shift_down)),
y])
x.shift_amount = shift_down
result = Hlist([nucleus, x])
return [result]
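    # Worked numbers for the shifts above (illustrative only): with
    # xHeight = 10 and a nucleus of height 7 that is not \int-like,
    # shift_up starts at 7 - SUBDROP*10 = 4 and shift_down at SUBDROP*10 = 3.
    # A lone superscript is then raised by
    # max(shift_up, SUP1*10, x.depth + 10/4); when both scripts are present
    # and the nucleus is slanted, the superscript is also nudged right by
    # DELTA * (shift_up + shift_down).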
def frac(self, s, loc, toks):
assert(len(toks)==1)
assert(len(toks[0])==2)
state = self.get_state()
thickness = state.font_output.get_underline_thickness(
state.font, state.fontsize, state.dpi)
num, den = toks[0]
num.shrink()
den.shrink()
cnum = HCentered([num])
cden = HCentered([den])
width = max(num.width, den.width) + thickness * 10.
cnum.hpack(width, 'exactly')
cden.hpack(width, 'exactly')
vlist = Vlist([cnum, # numerator
Vbox(0, thickness * 2.0), # space
Hrule(state), # rule
Vbox(0, thickness * 4.0), # space
cden # denominator
])
# Shift so the fraction line sits in the middle of the
# equals sign
metrics = state.font_output.get_metrics(
state.font, 'it', '=', state.fontsize, state.dpi)
shift = (cden.height -
((metrics.ymax + metrics.ymin) / 2 -
thickness * 3.0))
vlist.shift_amount = shift
hlist = Hlist([vlist, Hbox(thickness * 2.)])
return [hlist]
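    # The resulting stack for \frac{num}{den}, with t the underline
    # thickness (illustrative):
    #
    #         [ numerator   ]   <- shrunk one size level, centered
    #          (gap of 2*t)
    #         ===============   <- Hrule, t thick
    #          (gap of 4*t)
    #         [ denominator ]   <- shrunk one size level, centered
    #
    # all hpack'ed to max(num.width, den.width) + 10*t and shifted so the
    # rule lines up with the middle of an '=' at the current font size.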
def sqrt(self, s, loc, toks):
#~ print "sqrt", toks
root, body = toks[0]
state = self.get_state()
thickness = state.font_output.get_underline_thickness(
state.font, state.fontsize, state.dpi)
# Determine the height of the body, and add a little extra to
# the height so it doesn't seem cramped
height = body.height - body.shift_amount + thickness * 5.0
depth = body.depth + body.shift_amount
check = AutoHeightChar(r'\__sqrt__', height, depth, state, always=True)
height = check.height - check.shift_amount
depth = check.depth + check.shift_amount
# Put a little extra space to the left and right of the body
padded_body = Hlist([Hbox(thickness * 2.0),
body,
Hbox(thickness * 2.0)])
rightside = Vlist([Hrule(state),
Fill(),
padded_body])
# Stretch the glue between the hrule and the body
        rightside.vpack(height + (state.fontsize * state.dpi) / (100.0 * 12.0),
                        'exactly', depth)
# Add the root and shift it upward so it is above the tick.
# The value of 0.6 is a hard-coded hack ;)
if root is None:
root = Box(check.width * 0.5, 0., 0.)
else:
root = Hlist([Char(x, state) for x in root])
root.shrink()
root.shrink()
root_vlist = Vlist([Hlist([root])])
root_vlist.shift_amount = -height * 0.6
hlist = Hlist([root_vlist, # Root
# Negative kerning to put root over tick
Kern(-check.width * 0.5),
check, # Check
rightside]) # Body
return [hlist]
def auto_sized_delimiter(self, s, loc, toks):
#~ print "auto_sized_delimiter", toks
front, middle, back = toks
state = self.get_state()
height = max([x.height for x in middle])
depth = max([x.depth for x in middle])
parts = []
# \left. and \right. aren't supposed to produce any symbols
if front != '.':
parts.append(AutoHeightChar(front, height, depth, state))
parts.extend(middle.asList())
if back != '.':
parts.append(AutoHeightChar(back, height, depth, state))
hlist = Hlist(parts)
return hlist
###
##############################################################################
# MAIN
class MathTextParser(object):
_parser = None
_backend_mapping = {
'bitmap': MathtextBackendBitmap,
'agg' : MathtextBackendAgg,
'ps' : MathtextBackendPs,
'pdf' : MathtextBackendPdf,
'svg' : MathtextBackendSvg,
'cairo' : MathtextBackendCairo,
'macosx': MathtextBackendAgg,
}
_font_type_mapping = {
'cm' : BakomaFonts,
'stix' : StixFonts,
'stixsans' : StixSansFonts,
'custom' : UnicodeFonts
}
def __init__(self, output):
"""
Create a MathTextParser for the given backend *output*.
"""
self._output = output.lower()
self._cache = maxdict(50)
def parse(self, s, dpi = 72, prop = None):
"""
Parse the given math expression *s* at the given *dpi*. If
*prop* is provided, it is a
:class:`~matplotlib.font_manager.FontProperties` object
specifying the "default" font to use in the math expression,
used for all non-math text.
The results are cached, so multiple calls to :meth:`parse`
with the same expression should be fast.
"""
if prop is None:
prop = FontProperties()
cacheKey = (s, dpi, hash(prop))
result = self._cache.get(cacheKey)
if result is not None:
return result
if self._output == 'ps' and rcParams['ps.useafm']:
font_output = StandardPsFonts(prop)
else:
backend = self._backend_mapping[self._output]()
fontset = rcParams['mathtext.fontset']
fontset_class = self._font_type_mapping.get(fontset.lower())
if fontset_class is not None:
font_output = fontset_class(prop, backend)
else:
raise ValueError(
"mathtext.fontset must be either 'cm', 'stix', "
"'stixsans', or 'custom'")
fontsize = prop.get_size_in_points()
# This is a class variable so we don't rebuild the parser
# with each request.
if self._parser is None:
self.__class__._parser = Parser()
box = self._parser.parse(s, font_output, fontsize, dpi)
font_output.set_canvas_size(box.width, box.height, box.depth)
result = font_output.get_results(box)
self._cache[cacheKey] = result
# Free up the transient data structures
self._parser.clear()
# Fix cyclical references
font_output.destroy()
font_output.mathtext_backend.fonts_object = None
font_output.mathtext_backend = None
return result
def to_mask(self, texstr, dpi=120, fontsize=14):
"""
*texstr*
A valid mathtext string, eg r'IQ: $\sigma_i=15$'
*dpi*
The dots-per-inch to render the text
*fontsize*
The font size in points
Returns a tuple (*array*, *depth*)
- *array* is an NxM uint8 alpha ubyte mask array of
rasterized tex.
- depth is the offset of the baseline from the bottom of the
image in pixels.
"""
assert(self._output=="bitmap")
prop = FontProperties(size=fontsize)
ftimage, depth = self.parse(texstr, dpi=dpi, prop=prop)
x = ftimage.as_array()
return x, depth
def to_rgba(self, texstr, color='black', dpi=120, fontsize=14):
"""
*texstr*
A valid mathtext string, eg r'IQ: $\sigma_i=15$'
*color*
Any matplotlib color argument
*dpi*
The dots-per-inch to render the text
*fontsize*
The font size in points
Returns a tuple (*array*, *depth*)
          - *array* is an NxMx4 RGBA uint8 array of the
            rasterized tex.
- depth is the offset of the baseline from the bottom of the
image in pixels.
"""
x, depth = self.to_mask(texstr, dpi=dpi, fontsize=fontsize)
r, g, b = mcolors.colorConverter.to_rgb(color)
RGBA = np.zeros((x.shape[0], x.shape[1], 4), dtype=np.uint8)
RGBA[:,:,0] = int(255*r)
RGBA[:,:,1] = int(255*g)
RGBA[:,:,2] = int(255*b)
RGBA[:,:,3] = x
return RGBA, depth
def to_png(self, filename, texstr, color='black', dpi=120, fontsize=14):
"""
        Writes a tex expression to a PNG file.
*filename*
A writable filename or fileobject
*texstr*
A valid mathtext string, eg r'IQ: $\sigma_i=15$'
*color*
A valid matplotlib color argument
*dpi*
The dots-per-inch to render the text
*fontsize*
The font size in points
Returns the offset of the baseline from the bottom of the
image in pixels.
"""
rgba, depth = self.to_rgba(texstr, color=color, dpi=dpi, fontsize=fontsize)
numrows, numcols, tmp = rgba.shape
_png.write_png(rgba.tostring(), numcols, numrows, filename)
return depth
def get_depth(self, texstr, dpi=120, fontsize=14):
"""
Returns the offset of the baseline from the bottom of the
image in pixels.
*texstr*
A valid mathtext string, eg r'IQ: $\sigma_i=15$'
*dpi*
The dots-per-inch to render the text
*fontsize*
The font size in points
"""
assert(self._output=="bitmap")
prop = FontProperties(size=fontsize)
ftimage, depth = self.parse(texstr, dpi=dpi, prop=prop)
return depth
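# A hedged usage sketch for the class above (the output file name is
# hypothetical; to_mask/to_rgba/to_png require the 'bitmap' backend):
#
#     parser = MathTextParser('bitmap')
#     rgba, depth = parser.to_rgba(r'IQ: $\sigma_i=15$', color='black',
#                                  dpi=120, fontsize=14)
#     baseline_px = parser.to_png('equation.png', r'$\sqrt{x^2 + 1}$')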
| agpl-3.0 |
pvlib/pvlib-python | pvlib/tests/test_clearsky.py | 1 | 30574 | from collections import OrderedDict
import numpy as np
from numpy import nan
import pandas as pd
import pytz
from scipy.linalg import hankel
import pytest
from numpy.testing import assert_allclose
from .conftest import assert_frame_equal, assert_series_equal
from pvlib.location import Location
from pvlib import clearsky
from pvlib import solarposition
from pvlib import atmosphere
from pvlib import irradiance
from .conftest import requires_tables, DATA_DIR
def test_ineichen_series():
times = pd.date_range(start='2014-06-24', end='2014-06-25', freq='3h',
tz='America/Phoenix')
apparent_zenith = pd.Series(np.array(
[124.0390863, 113.38779941, 82.85457044, 46.0467599, 10.56413562,
34.86074109, 72.41687122, 105.69538659, 124.05614124]),
index=times)
am = pd.Series(np.array(
[nan, nan, 6.97935524, 1.32355476, 0.93527685,
1.12008114, 3.01614096, nan, nan]),
index=times)
expected = pd.DataFrame(np.
array([[ 0. , 0. , 0. ],
[ 0. , 0. , 0. ],
[ 65.49426624, 321.16092181, 25.54562017],
[ 704.6968125 , 888.90147035, 87.73601277],
[1044.1230677 , 953.24925854, 107.03109696],
[ 853.02065704, 922.06124712, 96.42909484],
[ 251.99427693, 655.44925241, 53.9901349 ],
[ 0. , 0. , 0. ],
[ 0. , 0. , 0. ]]),
columns=['ghi', 'dni', 'dhi'],
index=times)
out = clearsky.ineichen(apparent_zenith, am, 3)
assert_frame_equal(expected, out)
def test_ineichen_series_perez_enhancement():
times = pd.date_range(start='2014-06-24', end='2014-06-25', freq='3h',
tz='America/Phoenix')
apparent_zenith = pd.Series(np.array(
[124.0390863, 113.38779941, 82.85457044, 46.0467599, 10.56413562,
34.86074109, 72.41687122, 105.69538659, 124.05614124]),
index=times)
am = pd.Series(np.array(
[nan, nan, 6.97935524, 1.32355476, 0.93527685,
1.12008114, 3.01614096, nan, nan]),
index=times)
expected = pd.DataFrame(np.
array([[ 0. , 0. , 0. ],
[ 0. , 0. , 0. ],
[ 91.1249279 , 321.16092171, 51.17628184],
[ 716.46580547, 888.9014706 , 99.50500553],
[1053.42066073, 953.24925905, 116.3286895 ],
[ 863.54692748, 922.06124652, 106.9553658 ],
[ 271.06382275, 655.44925213, 73.05968076],
[ 0. , 0. , 0. ],
[ 0. , 0. , 0. ]]),
columns=['ghi', 'dni', 'dhi'],
index=times)
out = clearsky.ineichen(apparent_zenith, am, 3, perez_enhancement=True)
assert_frame_equal(expected, out)
def test_ineichen_scalar_input():
expected = OrderedDict()
expected['ghi'] = 1038.159219
expected['dni'] = 942.2081860378344
expected['dhi'] = 110.26529293612793
out = clearsky.ineichen(10., 1., 3.)
for k, v in expected.items():
assert_allclose(expected[k], out[k])
def test_ineichen_nans():
length = 4
apparent_zenith = np.full(length, 10.)
apparent_zenith[0] = np.nan
linke_turbidity = np.full(length, 3.)
linke_turbidity[1] = np.nan
dni_extra = np.full(length, 1370.)
dni_extra[2] = np.nan
airmass_absolute = np.full(length, 1.)
expected = OrderedDict()
expected['ghi'] = np.full(length, np.nan)
expected['dni'] = np.full(length, np.nan)
expected['dhi'] = np.full(length, np.nan)
expected['ghi'][length-1] = 1042.72590228
expected['dni'][length-1] = 946.35279683
expected['dhi'][length-1] = 110.75033088
out = clearsky.ineichen(apparent_zenith, airmass_absolute,
linke_turbidity, dni_extra=dni_extra)
for k, v in expected.items():
assert_allclose(expected[k], out[k])
def test_ineichen_arrays():
expected = OrderedDict()
expected['ghi'] = (np.
array([[[1095.77074798, 1054.17449885, 1014.15727338],
[ 839.40909243, 807.54451692, 776.88954373],
[ 190.27859353, 183.05548067, 176.10656239]],
[[ 773.49041181, 625.19479557, 505.33080493],
[ 592.52803177, 478.92699901, 387.10585505],
[ 134.31520045, 108.56393694, 87.74977339]],
[[ 545.9968869 , 370.78162375, 251.79449885],
[ 418.25788117, 284.03520249, 192.88577665],
[ 94.81136442, 64.38555328, 43.72365587]]]))
expected['dni'] = (np.
array([[[1014.38807396, 942.20818604, 861.11344424],
[1014.38807396, 942.20818604, 861.11344424],
[1014.38807396, 942.20818604, 861.11344424]],
[[ 687.61305142, 419.14891162, 255.50098235],
[ 687.61305142, 419.14891162, 255.50098235],
[ 687.61305142, 419.14891162, 255.50098235]],
[[ 458.62196014, 186.46177428, 75.80970012],
[ 458.62196014, 186.46177428, 75.80970012],
[ 458.62196014, 186.46177428, 75.80970012]]]))
expected['dhi'] = (np.
array([[[ 81.38267402, 111.96631281, 153.04382915],
[ 62.3427452 , 85.77117175, 117.23837487],
[ 14.13195304, 19.44274618, 26.57578203]],
[[ 85.87736039, 206.04588395, 249.82982258],
[ 65.78587472, 157.84030442, 191.38074731],
[ 14.91244713, 35.77949226, 43.38249342]],
[[ 87.37492676, 184.31984947, 175.98479873],
[ 66.93307711, 141.19719644, 134.81217714],
[ 15.17249681, 32.00680597, 30.5594396 ]]]))
apparent_zenith = np.linspace(0, 80, 3)
airmass_absolute = np.linspace(1, 10, 3)
linke_turbidity = np.linspace(2, 4, 3)
apparent_zenith, airmass_absolute, linke_turbidity = \
np.meshgrid(apparent_zenith, airmass_absolute, linke_turbidity)
out = clearsky.ineichen(apparent_zenith, airmass_absolute, linke_turbidity)
for k, v in expected.items():
assert_allclose(expected[k], out[k])
def test_ineichen_dni_extra():
expected = pd.DataFrame(
np.array([[1042.72590228, 946.35279683, 110.75033088]]),
columns=['ghi', 'dni', 'dhi'])
out = clearsky.ineichen(10, 1, 3, dni_extra=pd.Series(1370))
assert_frame_equal(expected, out)
def test_ineichen_altitude():
expected = pd.DataFrame(
np.array([[1134.24312405, 994.95377835, 154.40492924]]),
columns=['ghi', 'dni', 'dhi'])
out = clearsky.ineichen(10, 1, 3, altitude=pd.Series(2000))
assert_frame_equal(expected, out)
@requires_tables
def test_lookup_linke_turbidity():
times = pd.date_range(start='2014-06-24', end='2014-06-25',
freq='12h', tz='America/Phoenix')
# expect same value on 2014-06-24 0000 and 1200, and
# diff value on 2014-06-25
expected = pd.Series(
np.array([3.11803278689, 3.11803278689, 3.13114754098]), index=times
)
out = clearsky.lookup_linke_turbidity(times, 32.125, -110.875)
assert_series_equal(expected, out)
@requires_tables
def test_lookup_linke_turbidity_leapyear():
times = pd.date_range(start='2016-06-24', end='2016-06-25',
freq='12h', tz='America/Phoenix')
# expect same value on 2016-06-24 0000 and 1200, and
# diff value on 2016-06-25
expected = pd.Series(
np.array([3.11803278689, 3.11803278689, 3.13114754098]), index=times
)
out = clearsky.lookup_linke_turbidity(times, 32.125, -110.875)
assert_series_equal(expected, out)
@requires_tables
def test_lookup_linke_turbidity_nointerp():
times = pd.date_range(start='2014-06-24', end='2014-06-25',
freq='12h', tz='America/Phoenix')
# expect same value for all days
expected = pd.Series(np.array([3., 3., 3.]), index=times)
out = clearsky.lookup_linke_turbidity(times, 32.125, -110.875,
interp_turbidity=False)
assert_series_equal(expected, out)
@requires_tables
def test_lookup_linke_turbidity_months():
times = pd.date_range(start='2014-04-01', end='2014-07-01',
freq='1M', tz='America/Phoenix')
expected = pd.Series(
np.array([2.89918032787, 2.97540983607, 3.19672131148]), index=times
)
out = clearsky.lookup_linke_turbidity(times, 32.125, -110.875)
assert_series_equal(expected, out)
@requires_tables
def test_lookup_linke_turbidity_months_leapyear():
times = pd.date_range(start='2016-04-01', end='2016-07-01',
freq='1M', tz='America/Phoenix')
expected = pd.Series(
np.array([2.89918032787, 2.97540983607, 3.19672131148]), index=times
)
out = clearsky.lookup_linke_turbidity(times, 32.125, -110.875)
assert_series_equal(expected, out)
@requires_tables
def test_lookup_linke_turbidity_nointerp_months():
times = pd.date_range(start='2014-04-10', end='2014-07-10',
freq='1M', tz='America/Phoenix')
expected = pd.Series(np.array([2.85, 2.95, 3.]), index=times)
out = clearsky.lookup_linke_turbidity(times, 32.125, -110.875,
interp_turbidity=False)
assert_series_equal(expected, out)
# changing the dates shouldn't matter if interp=False
times = pd.date_range(start='2014-04-05', end='2014-07-05',
freq='1M', tz='America/Phoenix')
out = clearsky.lookup_linke_turbidity(times, 32.125, -110.875,
interp_turbidity=False)
assert_series_equal(expected, out)
def test_haurwitz():
apparent_solar_elevation = np.array([-20, -0.05, -0.001, 5, 10, 30, 50, 90])
apparent_solar_zenith = 90 - apparent_solar_elevation
data_in = pd.DataFrame(data=apparent_solar_zenith,
index=apparent_solar_zenith,
columns=['apparent_zenith'])
expected = pd.DataFrame(np.array([0.,
0.,
0.,
48.6298687941956,
135.741748091813,
487.894132885425,
778.766689344363,
1035.09203253450]),
columns=['ghi'],
index=apparent_solar_zenith)
out = clearsky.haurwitz(data_in['apparent_zenith'])
assert_frame_equal(expected, out)
def test_simplified_solis_scalar_elevation():
expected = OrderedDict()
expected['ghi'] = 1064.653145
expected['dni'] = 959.335463
expected['dhi'] = 129.125602
out = clearsky.simplified_solis(80)
for k, v in expected.items():
assert_allclose(expected[k], out[k])
def test_simplified_solis_scalar_neg_elevation():
expected = OrderedDict()
expected['ghi'] = 0
expected['dni'] = 0
expected['dhi'] = 0
out = clearsky.simplified_solis(-10)
for k, v in expected.items():
assert_allclose(expected[k], out[k])
def test_simplified_solis_series_elevation():
expected = pd.DataFrame(
np.array([[959.335463, 1064.653145, 129.125602]]),
columns=['dni', 'ghi', 'dhi'])
expected = expected[['ghi', 'dni', 'dhi']]
out = clearsky.simplified_solis(pd.Series(80))
assert_frame_equal(expected, out)
def test_simplified_solis_dni_extra():
expected = pd.DataFrame(np.array([[963.555414, 1069.33637, 129.693603]]),
columns=['dni', 'ghi', 'dhi'])
expected = expected[['ghi', 'dni', 'dhi']]
out = clearsky.simplified_solis(80, dni_extra=pd.Series(1370))
assert_frame_equal(expected, out)
def test_simplified_solis_pressure():
expected = pd.DataFrame(np.
array([[ 964.26930718, 1067.96543669, 127.22841797],
[ 961.88811874, 1066.36847963, 128.1402539 ],
[ 959.58112234, 1064.81837558, 129.0304193 ]]),
columns=['dni', 'ghi', 'dhi'])
expected = expected[['ghi', 'dni', 'dhi']]
out = clearsky.simplified_solis(
80, pressure=pd.Series([95000, 98000, 101000]))
assert_frame_equal(expected, out)
def test_simplified_solis_aod700():
expected = pd.DataFrame(np.
array([[ 1056.61710493, 1105.7229086 , 64.41747323],
[ 1007.50558875, 1085.74139063, 102.96233698],
[ 959.3354628 , 1064.65314509, 129.12560167],
[ 342.45810926, 638.63409683, 77.71786575],
[ 55.24140911, 7.5413313 , 0. ]]),
columns=['dni', 'ghi', 'dhi'])
expected = expected[['ghi', 'dni', 'dhi']]
aod700 = pd.Series([0.0, 0.05, 0.1, 1, 10])
out = clearsky.simplified_solis(80, aod700=aod700)
assert_frame_equal(expected, out)
def test_simplified_solis_precipitable_water():
expected = pd.DataFrame(np.
array([[ 1001.15353307, 1107.84678941, 128.58887606],
[ 1001.15353307, 1107.84678941, 128.58887606],
[ 983.51027357, 1089.62306672, 129.08755996],
[ 959.3354628 , 1064.65314509, 129.12560167],
[ 872.02335029, 974.18046717, 125.63581346]]),
columns=['dni', 'ghi', 'dhi'])
expected = expected[['ghi', 'dni', 'dhi']]
out = clearsky.simplified_solis(
80, precipitable_water=pd.Series([0.0, 0.2, 0.5, 1.0, 5.0]))
assert_frame_equal(expected, out)
def test_simplified_solis_small_scalar_pw():
expected = OrderedDict()
expected['ghi'] = 1107.84678941
expected['dni'] = 1001.15353307
expected['dhi'] = 128.58887606
out = clearsky.simplified_solis(80, precipitable_water=0.1)
for k, v in expected.items():
assert_allclose(expected[k], out[k])
def test_simplified_solis_return_arrays():
expected = OrderedDict()
expected['ghi'] = np.array([[ 1148.40081325, 913.42330823],
[ 965.48550828, 760.04527609]])
expected['dni'] = np.array([[ 1099.25706525, 656.24601381],
[ 915.31689149, 530.31697378]])
expected['dhi'] = np.array([[ 64.1063074 , 254.6186615 ],
[ 62.75642216, 232.21931597]])
aod700 = np.linspace(0, 0.5, 2)
precipitable_water = np.linspace(0, 10, 2)
aod700, precipitable_water = np.meshgrid(aod700, precipitable_water)
out = clearsky.simplified_solis(80, aod700, precipitable_water)
for k, v in expected.items():
assert_allclose(expected[k], out[k])
def test_simplified_solis_nans_arrays():
# construct input arrays that each have 1 nan offset from each other,
# the last point is valid for all arrays
length = 6
apparent_elevation = np.full(length, 80.)
apparent_elevation[0] = np.nan
aod700 = np.full(length, 0.1)
aod700[1] = np.nan
precipitable_water = np.full(length, 0.5)
precipitable_water[2] = np.nan
pressure = np.full(length, 98000.)
pressure[3] = np.nan
dni_extra = np.full(length, 1370.)
dni_extra[4] = np.nan
expected = OrderedDict()
expected['ghi'] = np.full(length, np.nan)
expected['dni'] = np.full(length, np.nan)
expected['dhi'] = np.full(length, np.nan)
expected['ghi'][length-1] = 1096.022736
expected['dni'][length-1] = 990.306854
expected['dhi'][length-1] = 128.664594
out = clearsky.simplified_solis(apparent_elevation, aod700,
precipitable_water, pressure, dni_extra)
for k, v in expected.items():
assert_allclose(expected[k], out[k])
def test_simplified_solis_nans_series():
# construct input arrays that each have 1 nan offset from each other,
# the last point is valid for all arrays
length = 6
apparent_elevation = pd.Series(np.full(length, 80.))
apparent_elevation[0] = np.nan
aod700 = np.full(length, 0.1)
aod700[1] = np.nan
precipitable_water = np.full(length, 0.5)
precipitable_water[2] = np.nan
pressure = np.full(length, 98000.)
pressure[3] = np.nan
dni_extra = np.full(length, 1370.)
dni_extra[4] = np.nan
expected = OrderedDict()
expected['ghi'] = np.full(length, np.nan)
expected['dni'] = np.full(length, np.nan)
expected['dhi'] = np.full(length, np.nan)
expected['ghi'][length-1] = 1096.022736
expected['dni'][length-1] = 990.306854
expected['dhi'][length-1] = 128.664594
expected = pd.DataFrame.from_dict(expected)
out = clearsky.simplified_solis(apparent_elevation, aod700,
precipitable_water, pressure, dni_extra)
assert_frame_equal(expected, out)
@requires_tables
def test_linke_turbidity_corners():
"""Test Linke turbidity corners out of bounds."""
months = pd.DatetimeIndex('%d/1/2016' % (m + 1) for m in range(12))
def monthly_lt_nointerp(lat, lon, time=months):
"""monthly Linke turbidity factor without time interpolation"""
return clearsky.lookup_linke_turbidity(
time, lat, lon, interp_turbidity=False
)
# Northwest
assert np.allclose(
monthly_lt_nointerp(90, -180),
[1.9, 1.9, 1.9, 2.0, 2.05, 2.05, 2.1, 2.1, 2.0, 1.95, 1.9, 1.9])
# Southwest
assert np.allclose(
monthly_lt_nointerp(-90, -180),
[1.35, 1.3, 1.45, 1.35, 1.35, 1.35, 1.35, 1.35, 1.35, 1.4, 1.4, 1.3])
# Northeast
assert np.allclose(
monthly_lt_nointerp(90, 180),
[1.9, 1.9, 1.9, 2.0, 2.05, 2.05, 2.1, 2.1, 2.0, 1.95, 1.9, 1.9])
# Southeast
assert np.allclose(
monthly_lt_nointerp(-90, 180),
[1.35, 1.7, 1.35, 1.35, 1.35, 1.35, 1.35, 1.35, 1.35, 1.35, 1.35, 1.7])
# test out of range exceptions at corners
with pytest.raises(IndexError):
monthly_lt_nointerp(91, -122) # exceeds max latitude
with pytest.raises(IndexError):
monthly_lt_nointerp(38.2, 181) # exceeds max longitude
with pytest.raises(IndexError):
monthly_lt_nointerp(-91, -122) # exceeds min latitude
with pytest.raises(IndexError):
monthly_lt_nointerp(38.2, -181) # exceeds min longitude
def test_degrees_to_index_1():
"""Test that _degrees_to_index raises an error when something other than
'latitude' or 'longitude' is passed."""
with pytest.raises(IndexError): # invalid value for coordinate argument
clearsky._degrees_to_index(degrees=22.0, coordinate='width')
@pytest.fixture
def detect_clearsky_data():
data_file = DATA_DIR / 'detect_clearsky_data.csv'
expected = pd.read_csv(
data_file, index_col=0, parse_dates=True, comment='#')
expected = expected.tz_localize('UTC').tz_convert('Etc/GMT+7')
metadata = {}
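    # header comment lines of the form '# key: value' carry the site metadata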
with data_file.open() as f:
for line in f:
if line.startswith('#'):
key, value = line.strip('# \n').split(':')
metadata[key] = float(value)
else:
break
metadata['window_length'] = int(metadata['window_length'])
loc = Location(metadata['latitude'], metadata['longitude'],
altitude=metadata['elevation'])
# specify turbidity to guard against future lookup changes
cs = loc.get_clearsky(expected.index, linke_turbidity=2.658197)
return expected, cs
def test_detect_clearsky(detect_clearsky_data):
expected, cs = detect_clearsky_data
clear_samples = clearsky.detect_clearsky(
expected['GHI'], cs['ghi'], times=cs.index, window_length=10)
assert_series_equal(expected['Clear or not'], clear_samples,
check_dtype=False, check_names=False)
def test_detect_clearsky_defaults(detect_clearsky_data):
expected, cs = detect_clearsky_data
clear_samples = clearsky.detect_clearsky(
expected['GHI'], cs['ghi'])
assert_series_equal(expected['Clear or not'], clear_samples,
check_dtype=False, check_names=False)
def test_detect_clearsky_components(detect_clearsky_data):
expected, cs = detect_clearsky_data
clear_samples, components, alpha = clearsky.detect_clearsky(
expected['GHI'], cs['ghi'], times=cs.index, window_length=10,
return_components=True)
assert_series_equal(expected['Clear or not'], clear_samples,
check_dtype=False, check_names=False)
assert isinstance(components, OrderedDict)
assert np.allclose(alpha, 0.9633903181941296)
def test_detect_clearsky_iterations(detect_clearsky_data):
expected, cs = detect_clearsky_data
alpha = 1.0448
with pytest.warns(RuntimeWarning):
clear_samples = clearsky.detect_clearsky(
expected['GHI'], cs['ghi']*alpha, max_iterations=1)
assert clear_samples[:'2012-04-01 10:41:00'].all()
assert not clear_samples['2012-04-01 10:42:00':].any() # expected False
clear_samples = clearsky.detect_clearsky(
expected['GHI'], cs['ghi']*alpha, max_iterations=20)
assert_series_equal(expected['Clear or not'], clear_samples,
check_dtype=False, check_names=False)
def test_detect_clearsky_kwargs(detect_clearsky_data):
expected, cs = detect_clearsky_data
clear_samples = clearsky.detect_clearsky(
expected['GHI'], cs['ghi'], times=cs.index, window_length=10,
mean_diff=1000, max_diff=1000, lower_line_length=-1000,
upper_line_length=1000, var_diff=10, slope_dev=1000)
assert clear_samples.all()
def test_detect_clearsky_window(detect_clearsky_data):
expected, cs = detect_clearsky_data
clear_samples = clearsky.detect_clearsky(
expected['GHI'], cs['ghi'], window_length=3)
expected = expected['Clear or not'].copy()
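    # the shorter window classifies the trailing points as clear, unlike the
    # reference labels, so the expectation is adjusted before comparison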
expected.iloc[-3:] = True
assert_series_equal(expected, clear_samples,
check_dtype=False, check_names=False)
def test_detect_clearsky_time_interval(detect_clearsky_data):
expected, cs = detect_clearsky_data
u = np.arange(0, len(cs), 2)
cs2 = cs.iloc[u]
expected2 = expected.iloc[u]
clear_samples = clearsky.detect_clearsky(
expected2['GHI'], cs2['ghi'], window_length=6)
assert_series_equal(expected2['Clear or not'], clear_samples,
check_dtype=False, check_names=False)
def test_detect_clearsky_arrays(detect_clearsky_data):
expected, cs = detect_clearsky_data
clear_samples = clearsky.detect_clearsky(
expected['GHI'].values, cs['ghi'].values, times=cs.index,
window_length=10)
assert isinstance(clear_samples, np.ndarray)
assert (clear_samples == expected['Clear or not'].values).all()
def test_detect_clearsky_irregular_times(detect_clearsky_data):
expected, cs = detect_clearsky_data
times = cs.index.values.copy()
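    # the index values are datetime64[ns], so adding 10**9 shifts the first
    # timestamp by one second and makes the spacing irregular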
times[0] += 10**9
times = pd.DatetimeIndex(times)
with pytest.raises(NotImplementedError):
clearsky.detect_clearsky(expected['GHI'].values, cs['ghi'].values,
times, 10)
def test_detect_clearsky_missing_index(detect_clearsky_data):
expected, cs = detect_clearsky_data
with pytest.raises(ValueError):
clearsky.detect_clearsky(expected['GHI'].values, cs['ghi'].values)
@pytest.fixture
def detect_clearsky_helper_data():
samples_per_window = 3
sample_interval = 1
x = pd.Series(np.arange(0, 7)**2.)
# line length between adjacent points
sqt = pd.Series(np.sqrt(np.array([np.nan, 2., 10., 26., 50., 82, 122.])))
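    # Hankel matrix of positional indices: column j is [j, j+1, j+2], i.e. the
    # indices of the j-th 3-sample window expected by the windowed helpers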
H = hankel(np.arange(samples_per_window),
np.arange(samples_per_window-1, len(sqt)))
return x, samples_per_window, sample_interval, H
def test__line_length_windowed(detect_clearsky_helper_data):
x, samples_per_window, sample_interval, H = detect_clearsky_helper_data
# sqt is hand-calculated assuming window=3
# line length between adjacent points
sqt = pd.Series(np.sqrt(np.array([np.nan, 2., 10., 26., 50., 82, 122.])))
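    # each segment has length sqrt(sample_interval**2 + dx**2); the windowed
    # line length at index i sums the two segments inside its 3-sample window,
    # hence sqt + sqt.shift(-1)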
expected = {}
expected['line_length'] = sqt + sqt.shift(-1)
result = clearsky._line_length_windowed(
x, H, samples_per_window, sample_interval)
assert_series_equal(result, expected['line_length'])
def test__max_diff_windowed(detect_clearsky_helper_data):
x, samples_per_window, sample_interval, H = detect_clearsky_helper_data
expected = {}
expected['max_diff'] = pd.Series(
data=[np.nan, 3., 5., 7., 9., 11., np.nan], index=x.index)
result = clearsky._max_diff_windowed(x, H, samples_per_window)
assert_series_equal(result, expected['max_diff'])
def test__calc_stats(detect_clearsky_helper_data):
x, samples_per_window, sample_interval, H = detect_clearsky_helper_data
# stats are hand-computed assuming window = 3, sample_interval = 1,
# and right-aligned labels
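    # e.g. the first full window is [0, 1, 4]: mean 5/3, max 4, and its
    # successive differences [1, 3] have sample standard deviation sqrt(2)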
mean_x = pd.Series(np.array([np.nan, np.nan, 5, 14, 29, 50, 77]) / 3.)
max_x = pd.Series(np.array([np.nan, np.nan, 4, 9, 16, 25, 36]))
diff_std = np.array([np.nan, np.nan, np.sqrt(2), np.sqrt(2), np.sqrt(2),
np.sqrt(2), np.sqrt(2)])
slope_nstd = diff_std / mean_x
slope = x.diff().shift(-1)
expected = {}
expected['mean'] = mean_x.shift(-1) # shift to align to center
expected['max'] = max_x.shift(-1)
# slope between adjacent points
expected['slope'] = slope
expected['slope_nstd'] = slope_nstd.shift(-1)
result = clearsky._calc_stats(
x, samples_per_window, sample_interval, H)
res_mean, res_max, res_slope_nstd, res_slope = result
assert_series_equal(res_mean, expected['mean'])
assert_series_equal(res_max, expected['max'])
assert_series_equal(res_slope_nstd, expected['slope_nstd'])
assert_series_equal(res_slope, expected['slope'])
def test_bird():
"""Test Bird/Hulstrom Clearsky Model"""
times = pd.date_range(start='1/1/2015 0:00', end='12/31/2015 23:00',
freq='H')
tz = -7 # test timezone
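    # note the sign flip: pytz 'Etc/GMT+7' corresponds to UTC-7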
gmt_tz = pytz.timezone('Etc/GMT%+d' % -(tz))
times = times.tz_localize(gmt_tz) # set timezone
# match test data from BIRD_08_16_2012.xls
latitude = 40.
longitude = -105.
press_mB = 840.
o3_cm = 0.3
h2o_cm = 1.5
aod_500nm = 0.1
aod_380nm = 0.15
b_a = 0.85
alb = 0.2
eot = solarposition.equation_of_time_spencer71(times.dayofyear)
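    # shift the hour angle by half an hour (0.5 h * 15 deg/h) to line up with
    # the hour-centered timestamps of the reference spreadsheet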
hour_angle = solarposition.hour_angle(times, longitude, eot) - 0.5 * 15.
declination = solarposition.declination_spencer71(times.dayofyear)
zenith = solarposition.solar_zenith_analytical(
np.deg2rad(latitude), np.deg2rad(hour_angle), declination
)
zenith = np.rad2deg(zenith)
airmass = atmosphere.get_relative_airmass(zenith, model='kasten1966')
etr = irradiance.get_extra_radiation(times)
# test Bird with time series data
field_names = ('dni', 'direct_horizontal', 'ghi', 'dhi')
irrads = clearsky.bird(
zenith, airmass, aod_380nm, aod_500nm, h2o_cm, o3_cm, press_mB * 100.,
etr, b_a, alb
)
Eb, Ebh, Gh, Dh = (irrads[_] for _ in field_names)
data_path = DATA_DIR / 'BIRD_08_16_2012.csv'
testdata = pd.read_csv(data_path, usecols=range(1, 26), header=1).dropna()
testdata.index = times[1:48]
assert np.allclose(testdata['DEC'], np.rad2deg(declination[1:48]))
assert np.allclose(testdata['EQT'], eot[1:48], rtol=1e-4)
assert np.allclose(testdata['Hour Angle'], hour_angle[1:48])
assert np.allclose(testdata['Zenith Ang'], zenith[1:48])
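    # compare only where the sun is well above the horizon (zenith < 88 deg);
    # 'dawn' masks the computed series, 'dusk' masks the spreadsheet data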
dawn = zenith < 88.
dusk = testdata['Zenith Ang'] < 88.
am = pd.Series(np.where(dawn, airmass, 0.), index=times).fillna(0.0)
assert np.allclose(
testdata['Air Mass'].where(dusk, 0.), am[1:48], rtol=1e-3
)
direct_beam = pd.Series(np.where(dawn, Eb, 0.), index=times).fillna(0.)
assert np.allclose(
testdata['Direct Beam'].where(dusk, 0.), direct_beam[1:48], rtol=1e-3
)
direct_horz = pd.Series(np.where(dawn, Ebh, 0.), index=times).fillna(0.)
assert np.allclose(
testdata['Direct Hz'].where(dusk, 0.), direct_horz[1:48], rtol=1e-3
)
global_horz = pd.Series(np.where(dawn, Gh, 0.), index=times).fillna(0.)
assert np.allclose(
testdata['Global Hz'].where(dusk, 0.), global_horz[1:48], rtol=1e-3
)
diffuse_horz = pd.Series(np.where(dawn, Dh, 0.), index=times).fillna(0.)
assert np.allclose(
testdata['Dif Hz'].where(dusk, 0.), diffuse_horz[1:48], rtol=1e-3
)
# test keyword parameters
irrads2 = clearsky.bird(
zenith, airmass, aod_380nm, aod_500nm, h2o_cm, dni_extra=etr
)
Eb2, Ebh2, Gh2, Dh2 = (irrads2[_] for _ in field_names)
data_path = DATA_DIR / 'BIRD_08_16_2012_patm.csv'
testdata2 = pd.read_csv(data_path, usecols=range(1, 26), header=1).dropna()
testdata2.index = times[1:48]
direct_beam2 = pd.Series(np.where(dawn, Eb2, 0.), index=times).fillna(0.)
assert np.allclose(
testdata2['Direct Beam'].where(dusk, 0.), direct_beam2[1:48], rtol=1e-3
)
direct_horz2 = pd.Series(np.where(dawn, Ebh2, 0.), index=times).fillna(0.)
assert np.allclose(
testdata2['Direct Hz'].where(dusk, 0.), direct_horz2[1:48], rtol=1e-3
)
global_horz2 = pd.Series(np.where(dawn, Gh2, 0.), index=times).fillna(0.)
assert np.allclose(
testdata2['Global Hz'].where(dusk, 0.), global_horz2[1:48], rtol=1e-3
)
diffuse_horz2 = pd.Series(np.where(dawn, Dh2, 0.), index=times).fillna(0.)
assert np.allclose(
testdata2['Dif Hz'].where(dusk, 0.), diffuse_horz2[1:48], rtol=1e-3
)
# test scalars just at noon
# XXX: calculations start at 12am so noon is at index = 12
irrads3 = clearsky.bird(
zenith[12], airmass[12], aod_380nm, aod_500nm, h2o_cm, dni_extra=etr[12]
)
Eb3, Ebh3, Gh3, Dh3 = (irrads3[_] for _ in field_names)
# XXX: testdata starts at 1am so noon is at index = 11
    assert np.allclose(
[Eb3, Ebh3, Gh3, Dh3],
testdata2[['Direct Beam', 'Direct Hz', 'Global Hz', 'Dif Hz']].iloc[11],
rtol=1e-3)
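    # the returned frame is not used by pytest; it allows manual inspection of
    # the Bird components when the function is called directly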
return pd.DataFrame({'Eb': Eb, 'Ebh': Ebh, 'Gh': Gh, 'Dh': Dh}, index=times)
| bsd-3-clause |