repo_name | path | copies | size | content | license
---|---|---|---|---|---|
AndKyr/GETELEC | python/JFplot.py | 1 | 1648 | #! /usr/bin/python
import numpy as np
import getelec_mod as gt
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import matplotlib as mb
font = 30
# mb.rcParams["font.family"] = "Serif"
mb.rcParams["font.size"] = font
mb.rcParams["axes.labelsize"] = font
mb.rcParams["xtick.labelsize"] = font
mb.rcParams["ytick.labelsize"] = font
mb.rcParams["legend.fontsize"] = font
mb.rcParams["lines.linewidth"] = 2.5
fsize = (18,10)
Npoints = 256
Temps = [1.e-2, 300, 800, 1500]
Xfn = np.linspace(0.12, 0.35, Npoints)
F = 1./Xfn
Jem = np.copy(F)
this = gt.emission_create(W = 4.5, R = 5000., approx = 2)
fig1 = plt.figure(figsize=fsize)
ax1 = fig1.gca()
ax1.set_xlabel(r"$1/F$ [m GV$^{-1}$]")
ax1.set_ylabel(r"$J$ [A nm$^{-2}$]")
colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
for i in range(len(Temps)):
this.Temp = Temps[i]
if (this.Temp < 10.):
this.approx = -1
else:
this.approx = 2
for j in range(len(F)):
this.F = F[j]
this.cur_dens()
Jem[j] = this.Jem
ax1.semilogy(Xfn,Jem, label = r'T = %d K'%this.Temp)
# for i in range(len(Temps)):
# this.Temp = Temps[i]
# if (this.Temp < 10.):
# this.approx = -1
# else:
# this.approx = -1
# for j in range(len(F)):
# this.F = F[j]
# this.cur_dens()
# Jem[j] = this.Jem
# ax1.semilogy(Xfn,Jem, '--', color = colors[i], label = r'T = %d K'%this.Temp)
# np.savetxt("J-F.dat", np.transpose(np.array([F,Jem])), delimiter = " ")
ax1.grid()
ax1.legend()
plt.savefig("JFplot_Tparam.svg")
plt.savefig("JFplot_Tparam.png")
plt.show()
| gpl-3.0 |
ipashchenko/emcee-x | document/plots/oned.py | 16 | 2164 | import os
import sys
import time
import numpy as np
import matplotlib.pyplot as pl
import h5py
from multiprocessing import Pool
sys.path.append(os.path.abspath(os.path.join(__file__, "..", "..", "..")))
import emcee
# import acor
def lnprobfn(p, icov):
return -0.5 * np.dot(p, np.dot(icov, p))
def random_cov(ndim, dof=1):
v = np.random.randn(ndim * (ndim + dof)).reshape((ndim + dof, ndim))
return (sum([np.outer(v[i], v[i]) for i in range(ndim + dof)])
/ (ndim + dof))
_rngs = {}
def _worker(args):
i, outfn, nsteps = args
pid = os.getpid()
_random = _rngs.get(pid, np.random.RandomState(int(int(pid)
+ time.time())))
_rngs[pid] = _random
ndim = int(np.ceil(2 ** (7 * _random.rand())))
nwalkers = 2 * ndim + 2
# nwalkers += nwalkers % 2
print ndim, nwalkers
cov = random_cov(ndim)
icov = np.linalg.inv(cov)
ens_samp = emcee.EnsembleSampler(nwalkers, ndim, lnprobfn,
args=[icov])
ens_samp.random_state = _random.get_state()
pos, lnprob, state = ens_samp.run_mcmc(np.random.randn(nwalkers * ndim)
.reshape([nwalkers, ndim]), nsteps)
proposal = np.diag(cov.diagonal())
mh_samp = emcee.MHSampler(proposal, ndim, lnprobfn,
args=[icov])
mh_samp.random_state = state
mh_samp.run_mcmc(np.random.randn(ndim), nsteps)
f = h5py.File(outfn)
f["data"][i, :] = np.array([ndim, np.mean(ens_samp.acor),
np.mean(mh_samp.acor)])
f.close()
def oned():
nsteps = 10000
niter = 10
nthreads = 2
outfn = os.path.join(os.path.split(__file__)[0], "gauss_scaling.h5")
print outfn
f = h5py.File(outfn, "w")
f.create_dataset("data", (niter, 3), "f")
f.close()
pool = Pool(nthreads)
pool.map(_worker, [(i, outfn, nsteps) for i in range(niter)])
f = h5py.File(outfn)
data = f["data"][...]
f.close()
pl.clf()
pl.plot(data[:, 0], data[:, 1], "ks", alpha=0.5)
pl.plot(data[:, 0], data[:, 2], ".k", alpha=0.5)
pl.savefig(os.path.join(os.path.split(__file__)[0], "gauss_scaling.png"))
if __name__ == "__main__":
oned()
| mit |
GGoussar/scikit-image | doc/examples/segmentation/plot_marked_watershed.py | 9 | 1988 | """
===============================
Markers for watershed transform
===============================
The watershed is a classical algorithm used for **segmentation**, that
is, for separating different objects in an image.
Here a marker image is built from the region of low gradient inside the image.
In a gradient image, the areas of high values provide barriers that help to
segment the image.
Using markers on the lower values will ensure that the segmented objects are
found.
See Wikipedia_ for more details on the algorithm.
.. _Wikipedia: http://en.wikipedia.org/wiki/Watershed_(image_processing)
"""
from scipy import ndimage as ndi
import matplotlib.pyplot as plt
from skimage.morphology import watershed, disk
from skimage import data
from skimage.filters import rank
from skimage.util import img_as_ubyte
image = img_as_ubyte(data.camera())
# denoise image
denoised = rank.median(image, disk(2))
# find continuous region (low gradient -
# where less than 10 for this image) --> markers
# disk(5) is used here to get a more smooth image
markers = rank.gradient(denoised, disk(5)) < 10
markers = ndi.label(markers)[0]
# local gradient (disk(2) is used to keep edges thin)
gradient = rank.gradient(denoised, disk(2))
# process the watershed
labels = watershed(gradient, markers)
# display results
fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(8, 8), sharex=True, sharey=True, subplot_kw={'adjustable':'box-forced'})
ax = axes.ravel()
ax[0].imshow(image, cmap=plt.cm.gray, interpolation='nearest')
ax[0].set_title("Original")
ax[1].imshow(gradient, cmap=plt.cm.spectral, interpolation='nearest')
ax[1].set_title("Local Gradient")
ax[2].imshow(markers, cmap=plt.cm.spectral, interpolation='nearest')
ax[2].set_title("Markers")
ax[3].imshow(image, cmap=plt.cm.gray, interpolation='nearest')
ax[3].imshow(labels, cmap=plt.cm.spectral, interpolation='nearest', alpha=.7)
ax[3].set_title("Segmented")
for a in ax:
a.axis('off')
fig.tight_layout()
plt.show()
| bsd-3-clause |
karlnapf/kameleon-mcmc | kameleon_mcmc/tools/Visualise.py | 1 | 5656 | """
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
Written (W) 2013 Heiko Strathmann
Written (W) 2013 Dino Sejdinovic
"""
from kameleon_mcmc.distribution.Gaussian import Gaussian
from matplotlib.patches import Ellipse
from matplotlib.pyplot import imshow, ylim, xlim, contour, plot, hold, gca
from numpy import linspace
from numpy.linalg.linalg import eigh
from numpy import zeros, array, exp, arctan2, sqrt
import numpy
class Visualise(object):
def __init__(self):
pass
@staticmethod
def get_plotting_arrays(distribution):
bounds = distribution.get_plotting_bounds()
assert(len(bounds) == 2)
Xs = linspace(bounds[0][0], bounds[0][1])
Ys = linspace(bounds[1][0], bounds[1][1])
return Xs, Ys
@staticmethod
def visualise_distribution(distribution, Z=None, log_density=False, Xs=None, Ys=None):
"""
Plots the density of a given Distribution instance and plots some
samples on top.
"""
if Xs is None or Ys is None:
Xs, Ys = Visualise.get_plotting_arrays(distribution)
Visualise.plot_density(distribution, Xs, Ys)
if Z is not None:
hold(True)
Visualise.plot_data(Z)
hold(False)
@staticmethod
def plot_density(distribution, Xs, Ys, log_domain=False):
"""
Plots a 2D density
density - distribution instance to plot
Xs - x values the density is evaluated at
Ys - y values the density is evaluated at
log_domain - if False, density will be put into exponential function
"""
assert(distribution.dimension == 2)
D = zeros((len(Xs), len(Ys)))
# compute log-density
for i in range(len(Xs)):
for j in range(len(Ys)):
x = array([[Xs[i], Ys[j]]])
D[j, i] = distribution.log_pdf(x)
if log_domain == False:
D = exp(D)
im = imshow(D, origin='lower')
im.set_extent([Xs.min(), Xs.max(), Ys.min(), Ys.max()])
im.set_interpolation('nearest')
im.set_cmap('gray')
ylim([Ys.min(), Ys.max()])
xlim([Xs.min(), Xs.max()])
@staticmethod
def contour_plot_density(distribution, Xs=None, Ys=None, log_domain=False):
"""
Contour-plots a 2D density. If Gaussian, plots 1.96 interval contour only
density - distribution instance to plot
Xs - x values the density is evaluated at
Ys - y values the density is evaluated at
log_domain - if False, density will be put into exponential function
"""
if isinstance(distribution, Gaussian) and log_domain == False:
gca().add_artist(Visualise.get_gaussian_ellipse_artist(distribution))
gca().plot(distribution.mu[0], distribution.mu[1], 'r*', \
markersize=3.0, markeredgewidth=.1)
return
assert(distribution.dimension == 2)
if Xs is None:
(xmin, xmax), _ = distribution.get_plotting_bounds()
Xs = linspace(xmin, xmax)
if Ys is None:
_, (ymin, ymax) = distribution.get_plotting_bounds()
Ys = linspace(ymin, ymax)
D = zeros((len(Ys), len(Xs)))
# compute log-density
for i in range(len(Xs)):
for j in range(len(Ys)):
x = array([[Xs[i], Ys[j]]])
D[j, i] = distribution.log_pdf(x)
if log_domain == False:
D = exp(D)
contour(Xs, Ys, D, origin='lower')
@staticmethod
def plot_array(Xs, Ys, D):
"""
Plots a 2D array
Xs - x values the density is evaluated at
Ys - y values the density is evaluated at
D - array to plot
"""
im = imshow(D, origin='lower')
im.set_extent([Xs.min(), Xs.max(), Ys.min(), Ys.max()])
im.set_interpolation('nearest')
im.set_cmap('gray')
ylim([Ys.min(), Ys.max()])
xlim([Xs.min(), Xs.max()])
@staticmethod
def plot_data(Z, y=None):
"""
Plots collection of 2D points and optionally adds a marker to one of them
Z - set of row-vectors points to plot
y - one point that is marked in red, might be None
"""
plot(Z[:, 0], Z[:, 1], '*', markersize=3.0, markeredgewidth=.1)
if y is not None:
plot(y[0, 0], y[0, 1], 'r*', markersize=10.0, markeredgewidth=.1)
@staticmethod
def get_gaussian_ellipse_artist(gaussian, nstd=1.96, linewidth=1):
"""
Returns an ellipse artist for nstd times the standard deviation of this
Gaussian
"""
assert(isinstance(gaussian, Gaussian))
assert(gaussian.dimension == 2)
# compute eigenvalues (ordered)
vals, vecs = eigh(gaussian.L.dot(gaussian.L.T))
order = vals.argsort()[::-1]
vals, vecs = vals[order], vecs[:, order]
theta = numpy.degrees(arctan2(*vecs[:, 0][::-1]))
# width and height are "full" widths, not radius
width, height = 2 * nstd * sqrt(vals)
e = Ellipse(xy=gaussian.mu, width=width, height=height, angle=theta, \
edgecolor="red", fill=False, linewidth=linewidth)
return e
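# --- Illustrative sketch (not part of the original module) ---
# get_gaussian_ellipse_artist draws the nstd-sigma (1.96 ~ 95%) ellipse of a
# 2D Gaussian from the eigendecomposition of its covariance. The standalone
# block below reproduces that construction with plain numpy/matplotlib; the
# mean and covariance values are made up for demonstration only.
if __name__ == "__main__":
    import numpy as np
    import matplotlib.pyplot as plt

    mu = np.array([1.0, 2.0])                    # hypothetical mean
    Sigma = np.array([[2.0, 0.6], [0.6, 1.0]])   # hypothetical covariance
    vals, vecs = np.linalg.eigh(Sigma)
    order = vals.argsort()[::-1]                 # largest eigenvalue first
    vals, vecs = vals[order], vecs[:, order]
    angle = np.degrees(np.arctan2(*vecs[:, 0][::-1]))
    width, height = 2 * 1.96 * np.sqrt(vals)     # full widths, not radii
    fig, ax = plt.subplots()
    ax.add_artist(Ellipse(xy=mu, width=width, height=height, angle=angle,
                          edgecolor="red", fill=False))
    ax.set_xlim(-3, 5)
    ax.set_ylim(-2, 6)
    plt.show()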
| bsd-2-clause |
antiface/mne-python | examples/decoding/plot_linear_model_patterns.py | 13 | 3098 | """
===============================================================
Linear classifier on sensor data with plot patterns and filters
===============================================================
Decoding, a.k.a. MVPA or supervised machine learning applied to MEG and EEG
data in sensor space. Fit a linear classifier with the LinearModel object
providing topographical patterns which are more neurophysiologically
interpretable [1] than the classifier filters (weight vectors).
The patterns explain how the MEG and EEG data were generated from the
discriminant neural sources which are extracted by the filters.
Note patterns/filters in MEG data are more similar than EEG data
because the noise is less spatially correlated in MEG than EEG.
[1] Haufe, S., Meinecke, F., Görgen, K., Dähne, S., Haynes, J.-D.,
Blankertz, B., & Bießmann, F. (2014). On the interpretation of
weight vectors of linear models in multivariate neuroimaging.
NeuroImage, 87, 96–110. doi:10.1016/j.neuroimage.2013.10.067
"""
# Authors: Alexandre Gramfort <[email protected]>
# Romain Trachel <[email protected]>
#
# License: BSD (3-clause)
import mne
from mne import io
from mne.datasets import sample
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
# import a linear classifier from mne.decoding
from mne.decoding import LinearModel
print(__doc__)
data_path = sample.data_path()
###############################################################################
# Set parameters
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
tmin, tmax = -0.2, 0.5
event_id = dict(aud_l=1, vis_l=3)
# Setup for reading the raw data
raw = io.Raw(raw_fname, preload=True)
raw.filter(2, None, method='iir') # replace baselining with high-pass
events = mne.read_events(event_fname)
# Read epochs
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
decim=4, baseline=None, preload=True)
labels = epochs.events[:, -1]
# get MEG and EEG data
meg_epochs = epochs.pick_types(meg=True, eeg=False, copy=True)
meg_data = meg_epochs.get_data().reshape(len(labels), -1)
eeg_epochs = epochs.pick_types(meg=False, eeg=True, copy=True)
eeg_data = eeg_epochs.get_data().reshape(len(labels), -1)
###############################################################################
# Decoding in sensor space using a LogisticRegression classifier
clf = LogisticRegression()
sc = StandardScaler()
# create a linear model with LogisticRegression
model = LinearModel(clf)
# fit the classifier on MEG data
X = sc.fit_transform(meg_data)
model.fit(X, labels)
# plot patterns and filters
model.plot_patterns(meg_epochs.info, title='MEG Patterns')
model.plot_filters(meg_epochs.info, title='MEG Filters')
# fit the classifier on EEG data
X = sc.fit_transform(eeg_data)
model.fit(X, labels)
# plot patterns and filters
model.plot_patterns(eeg_epochs.info, title='EEG Patterns')
model.plot_filters(eeg_epochs.info, title='EEG Filters')
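# --- Illustrative sketch (not part of the original example) ---
# The "patterns" plotted above relate to the classifier "filters" (weight
# vectors) through the data covariance, as described in Haufe et al. (2014):
# for a single linear filter w, the corresponding pattern is proportional to
# cov(X) @ w. The toy snippet below illustrates that relation on synthetic
# data; the array sizes and names are hypothetical.
import numpy as np
rng = np.random.RandomState(42)
X_toy = rng.randn(200, 5)                              # 200 samples, 5 "sensors"
w_toy = rng.randn(5)                                   # a hypothetical filter
pattern_toy = np.cov(X_toy, rowvar=False).dot(w_toy)   # pattern, up to scaling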
| bsd-3-clause |
johankaito/fufuka | microblog/flask/venv/lib/python2.7/site-packages/scipy/stats/_multivariate.py | 17 | 69089 | #
# Author: Joris Vankerschaver 2013
#
from __future__ import division, print_function, absolute_import
import numpy as np
import scipy.linalg
from scipy.misc import doccer
from scipy.special import gammaln, psi, multigammaln
from scipy._lib._util import check_random_state
__all__ = ['multivariate_normal', 'dirichlet', 'wishart', 'invwishart']
_LOG_2PI = np.log(2 * np.pi)
_LOG_2 = np.log(2)
_LOG_PI = np.log(np.pi)
def _process_parameters(dim, mean, cov):
"""
Infer dimensionality from mean or covariance matrix, ensure that
mean and covariance are full vector resp. matrix.
"""
# Try to infer dimensionality
if dim is None:
if mean is None:
if cov is None:
dim = 1
else:
cov = np.asarray(cov, dtype=float)
if cov.ndim < 2:
dim = 1
else:
dim = cov.shape[0]
else:
mean = np.asarray(mean, dtype=float)
dim = mean.size
else:
if not np.isscalar(dim):
raise ValueError("Dimension of random variable must be a scalar.")
# Check input sizes and return full arrays for mean and cov if necessary
if mean is None:
mean = np.zeros(dim)
mean = np.asarray(mean, dtype=float)
if cov is None:
cov = 1.0
cov = np.asarray(cov, dtype=float)
if dim == 1:
mean.shape = (1,)
cov.shape = (1, 1)
if mean.ndim != 1 or mean.shape[0] != dim:
raise ValueError("Array 'mean' must be a vector of length %d." % dim)
if cov.ndim == 0:
cov = cov * np.eye(dim)
elif cov.ndim == 1:
cov = np.diag(cov)
elif cov.ndim == 2 and cov.shape != (dim, dim):
rows, cols = cov.shape
if rows != cols:
msg = ("Array 'cov' must be square if it is two dimensional,"
" but cov.shape = %s." % str(cov.shape))
else:
msg = ("Dimension mismatch: array 'cov' is of shape %s,"
" but 'mean' is a vector of length %d.")
msg = msg % (str(cov.shape), len(mean))
raise ValueError(msg)
elif cov.ndim > 2:
raise ValueError("Array 'cov' must be at most two-dimensional,"
" but cov.ndim = %d" % cov.ndim)
return dim, mean, cov
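# --- Illustrative sketch (not part of the original module) ---
# _process_parameters expands scalar or 1-d `cov` inputs into a full matrix
# and infers the dimension from whichever argument provides it. The helper
# below (hypothetical, for demonstration only) shows a scalar covariance
# being promoted to cov * identity.
def _process_parameters_sketch():
    dim, mean, cov = _process_parameters(None, [0.0, 0.0], 2.0)
    # dim == 2, mean == array([0., 0.]), cov == 2 * np.eye(2)
    return dim, mean, cov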
def _process_quantiles(x, dim):
"""
Adjust quantiles array so that last axis labels the components of
each data point.
"""
x = np.asarray(x, dtype=float)
if x.ndim == 0:
x = x[np.newaxis]
elif x.ndim == 1:
if dim == 1:
x = x[:, np.newaxis]
else:
x = x[np.newaxis, :]
return x
def _squeeze_output(out):
"""
Remove single-dimensional entries from array and convert to scalar,
if necessary.
"""
out = out.squeeze()
if out.ndim == 0:
out = out[()]
return out
def _eigvalsh_to_eps(spectrum, cond=None, rcond=None):
"""
Determine which eigenvalues are "small" given the spectrum.
This is for compatibility across various linear algebra functions
that should agree about whether or not a Hermitian matrix is numerically
singular and what is its numerical matrix rank.
This is designed to be compatible with scipy.linalg.pinvh.
Parameters
----------
spectrum : 1d ndarray
Array of eigenvalues of a Hermitian matrix.
cond, rcond : float, optional
Cutoff for small eigenvalues.
Singular values smaller than rcond * largest_eigenvalue are
considered zero.
If None or -1, suitable machine precision is used.
Returns
-------
eps : float
Magnitude cutoff for numerical negligibility.
"""
if rcond is not None:
cond = rcond
if cond in [None, -1]:
t = spectrum.dtype.char.lower()
factor = {'f': 1E3, 'd': 1E6}
cond = factor[t] * np.finfo(t).eps
eps = cond * np.max(abs(spectrum))
return eps
def _pinv_1d(v, eps=1e-5):
"""
A helper function for computing the pseudoinverse.
Parameters
----------
v : iterable of numbers
This may be thought of as a vector of eigenvalues or singular values.
eps : float
Values with magnitude no greater than eps are considered negligible.
Returns
-------
v_pinv : 1d float ndarray
A vector of pseudo-inverted numbers.
"""
return np.array([0 if abs(x) <= eps else 1/x for x in v], dtype=float)
class _PSD(object):
"""
Compute coordinated functions of a symmetric positive semidefinite matrix.
This class addresses two issues. Firstly it allows the pseudoinverse,
the logarithm of the pseudo-determinant, and the rank of the matrix
to be computed using one call to eigh instead of three.
Secondly it allows these functions to be computed in a way
that gives mutually compatible results.
All of the functions are computed with a common understanding as to
which of the eigenvalues are to be considered negligibly small.
The functions are designed to coordinate with scipy.linalg.pinvh()
but not necessarily with np.linalg.det() or with np.linalg.matrix_rank().
Parameters
----------
M : array_like
Symmetric positive semidefinite matrix (2-D).
cond, rcond : float, optional
Cutoff for small eigenvalues.
Singular values smaller than rcond * largest_eigenvalue are
considered zero.
If None or -1, suitable machine precision is used.
lower : bool, optional
Whether the pertinent array data is taken from the lower
or upper triangle of M. (Default: lower)
check_finite : bool, optional
Whether to check that the input matrices contain only finite
numbers. Disabling may give a performance gain, but may result
in problems (crashes, non-termination) if the inputs do contain
infinities or NaNs.
allow_singular : bool, optional
Whether to allow a singular matrix. (Default: True)
Notes
-----
The arguments are similar to those of scipy.linalg.pinvh().
"""
def __init__(self, M, cond=None, rcond=None, lower=True,
check_finite=True, allow_singular=True):
# Compute the symmetric eigendecomposition.
# Note that eigh takes care of array conversion, chkfinite,
# and assertion that the matrix is square.
s, u = scipy.linalg.eigh(M, lower=lower, check_finite=check_finite)
eps = _eigvalsh_to_eps(s, cond, rcond)
if np.min(s) < -eps:
raise ValueError('the input matrix must be positive semidefinite')
d = s[s > eps]
if len(d) < len(s) and not allow_singular:
raise np.linalg.LinAlgError('singular matrix')
s_pinv = _pinv_1d(s, eps)
U = np.multiply(u, np.sqrt(s_pinv))
# Initialize the eagerly precomputed attributes.
self.rank = len(d)
self.U = U
self.log_pdet = np.sum(np.log(d))
# Initialize an attribute to be lazily computed.
self._pinv = None
@property
def pinv(self):
if self._pinv is None:
self._pinv = np.dot(self.U, self.U.T)
return self._pinv
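# --- Illustrative sketch (not part of the original module) ---
# _PSD bundles the pseudo-inverse, the log pseudo-determinant and the rank of
# a symmetric positive semidefinite matrix from a single eigendecomposition.
# The helper below (hypothetical, for demonstration only) applies it to a
# rank-deficient matrix.
def _psd_usage_sketch():
    M = np.array([[2.0, 0.0, 0.0],
                  [0.0, 1.0, 0.0],
                  [0.0, 0.0, 0.0]])        # singular, rank 2
    psd = _PSD(M, allow_singular=True)
    # rank counts the non-negligible eigenvalues; log_pdet sums their logs
    return psd.rank, psd.log_pdet, psd.pinv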
_doc_default_callparams = """\
mean : array_like, optional
Mean of the distribution (default zero)
cov : array_like, optional
Covariance matrix of the distribution (default one)
allow_singular : bool, optional
Whether to allow a singular covariance matrix. (Default: False)
"""
_doc_callparams_note = \
"""Setting the parameter `mean` to `None` is equivalent to having `mean`
be the zero-vector. The parameter `cov` can be a scalar, in which case
the covariance matrix is the identity times that value, a vector of
diagonal entries for the covariance matrix, or a two-dimensional
array_like.
"""
_doc_random_state = """\
random_state : None or int or np.random.RandomState instance, optional
If int or RandomState, use it for drawing the random variates.
If None (or np.random), the global np.random state is used.
Default is None.
"""
_doc_frozen_callparams = ""
_doc_frozen_callparams_note = \
"""See class definition for a detailed description of parameters."""
docdict_params = {
'_doc_default_callparams': _doc_default_callparams,
'_doc_callparams_note': _doc_callparams_note,
'_doc_random_state': _doc_random_state
}
docdict_noparams = {
'_doc_default_callparams': _doc_frozen_callparams,
'_doc_callparams_note': _doc_frozen_callparams_note,
'_doc_random_state': _doc_random_state
}
class multi_rv_generic(object):
"""
Class which encapsulates common functionality between all multivariate
distributions.
"""
def __init__(self, seed=None):
super(multi_rv_generic, self).__init__()
self._random_state = check_random_state(seed)
@property
def random_state(self):
""" Get or set the RandomState object for generating random variates.
This can be either None or an existing RandomState object.
If None (or np.random), use the RandomState singleton used by np.random.
If already a RandomState instance, use it.
If an int, use a new RandomState instance seeded with seed.
"""
return self._random_state
@random_state.setter
def random_state(self, seed):
self._random_state = check_random_state(seed)
def _get_random_state(self, random_state):
if random_state is not None:
return check_random_state(random_state)
else:
return self._random_state
class multi_rv_frozen(object):
"""
Class which encapsulates common functionality between all frozen
multivariate distributions.
"""
@property
def random_state(self):
return self._dist._random_state
@random_state.setter
def random_state(self, seed):
self._dist._random_state = check_random_state(seed)
class multivariate_normal_gen(multi_rv_generic):
r"""
A multivariate normal random variable.
The `mean` keyword specifies the mean. The `cov` keyword specifies the
covariance matrix.
Methods
-------
``pdf(x, mean=None, cov=1, allow_singular=False)``
Probability density function.
``logpdf(x, mean=None, cov=1, allow_singular=False)``
Log of the probability density function.
``rvs(mean=None, cov=1, size=1, random_state=None)``
Draw random samples from a multivariate normal distribution.
``entropy()``
Compute the differential entropy of the multivariate normal.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_doc_default_callparams)s
%(_doc_random_state)s
Alternatively, the object may be called (as a function) to fix the mean
and covariance parameters, returning a "frozen" multivariate normal
random variable:
rv = multivariate_normal(mean=None, cov=1, allow_singular=False)
- Frozen object with the same methods but holding the given
mean and covariance fixed.
Notes
-----
%(_doc_callparams_note)s
The covariance matrix `cov` must be a (symmetric) positive
semi-definite matrix. The determinant and inverse of `cov` are computed
as the pseudo-determinant and pseudo-inverse, respectively, so
that `cov` does not need to have full rank.
The probability density function for `multivariate_normal` is
.. math::
f(x) = \frac{1}{\sqrt{(2 \pi)^k \det \Sigma}}
\exp\left( -\frac{1}{2} (x - \mu)^T \Sigma^{-1} (x - \mu) \right),
where :math:`\mu` is the mean, :math:`\Sigma` the covariance matrix,
and :math:`k` is the dimension of the space where :math:`x` takes values.
.. versionadded:: 0.14.0
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy.stats import multivariate_normal
>>> x = np.linspace(0, 5, 10, endpoint=False)
>>> y = multivariate_normal.pdf(x, mean=2.5, cov=0.5); y
array([ 0.00108914, 0.01033349, 0.05946514, 0.20755375, 0.43939129,
0.56418958, 0.43939129, 0.20755375, 0.05946514, 0.01033349])
>>> fig1 = plt.figure()
>>> ax = fig1.add_subplot(111)
>>> ax.plot(x, y)
The input quantiles can be any shape of array, as long as the last
axis labels the components. This allows us for instance to
display the frozen pdf for a non-isotropic random variable in 2D as
follows:
>>> x, y = np.mgrid[-1:1:.01, -1:1:.01]
>>> pos = np.empty(x.shape + (2,))
>>> pos[:, :, 0] = x; pos[:, :, 1] = y
>>> rv = multivariate_normal([0.5, -0.2], [[2.0, 0.3], [0.3, 0.5]])
>>> fig2 = plt.figure()
>>> ax2 = fig2.add_subplot(111)
>>> ax2.contourf(x, y, rv.pdf(pos))
"""
def __init__(self, seed=None):
super(multivariate_normal_gen, self).__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__, docdict_params)
def __call__(self, mean=None, cov=1, allow_singular=False, seed=None):
"""
Create a frozen multivariate normal distribution.
See `multivariate_normal_frozen` for more information.
"""
return multivariate_normal_frozen(mean, cov,
allow_singular=allow_singular,
seed=seed)
def _logpdf(self, x, mean, prec_U, log_det_cov, rank):
"""
Parameters
----------
x : ndarray
Points at which to evaluate the log of the probability
density function
mean : ndarray
Mean of the distribution
prec_U : ndarray
A decomposition such that np.dot(prec_U, prec_U.T)
is the precision matrix, i.e. inverse of the covariance matrix.
log_det_cov : float
Logarithm of the determinant of the covariance matrix
rank : int
Rank of the covariance matrix.
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'logpdf' instead.
"""
dev = x - mean
maha = np.sum(np.square(np.dot(dev, prec_U)), axis=-1)
return -0.5 * (rank * _LOG_2PI + log_det_cov + maha)
def logpdf(self, x, mean, cov, allow_singular=False):
"""
Log of the multivariate normal probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_doc_default_callparams)s
Returns
-------
pdf : ndarray
Log of the probability density function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
"""
dim, mean, cov = _process_parameters(None, mean, cov)
x = _process_quantiles(x, dim)
psd = _PSD(cov, allow_singular=allow_singular)
out = self._logpdf(x, mean, psd.U, psd.log_pdet, psd.rank)
return _squeeze_output(out)
def pdf(self, x, mean, cov, allow_singular=False):
"""
Multivariate normal probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_doc_default_callparams)s
Returns
-------
pdf : ndarray
Probability density function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
"""
dim, mean, cov = _process_parameters(None, mean, cov)
x = _process_quantiles(x, dim)
psd = _PSD(cov, allow_singular=allow_singular)
out = np.exp(self._logpdf(x, mean, psd.U, psd.log_pdet, psd.rank))
return _squeeze_output(out)
def rvs(self, mean=None, cov=1, size=1, random_state=None):
"""
Draw random samples from a multivariate normal distribution.
Parameters
----------
%(_doc_default_callparams)s
size : integer, optional
Number of samples to draw (default 1).
%(_doc_random_state)s
Returns
-------
rvs : ndarray or scalar
Random variates of size (`size`, `N`), where `N` is the
dimension of the random variable.
Notes
-----
%(_doc_callparams_note)s
"""
dim, mean, cov = _process_parameters(None, mean, cov)
random_state = self._get_random_state(random_state)
out = random_state.multivariate_normal(mean, cov, size)
return _squeeze_output(out)
def entropy(self, mean=None, cov=1):
"""
Compute the differential entropy of the multivariate normal.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
h : scalar
Entropy of the multivariate normal distribution
Notes
-----
%(_doc_callparams_note)s
"""
dim, mean, cov = _process_parameters(None, mean, cov)
_, logdet = np.linalg.slogdet(2 * np.pi * np.e * cov)
return 0.5 * logdet
multivariate_normal = multivariate_normal_gen()
class multivariate_normal_frozen(multi_rv_frozen):
def __init__(self, mean=None, cov=1, allow_singular=False, seed=None):
"""
Create a frozen multivariate normal distribution.
Parameters
----------
mean : array_like, optional
Mean of the distribution (default zero)
cov : array_like, optional
Covariance matrix of the distribution (default one)
allow_singular : bool, optional
If this flag is True then tolerate a singular
covariance matrix (default False).
seed : None or int or np.random.RandomState instance, optional
This parameter defines the RandomState object to use for drawing
random variates.
If None (or np.random), the global np.random state is used.
If integer, it is used to seed the local RandomState instance
Default is None.
Examples
--------
When called with the default parameters, this will create a 1D random
variable with mean 0 and covariance 1:
>>> from scipy.stats import multivariate_normal
>>> r = multivariate_normal()
>>> r.mean
array([ 0.])
>>> r.cov
array([[1.]])
"""
self.dim, self.mean, self.cov = _process_parameters(None, mean, cov)
self.cov_info = _PSD(self.cov, allow_singular=allow_singular)
self._dist = multivariate_normal_gen(seed)
def logpdf(self, x):
x = _process_quantiles(x, self.dim)
out = self._dist._logpdf(x, self.mean, self.cov_info.U,
self.cov_info.log_pdet, self.cov_info.rank)
return _squeeze_output(out)
def pdf(self, x):
return np.exp(self.logpdf(x))
def rvs(self, size=1, random_state=None):
return self._dist.rvs(self.mean, self.cov, size, random_state)
def entropy(self):
"""
Computes the differential entropy of the multivariate normal.
Returns
-------
h : scalar
Entropy of the multivariate normal distribution
"""
log_pdet = self.cov_info.log_pdet
rank = self.cov_info.rank
return 0.5 * (rank * (_LOG_2PI + 1) + log_pdet)
# Set frozen generator docstrings from corresponding docstrings in
# multivariate_normal_gen and fill in default strings in class docstrings
for name in ['logpdf', 'pdf', 'rvs']:
method = multivariate_normal_gen.__dict__[name]
method_frozen = multivariate_normal_frozen.__dict__[name]
method_frozen.__doc__ = doccer.docformat(method.__doc__, docdict_noparams)
method.__doc__ = doccer.docformat(method.__doc__, docdict_params)
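# --- Illustrative sketch (not part of the original module) ---
# The generator and frozen entropy methods above use two algebraically
# equivalent expressions (slogdet of 2*pi*e*cov versus rank and
# log-pseudo-determinant); for a full-rank covariance they agree. The helper
# name and covariance below are hypothetical.
def _mvn_entropy_sketch():
    cov = np.array([[2.0, 0.3], [0.3, 0.5]])
    return (multivariate_normal.entropy(mean=None, cov=cov),
            multivariate_normal(mean=None, cov=cov).entropy())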
_dirichlet_doc_default_callparams = """\
alpha : array_like
The concentration parameters. The number of entries determines the
dimensionality of the distribution.
"""
_dirichlet_doc_frozen_callparams = ""
_dirichlet_doc_frozen_callparams_note = \
"""See class definition for a detailed description of parameters."""
dirichlet_docdict_params = {
'_dirichlet_doc_default_callparams': _dirichlet_doc_default_callparams,
'_doc_random_state': _doc_random_state
}
dirichlet_docdict_noparams = {
'_dirichlet_doc_default_callparams': _dirichlet_doc_frozen_callparams,
'_doc_random_state': _doc_random_state
}
def _dirichlet_check_parameters(alpha):
alpha = np.asarray(alpha)
if np.min(alpha) <= 0:
raise ValueError("All parameters must be greater than 0")
elif alpha.ndim != 1:
raise ValueError("Parameter vector 'a' must be one dimensional, " +
"but a.shape = %s." % str(alpha.shape))
return alpha
def _dirichlet_check_input(alpha, x):
x = np.asarray(x)
if x.shape[0] + 1 != alpha.shape[0] and x.shape[0] != alpha.shape[0]:
raise ValueError("Vector 'x' must have one entry less then the" +
" parameter vector 'a', but alpha.shape = " +
"%s and " % alpha.shape +
"x.shape = %s." % x.shape)
if x.shape[0] != alpha.shape[0]:
xk = np.array([1 - np.sum(x, 0)])
if xk.ndim == 1:
x = np.append(x, xk)
elif xk.ndim == 2:
x = np.vstack((x, xk))
else:
raise ValueError("The input must be one dimensional or a two "
"dimensional matrix containing the entries.")
if np.min(x) < 0:
raise ValueError("Each entry in 'x' must be greater or equal zero.")
if np.max(x) > 1:
raise ValueError("Each entry in 'x' must be smaller or equal one.")
if (np.abs(np.sum(x, 0) - 1.0) > 10e-10).any():
raise ValueError("The input vector 'x' must lie within the normal " +
"simplex. but sum(x)=%f." % np.sum(x, 0))
return x
def _lnB(alpha):
r"""
Internal helper function to compute the log of the useful quotient
.. math::
B(\alpha) = \frac{\prod_{i=1}^{K}\Gamma(\alpha_i)}{\Gamma\left(\sum_{i=1}^{K}\alpha_i\right)}
Parameters
----------
%(_dirichlet_doc_default_callparams)s
Returns
-------
B : scalar
Helper quotient, internal use only
"""
return np.sum(gammaln(alpha)) - gammaln(np.sum(alpha))
class dirichlet_gen(multi_rv_generic):
r"""
A Dirichlet random variable.
The `alpha` keyword specifies the concentration parameters of the
distribution.
.. versionadded:: 0.15.0
Methods
-------
``pdf(x, alpha)``
Probability density function.
``logpdf(x, alpha)``
Log of the probability density function.
``rvs(alpha, size=1, random_state=None)``
Draw random samples from a Dirichlet distribution.
``mean(alpha)``
The mean of the Dirichlet distribution
``var(alpha)``
The variance of the Dirichlet distribution
``entropy(alpha)``
Compute the differential entropy of the multivariate normal.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_dirichlet_doc_default_callparams)s
%(_doc_random_state)s
Alternatively, the object may be called (as a function) to fix
concentration parameters, returning a "frozen" Dirichlet
random variable:
rv = dirichlet(alpha)
- Frozen object with the same methods but holding the given
concentration parameters fixed.
Notes
-----
Each :math:`\alpha` entry must be positive. The distribution has only
support on the simplex defined by
.. math::
\sum_{i=1}^{K} x_i \le 1
The probability density function for `dirichlet` is
.. math::
f(x) = \frac{1}{\mathrm{B}(\boldsymbol\alpha)} \prod_{i=1}^K x_i^{\alpha_i - 1}
where
.. math::
\mathrm{B}(\boldsymbol\alpha) = \frac{\prod_{i=1}^K \Gamma(\alpha_i)}
{\Gamma\bigl(\sum_{i=1}^K \alpha_i\bigr)}
and :math:`\boldsymbol\alpha=(\alpha_1,\ldots,\alpha_K)`, the
concentration parameters and :math:`K` is the dimension of the space
where :math:`x` takes values.
"""
def __init__(self, seed=None):
super(dirichlet_gen, self).__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__, dirichlet_docdict_params)
def __call__(self, alpha, seed=None):
return dirichlet_frozen(alpha, seed=seed)
def _logpdf(self, x, alpha):
"""
Parameters
----------
x : ndarray
Points at which to evaluate the log of the probability
density function
%(_dirichlet_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'logpdf' instead.
"""
lnB = _lnB(alpha)
return - lnB + np.sum((np.log(x.T) * (alpha - 1)).T, 0)
def logpdf(self, x, alpha):
"""
Log of the Dirichlet probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_dirichlet_doc_default_callparams)s
Returns
-------
pdf : ndarray
Log of the probability density function evaluated at `x`.
"""
alpha = _dirichlet_check_parameters(alpha)
x = _dirichlet_check_input(alpha, x)
out = self._logpdf(x, alpha)
return _squeeze_output(out)
def pdf(self, x, alpha):
"""
The Dirichlet probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_dirichlet_doc_default_callparams)s
Returns
-------
pdf : ndarray
The probability density function evaluated at `x`.
"""
alpha = _dirichlet_check_parameters(alpha)
x = _dirichlet_check_input(alpha, x)
out = np.exp(self._logpdf(x, alpha))
return _squeeze_output(out)
def mean(self, alpha):
"""
Compute the mean of the dirichlet distribution.
Parameters
----------
%(_dirichlet_doc_default_callparams)s
Returns
-------
mu : ndarray or scalar
Mean of the Dirichlet distribution
"""
alpha = _dirichlet_check_parameters(alpha)
out = alpha / (np.sum(alpha))
return _squeeze_output(out)
def var(self, alpha):
"""
Compute the variance of the dirichlet distribution.
Parameters
----------
%(_dirichlet_doc_default_callparams)s
Returns
-------
v : ndarray or scalar
Variance of the Dirichlet distribution
"""
alpha = _dirichlet_check_parameters(alpha)
alpha0 = np.sum(alpha)
out = (alpha * (alpha0 - alpha)) / ((alpha0 * alpha0) * (alpha0 + 1))
return out
def entropy(self, alpha):
"""
Compute the differential entropy of the dirichlet distribution.
Parameters
----------
%(_dirichlet_doc_default_callparams)s
Returns
-------
h : scalar
Entropy of the Dirichlet distribution
"""
alpha = _dirichlet_check_parameters(alpha)
alpha0 = np.sum(alpha)
lnB = _lnB(alpha)
K = alpha.shape[0]
out = lnB + (alpha0 - K) * scipy.special.psi(alpha0) - np.sum(
(alpha - 1) * scipy.special.psi(alpha))
return _squeeze_output(out)
def rvs(self, alpha, size=1, random_state=None):
"""
Draw random samples from a Dirichlet distribution.
Parameters
----------
%(_dirichlet_doc_default_callparams)s
size : int, optional
Number of samples to draw (default 1).
%(_doc_random_state)s
Returns
-------
rvs : ndarray or scalar
Random variates of size (`size`, `N`), where `N` is the
dimension of the random variable.
"""
alpha = _dirichlet_check_parameters(alpha)
random_state = self._get_random_state(random_state)
return random_state.dirichlet(alpha, size=size)
dirichlet = dirichlet_gen()
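# --- Illustrative sketch (not part of the original module) ---
# Quick demonstration of the `dirichlet` interface defined above; the helper
# name and the alpha/x values are hypothetical.
def _dirichlet_usage_sketch():
    alpha = np.array([0.4, 5.0, 15.0])
    x = np.array([0.2, 0.2, 0.6])          # a point on the simplex
    # pdf/logpdf evaluate the density; mean/var return K-vectors
    return (dirichlet.pdf(x, alpha),
            dirichlet.mean(alpha),
            dirichlet.var(alpha),
            dirichlet.rvs(alpha, size=2, random_state=0))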
class dirichlet_frozen(multi_rv_frozen):
def __init__(self, alpha, seed=None):
self.alpha = _dirichlet_check_parameters(alpha)
self._dist = dirichlet_gen(seed)
def logpdf(self, x):
return self._dist.logpdf(x, self.alpha)
def pdf(self, x):
return self._dist.pdf(x, self.alpha)
def mean(self):
return self._dist.mean(self.alpha)
def var(self):
return self._dist.var(self.alpha)
def entropy(self):
return self._dist.entropy(self.alpha)
def rvs(self, size=1, random_state=None):
return self._dist.rvs(self.alpha, size, random_state)
# Set frozen generator docstrings from corresponding docstrings in
# multivariate_normal_gen and fill in default strings in class docstrings
for name in ['logpdf', 'pdf', 'rvs', 'mean', 'var', 'entropy']:
method = dirichlet_gen.__dict__[name]
method_frozen = dirichlet_frozen.__dict__[name]
method_frozen.__doc__ = doccer.docformat(
method.__doc__, dirichlet_docdict_noparams)
method.__doc__ = doccer.docformat(method.__doc__, dirichlet_docdict_params)
_wishart_doc_default_callparams = """\
df : int
Degrees of freedom, must be greater than or equal to dimension of the
scale matrix
scale : array_like
Symmetric positive definite scale matrix of the distribution
"""
_wishart_doc_callparams_note = ""
_wishart_doc_frozen_callparams = ""
_wishart_doc_frozen_callparams_note = \
"""See class definition for a detailed description of parameters."""
wishart_docdict_params = {
'_doc_default_callparams': _wishart_doc_default_callparams,
'_doc_callparams_note': _wishart_doc_callparams_note,
'_doc_random_state': _doc_random_state
}
wishart_docdict_noparams = {
'_doc_default_callparams': _wishart_doc_frozen_callparams,
'_doc_callparams_note': _wishart_doc_frozen_callparams_note,
'_doc_random_state': _doc_random_state
}
class wishart_gen(multi_rv_generic):
r"""
A Wishart random variable.
The `df` keyword specifies the degrees of freedom. The `scale` keyword
specifies the scale matrix, which must be symmetric and positive definite.
In this context, the scale matrix is often interpreted in terms of a
multivariate normal precision matrix (the inverse of the covariance
matrix).
Methods
-------
``pdf(x, df, scale)``
Probability density function.
``logpdf(x, df, scale)``
Log of the probability density function.
``rvs(df, scale, size=1, random_state=None)``
Draw random samples from a Wishart distribution.
``entropy()``
Compute the differential entropy of the Wishart distribution.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_doc_default_callparams)s
%(_doc_random_state)s
Alternatively, the object may be called (as a function) to fix the degrees
of freedom and scale parameters, returning a "frozen" Wishart random
variable:
rv = wishart(df=1, scale=1)
- Frozen object with the same methods but holding the given
degrees of freedom and scale fixed.
See Also
--------
invwishart, chi2
Notes
-----
%(_doc_callparams_note)s
The scale matrix `scale` must be a symmetric positive definite
matrix. Singular matrices, including the symmetric positive semi-definite
case, are not supported.
The Wishart distribution is often denoted
.. math::
W_p(\nu, \Sigma)
where :math:`\nu` is the degrees of freedom and :math:`\Sigma` is the
:math:`p \times p` scale matrix.
The probability density function for `wishart` has support over positive
definite matrices :math:`S`; if :math:`S \sim W_p(\nu, \Sigma)`, then
its PDF is given by:
.. math::
f(S) = \frac{|S|^{\frac{\nu - p - 1}{2}}}{2^{ \frac{\nu p}{2} }
|\Sigma|^\frac{\nu}{2} \Gamma_p \left ( \frac{\nu}{2} \right )}
\exp\left( -tr(\Sigma^{-1} S) / 2 \right)
If :math:`S \sim W_p(\nu, \Sigma)` (Wishart) then
:math:`S^{-1} \sim W_p^{-1}(\nu, \Sigma^{-1})` (inverse Wishart).
If the scale matrix is 1-dimensional and equal to one, then the Wishart
distribution :math:`W_1(\nu, 1)` collapses to the :math:`\chi^2(\nu)`
distribution.
.. versionadded:: 0.16.0
References
----------
.. [1] M.L. Eaton, "Multivariate Statistics: A Vector Space Approach",
Wiley, 1983.
.. [2] W.B. Smith and R.R. Hocking, "Algorithm AS 53: Wishart Variate
Generator", Applied Statistics, vol. 21, pp. 341-345, 1972.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy.stats import wishart, chi2
>>> x = np.linspace(1e-5, 8, 100)
>>> w = wishart.pdf(x, df=3, scale=1); w[:5]
array([ 0.00126156, 0.10892176, 0.14793434, 0.17400548, 0.1929669 ])
>>> c = chi2.pdf(x, 3); c[:5]
array([ 0.00126156, 0.10892176, 0.14793434, 0.17400548, 0.1929669 ])
>>> plt.plot(x, w)
The input quantiles can be any shape of array, as long as the last
axis labels the components.
"""
def __init__(self, seed=None):
super(wishart_gen, self).__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__, wishart_docdict_params)
def __call__(self, df=None, scale=None, seed=None):
"""
Create a frozen Wishart distribution.
See `wishart_frozen` for more information.
"""
return wishart_frozen(df, scale, seed)
def _process_parameters(self, df, scale):
if scale is None:
scale = 1.0
scale = np.asarray(scale, dtype=float)
if scale.ndim == 0:
scale = scale[np.newaxis,np.newaxis]
elif scale.ndim == 1:
scale = np.diag(scale)
elif scale.ndim == 2 and not scale.shape[0] == scale.shape[1]:
raise ValueError("Array 'scale' must be square if it is two"
" dimensional, but scale.scale = %s."
% str(scale.shape))
elif scale.ndim > 2:
raise ValueError("Array 'scale' must be at most two-dimensional,"
" but scale.ndim = %d" % scale.ndim)
dim = scale.shape[0]
if df is None:
df = dim
elif not np.isscalar(df):
raise ValueError("Degrees of freedom must be a scalar.")
elif df < dim:
raise ValueError("Degrees of freedom cannot be less than dimension"
" of scale matrix, but df = %d" % df)
return dim, df, scale
def _process_quantiles(self, x, dim):
"""
Adjust quantiles array so that last axis labels the components of
each data point.
"""
x = np.asarray(x, dtype=float)
if x.ndim == 0:
x = x * np.eye(dim)[:, :, np.newaxis]
if x.ndim == 1:
if dim == 1:
x = x[np.newaxis, np.newaxis, :]
else:
x = np.diag(x)[:, :, np.newaxis]
elif x.ndim == 2:
if not x.shape[0] == x.shape[1]:
raise ValueError("Quantiles must be square if they are two"
" dimensional, but x.shape = %s."
% str(x.shape))
x = x[:, :, np.newaxis]
elif x.ndim == 3:
if not x.shape[0] == x.shape[1]:
raise ValueError("Quantiles must be square in the first two"
" dimensions if they are three dimensional"
", but x.shape = %s." % str(x.shape))
elif x.ndim > 3:
raise ValueError("Quantiles must be at most two-dimensional with"
" an additional dimension for multiple"
"components, but x.ndim = %d" % x.ndim)
# Now we have 3-dim array; should have shape [dim, dim, *]
if not x.shape[0:2] == (dim, dim):
raise ValueError('Quantiles have incompatible dimensions: should'
' be %s, got %s.' % ((dim, dim), x.shape[0:2]))
return x
def _process_size(self, size):
size = np.asarray(size)
if size.ndim == 0:
size = size[np.newaxis]
elif size.ndim > 1:
raise ValueError('Size must be an integer or tuple of integers;'
' thus must have dimension <= 1.'
' Got size = %s' % str(tuple(size)))
n = size.prod()
shape = tuple(size)
return n, shape
def _logpdf(self, x, dim, df, scale, log_det_scale, C):
"""
Parameters
----------
x : ndarray
Points at which to evaluate the log of the probability
density function
dim : int
Dimension of the scale matrix
df : int
Degrees of freedom
scale : ndarray
Scale matrix
log_det_scale : float
Logarithm of the determinant of the scale matrix
C : ndarray
Cholesky factorization of the scale matrix, lower triangular.
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'logpdf' instead.
"""
# log determinant of x
# Note: x has components along the last axis, so that x.T has
# components along the 0-th axis. Then since det(A) = det(A'), this
# gives us a 1-dim vector of determinants
# Retrieve tr(scale^{-1} x)
log_det_x = np.zeros(x.shape[-1])
scale_inv_x = np.zeros(x.shape)
tr_scale_inv_x = np.zeros(x.shape[-1])
for i in range(x.shape[-1]):
_, log_det_x[i] = self._cholesky_logdet(x[:,:,i])
scale_inv_x[:,:,i] = scipy.linalg.cho_solve((C, True), x[:,:,i])
tr_scale_inv_x[i] = scale_inv_x[:,:,i].trace()
# Log PDF
out = ((0.5 * (df - dim - 1) * log_det_x - 0.5 * tr_scale_inv_x) -
(0.5 * df * dim * _LOG_2 + 0.5 * df * log_det_scale +
multigammaln(0.5*df, dim)))
return out
def logpdf(self, x, df, scale):
"""
Log of the Wishart probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
Each quantile must be a symmetric positive definite matrix.
%(_doc_default_callparams)s
Returns
-------
pdf : ndarray
Log of the probability density function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
"""
dim, df, scale = self._process_parameters(df, scale)
x = self._process_quantiles(x, dim)
# Cholesky decomposition of scale, get log(det(scale))
C, log_det_scale = self._cholesky_logdet(scale)
out = self._logpdf(x, dim, df, scale, log_det_scale, C)
return _squeeze_output(out)
def pdf(self, x, df, scale):
"""
Wishart probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
Each quantile must be a symmetric positive definite matrix.
%(_doc_default_callparams)s
Returns
-------
pdf : ndarray
Probability density function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
"""
return np.exp(self.logpdf(x, df, scale))
def _mean(self, dim, df, scale):
"""
Parameters
----------
dim : int
Dimension of the scale matrix
%(_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'mean' instead.
"""
return df * scale
def mean(self, df, scale):
"""
Mean of the Wishart distribution
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
mean : ndarray or float
The mean of the distribution
"""
dim, df, scale = self._process_parameters(df, scale)
out = self._mean(dim, df, scale)
return _squeeze_output(out)
def _mode(self, dim, df, scale):
"""
Parameters
----------
dim : int
Dimension of the scale matrix
%(_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'mode' instead.
"""
if df >= dim + 1:
out = (df-dim-1) * scale
else:
out = None
return out
def mode(self, df, scale):
"""
Mode of the Wishart distribution
Only valid if the degrees of freedom are greater than the dimension of
the scale matrix.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
mode : ndarray, float, or None
The Mode of the distribution
"""
dim, df, scale = self._process_parameters(df, scale)
out = self._mode(dim, df, scale)
return _squeeze_output(out) if out is not None else out
def _var(self, dim, df, scale):
"""
Parameters
----------
dim : int
Dimension of the scale matrix
%(_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'var' instead.
"""
var = scale**2
diag = scale.diagonal() # 1 x dim array
var += np.outer(diag, diag)
var *= df
return var
def var(self, df, scale):
"""
Variance of the Wishart distribution
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
var : ndarray or float
The variance of the distribution
"""
dim, df, scale = self._process_parameters(df, scale)
out = self._var(dim, df, scale)
return _squeeze_output(out)
def _standard_rvs(self, n, shape, dim, df, random_state):
"""
Parameters
----------
n : integer
Number of variates to generate
shape : iterable
Shape of the variates to generate
dim : int
Dimension of the scale matrix
df : int
Degrees of freedom
random_state : np.random.RandomState instance
RandomState used for drawing the random variates.
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'rvs' instead.
"""
# Random normal variates for off-diagonal elements
n_tril = dim * (dim-1) // 2
covariances = random_state.normal(
size=n*n_tril).reshape(shape+(n_tril,))
# Random chi-square variates for diagonal elements
variances = np.r_[[random_state.chisquare(df-(i+1)+1, size=n)**0.5
for i in range(dim)]].reshape((dim,) + shape[::-1]).T
# Create the A matri(ces) - lower triangular
A = np.zeros(shape + (dim, dim))
# Input the covariances
size_idx = tuple([slice(None,None,None)]*len(shape))
tril_idx = np.tril_indices(dim, k=-1)
A[size_idx + tril_idx] = covariances
# Input the variances
diag_idx = np.diag_indices(dim)
A[size_idx + diag_idx] = variances
return A
def _rvs(self, n, shape, dim, df, C, random_state):
"""
Parameters
----------
n : integer
Number of variates to generate
shape : iterable
Shape of the variates to generate
dim : int
Dimension of the scale matrix
df : int
Degrees of freedom
scale : ndarray
Scale matrix
C : ndarray
Cholesky factorization of the scale matrix, lower triangular.
%(_doc_random_state)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'rvs' instead.
"""
random_state = self._get_random_state(random_state)
# Calculate the matrices A, which are actually lower triangular
# Cholesky factorizations of a matrix B such that B ~ W(df, I)
A = self._standard_rvs(n, shape, dim, df, random_state)
# Calculate SA = C A A' C', where SA ~ W(df, scale)
# Note: this is the product of a (lower) (lower) (lower)' (lower)'
# or, denoting B = AA', it is C B C' where C is the lower
# triangular Cholesky factorization of the scale matrix.
# this appears to conflict with the instructions in [1]_, which
# suggest that it should be D' B D where D is the lower
# triangular factorization of the scale matrix. However, it is
# meant to refer to the Bartlett (1933) representation of a
# Wishart random variate as L A A' L' where L is lower triangular
# so it appears that understanding D' to be upper triangular
# is either a typo in or misreading of [1]_.
for index in np.ndindex(shape):
CA = np.dot(C, A[index])
A[index] = np.dot(CA, CA.T)
return A
def rvs(self, df, scale, size=1, random_state=None):
"""
Draw random samples from a Wishart distribution.
Parameters
----------
%(_doc_default_callparams)s
size : integer or iterable of integers, optional
Number of samples to draw (default 1).
%(_doc_random_state)s
Returns
-------
rvs : ndarray
Random variates of shape (`size`) + (`dim`, `dim`), where `dim` is
the dimension of the scale matrix.
Notes
-----
%(_doc_callparams_note)s
"""
n, shape = self._process_size(size)
dim, df, scale = self._process_parameters(df, scale)
# Cholesky decomposition of scale
C = scipy.linalg.cholesky(scale, lower=True)
out = self._rvs(n, shape, dim, df, C, random_state)
return _squeeze_output(out)
def _entropy(self, dim, df, log_det_scale):
"""
Parameters
----------
dim : int
Dimension of the scale matrix
df : int
Degrees of freedom
log_det_scale : float
Logarithm of the determinant of the scale matrix
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'entropy' instead.
"""
return (
0.5 * (dim+1) * log_det_scale +
0.5 * dim * (dim+1) * _LOG_2 +
multigammaln(0.5*df, dim) -
0.5 * (df - dim - 1) * np.sum(
[psi(0.5*(df + 1 - (i+1))) for i in range(dim)]
) +
0.5 * df * dim
)
def entropy(self, df, scale):
"""
Compute the differential entropy of the Wishart.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
h : scalar
Entropy of the Wishart distribution
Notes
-----
%(_doc_callparams_note)s
"""
dim, df, scale = self._process_parameters(df, scale)
_, log_det_scale = self._cholesky_logdet(scale)
return self._entropy(dim, df, log_det_scale)
def _cholesky_logdet(self, scale):
"""
Compute Cholesky decomposition and determine (log(det(scale)).
Parameters
----------
scale : ndarray
Scale matrix.
Returns
-------
c_decomp : ndarray
The Cholesky decomposition of `scale`.
logdet : scalar
The log of the determinant of `scale`.
Notes
-----
This computation of ``logdet`` is equivalent to
``np.linalg.slogdet(scale)``. It is ~2x faster though.
"""
c_decomp = scipy.linalg.cholesky(scale, lower=True)
logdet = 2 * np.sum(np.log(c_decomp.diagonal()))
return c_decomp, logdet
wishart = wishart_gen()
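# --- Illustrative sketch (not part of the original module) ---
# Sanity check of the Bartlett-decomposition sampler implemented above: the
# empirical mean of Wishart draws should approach df * scale, which is what
# `mean` returns. The helper name and parameter values are hypothetical.
def _wishart_rvs_sketch():
    df = 5
    scale = np.array([[2.0, 0.3], [0.3, 1.0]])
    samples = wishart.rvs(df, scale, size=2000, random_state=1)
    return samples.mean(axis=0), wishart.mean(df, scale)  # both close to df * scale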
class wishart_frozen(multi_rv_frozen):
"""
Create a frozen Wishart distribution.
Parameters
----------
df : array_like
Degrees of freedom of the distribution
scale : array_like
Scale matrix of the distribution
seed : None or int or np.random.RandomState instance, optional
This parameter defines the RandomState object to use for drawing
random variates.
If None (or np.random), the global np.random state is used.
If integer, it is used to seed the local RandomState instance
Default is None.
"""
def __init__(self, df, scale, seed=None):
self._dist = wishart_gen(seed)
self.dim, self.df, self.scale = self._dist._process_parameters(
df, scale)
self.C, self.log_det_scale = self._dist._cholesky_logdet(self.scale)
def logpdf(self, x):
x = self._dist._process_quantiles(x, self.dim)
out = self._dist._logpdf(x, self.dim, self.df, self.scale,
self.log_det_scale, self.C)
return _squeeze_output(out)
def pdf(self, x):
return np.exp(self.logpdf(x))
def mean(self):
out = self._dist._mean(self.dim, self.df, self.scale)
return _squeeze_output(out)
def mode(self):
out = self._dist._mode(self.dim, self.df, self.scale)
return _squeeze_output(out) if out is not None else out
def var(self):
out = self._dist._var(self.dim, self.df, self.scale)
return _squeeze_output(out)
def rvs(self, size=1, random_state=None):
n, shape = self._dist._process_size(size)
out = self._dist._rvs(n, shape, self.dim, self.df,
self.C, random_state)
return _squeeze_output(out)
def entropy(self):
return self._dist._entropy(self.dim, self.df, self.log_det_scale)
# Set frozen generator docstrings from corresponding docstrings in
# Wishart and fill in default strings in class docstrings
for name in ['logpdf', 'pdf', 'mean', 'mode', 'var', 'rvs', 'entropy']:
method = wishart_gen.__dict__[name]
method_frozen = wishart_frozen.__dict__[name]
method_frozen.__doc__ = doccer.docformat(
method.__doc__, wishart_docdict_noparams)
method.__doc__ = doccer.docformat(method.__doc__, wishart_docdict_params)
from numpy import asarray_chkfinite, asarray
from scipy.linalg.misc import LinAlgError
from scipy.linalg.lapack import get_lapack_funcs
def _cho_inv_batch(a, check_finite=True):
"""
Invert the matrices a_i, using a Cholesky factorization of A, where
a_i resides in the last two dimensions of a and the other indices describe
the index i.
Overwrites the data in a.
Parameters
----------
a : array
Array of matrices to invert, where the matrices themselves are stored
in the last two dimensions.
check_finite : bool, optional
Whether to check that the input matrices contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Returns
-------
x : array
Array of inverses of the matrices ``a_i``.
See also
--------
scipy.linalg.cholesky : Cholesky factorization of a matrix
"""
if check_finite:
a1 = asarray_chkfinite(a)
else:
a1 = asarray(a)
if len(a1.shape) < 2 or a1.shape[-2] != a1.shape[-1]:
raise ValueError('expected square matrix in last two dimensions')
potrf, potri = get_lapack_funcs(('potrf','potri'), (a1,))
tril_idx = np.tril_indices(a.shape[-2], k=-1)
triu_idx = np.triu_indices(a.shape[-2], k=1)
for index in np.ndindex(a1.shape[:-2]):
# Cholesky decomposition
a1[index], info = potrf(a1[index], lower=True, overwrite_a=False,
clean=False)
if info > 0:
raise LinAlgError("%d-th leading minor not positive definite"
% info)
if info < 0:
raise ValueError('illegal value in %d-th argument of internal'
' potrf' % -info)
# Inversion
a1[index], info = potri(a1[index], lower=True, overwrite_c=False)
if info > 0:
raise LinAlgError("the inverse could not be computed")
if info < 0:
raise ValueError('illegal value in %d-th argument of internal'
' potri' % -info)
# Make symmetric (dpotri only fills in the lower triangle)
a1[index][triu_idx] = a1[index][tril_idx]
return a1
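# --- Illustrative sketch (not part of the original module) ---
# _cho_inv_batch inverts a stack of symmetric positive definite matrices in
# place via LAPACK potrf/potri; for well-conditioned input it should agree
# with np.linalg.inv. The helper name and test matrices are hypothetical.
def _cho_inv_batch_sketch():
    a = np.array([np.eye(3) * (i + 2.0) for i in range(4)])  # stack of SPD matrices
    expected = np.linalg.inv(a)             # reference inverses
    got = _cho_inv_batch(a.copy())          # copy: the routine overwrites its input
    return np.allclose(got, expected)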
class invwishart_gen(wishart_gen):
r"""
An inverse Wishart random variable.
The `df` keyword specifies the degrees of freedom. The `scale` keyword
specifies the scale matrix, which must be symmetric and positive definite.
In this context, the scale matrix is often interpreted in terms of a
multivariate normal covariance matrix.
Methods
-------
``pdf(x, df, scale)``
Probability density function.
``logpdf(x, df, scale)``
Log of the probability density function.
``rvs(df, scale, size=1, random_state=None)``
Draw random samples from an inverse Wishart distribution.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_doc_default_callparams)s
%(_doc_random_state)s
Alternatively, the object may be called (as a function) to fix the degrees
of freedom and scale parameters, returning a "frozen" inverse Wishart
random variable:
rv = invwishart(df=1, scale=1)
- Frozen object with the same methods but holding the given
degrees of freedom and scale fixed.
See Also
--------
wishart
Notes
-----
%(_doc_callparams_note)s
The scale matrix `scale` must be a symmetric positive definite
matrix. Singular matrices, including the symmetric positive semi-definite
case, are not supported.
The inverse Wishart distribution is often denoted
.. math::
W_p^{-1}(\nu, \Psi)
where :math:`\nu` is the degrees of freedom and :math:`\Psi` is the
:math:`p \times p` scale matrix.
The probability density function for `invwishart` has support over positive
definite matrices :math:`S`; if :math:`S \sim W^{-1}_p(\nu, \Sigma)`,
then its PDF is given by:
.. math::
f(S) = \frac{|\Sigma|^\frac{\nu}{2}}{2^{ \frac{\nu p}{2} }
|S|^{\frac{\nu + p + 1}{2}} \Gamma_p \left(\frac{\nu}{2} \right)}
\exp\left( -tr(\Sigma S^{-1}) / 2 \right)
If :math:`S \sim W_p^{-1}(\nu, \Psi)` (inverse Wishart) then
:math:`S^{-1} \sim W_p(\nu, \Psi^{-1})` (Wishart).
If the scale matrix is 1-dimensional and equal to one, then the inverse
    Wishart distribution :math:`W_1^{-1}(\nu, 1)` collapses to the
inverse Gamma distribution with parameters shape = :math:`\frac{\nu}{2}`
and scale = :math:`\frac{1}{2}`.
.. versionadded:: 0.16.0
References
----------
.. [1] M.L. Eaton, "Multivariate Statistics: A Vector Space Approach",
Wiley, 1983.
.. [2] M.C. Jones, "Generating Inverse Wishart Matrices", Communications in
Statistics - Simulation and Computation, vol. 14.2, pp.511-514, 1985.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy.stats import invwishart, invgamma
>>> x = np.linspace(0.01, 1, 100)
>>> iw = invwishart.pdf(x, df=6, scale=1)
>>> iw[:3]
array([ 1.20546865e-15, 5.42497807e-06, 4.45813929e-03])
>>> ig = invgamma.pdf(x, 6/2., scale=1./2)
>>> ig[:3]
array([ 1.20546865e-15, 5.42497807e-06, 4.45813929e-03])
>>> plt.plot(x, iw)
The input quantiles can be any shape of array, as long as the last
axis labels the components.
"""
def __init__(self, seed=None):
super(invwishart_gen, self).__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__, wishart_docdict_params)
def __call__(self, df=None, scale=None, seed=None):
"""
Create a frozen inverse Wishart distribution.
See `invwishart_frozen` for more information.
"""
return invwishart_frozen(df, scale, seed)
def _logpdf(self, x, dim, df, scale, log_det_scale):
"""
Parameters
----------
x : ndarray
Points at which to evaluate the log of the probability
density function.
dim : int
Dimension of the scale matrix
df : int
Degrees of freedom
scale : ndarray
Scale matrix
log_det_scale : float
Logarithm of the determinant of the scale matrix
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'logpdf' instead.
"""
log_det_x = np.zeros(x.shape[-1])
#scale_x_inv = np.zeros(x.shape)
x_inv = np.copy(x).T
if dim > 1:
_cho_inv_batch(x_inv) # works in-place
else:
x_inv = 1./x_inv
tr_scale_x_inv = np.zeros(x.shape[-1])
for i in range(x.shape[-1]):
C, lower = scipy.linalg.cho_factor(x[:,:,i], lower=True)
log_det_x[i] = 2 * np.sum(np.log(C.diagonal()))
#scale_x_inv[:,:,i] = scipy.linalg.cho_solve((C, True), scale).T
tr_scale_x_inv[i] = np.dot(scale, x_inv[i]).trace()
# Log PDF
out = ((0.5 * df * log_det_scale - 0.5 * tr_scale_x_inv) -
(0.5 * df * dim * _LOG_2 + 0.5 * (df + dim + 1) * log_det_x) -
multigammaln(0.5*df, dim))
return out
def logpdf(self, x, df, scale):
"""
Log of the inverse Wishart probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
Each quantile must be a symmetric positive definite matrix.
%(_doc_default_callparams)s
Returns
-------
pdf : ndarray
Log of the probability density function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
"""
dim, df, scale = self._process_parameters(df, scale)
x = self._process_quantiles(x, dim)
_, log_det_scale = self._cholesky_logdet(scale)
out = self._logpdf(x, dim, df, scale, log_det_scale)
return _squeeze_output(out)
def pdf(self, x, df, scale):
"""
Inverse Wishart probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
Each quantile must be a symmetric positive definite matrix.
%(_doc_default_callparams)s
Returns
-------
pdf : ndarray
Probability density function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
"""
return np.exp(self.logpdf(x, df, scale))
def _mean(self, dim, df, scale):
"""
Parameters
----------
dim : int
Dimension of the scale matrix
%(_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'mean' instead.
"""
if df > dim + 1:
out = scale / (df - dim - 1)
else:
out = None
return out
def mean(self, df, scale):
"""
Mean of the inverse Wishart distribution
Only valid if the degrees of freedom are greater than the dimension of
the scale matrix plus one.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
mean : float or None
The mean of the distribution
"""
dim, df, scale = self._process_parameters(df, scale)
out = self._mean(dim, df, scale)
return _squeeze_output(out) if out is not None else out
def _mode(self, dim, df, scale):
"""
Parameters
----------
dim : int
Dimension of the scale matrix
%(_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'mode' instead.
"""
return scale / (df + dim + 1)
def mode(self, df, scale):
"""
Mode of the inverse Wishart distribution
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
mode : float
The Mode of the distribution
"""
dim, df, scale = self._process_parameters(df, scale)
out = self._mode(dim, df, scale)
return _squeeze_output(out)
def _var(self, dim, df, scale):
"""
Parameters
----------
dim : int
Dimension of the scale matrix
%(_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'var' instead.
"""
if df > dim + 3:
var = (df - dim + 1) * scale**2
diag = scale.diagonal() # 1 x dim array
var += (df - dim - 1) * np.outer(diag, diag)
var /= (df - dim) * (df - dim - 1)**2 * (df - dim - 3)
else:
var = None
return var
def var(self, df, scale):
"""
Variance of the inverse Wishart distribution
Only valid if the degrees of freedom are greater than the dimension of
the scale matrix plus three.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
var : float
The variance of the distribution
"""
dim, df, scale = self._process_parameters(df, scale)
out = self._var(dim, df, scale)
return _squeeze_output(out) if out is not None else out
def _rvs(self, n, shape, dim, df, C, random_state):
"""
Parameters
----------
n : integer
Number of variates to generate
shape : iterable
Shape of the variates to generate
dim : int
Dimension of the scale matrix
df : int
Degrees of freedom
C : ndarray
            Cholesky factorization of the scale matrix, lower triangular.
%(_doc_random_state)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'rvs' instead.
"""
random_state = self._get_random_state(random_state)
# Get random draws A such that A ~ W(df, I)
A = super(invwishart_gen, self)._standard_rvs(n, shape, dim,
df, random_state)
# Calculate SA = (CA)'^{-1} (CA)^{-1} ~ iW(df, scale)
eye = np.eye(dim)
trtrs = get_lapack_funcs(('trtrs'), (A,))
for index in np.ndindex(A.shape[:-2]):
# Calculate CA
CA = np.dot(C, A[index])
# Get (C A)^{-1} via triangular solver
if dim > 1:
CA, info = trtrs(CA, eye, lower=True)
if info > 0:
raise LinAlgError("Singular matrix.")
if info < 0:
raise ValueError('Illegal value in %d-th argument of'
' internal trtrs' % -info)
else:
CA = 1. / CA
# Get SA
A[index] = np.dot(CA.T, CA)
return A
def rvs(self, df, scale, size=1, random_state=None):
"""
Draw random samples from an inverse Wishart distribution.
Parameters
----------
%(_doc_default_callparams)s
size : integer or iterable of integers, optional
Number of samples to draw (default 1).
%(_doc_random_state)s
Returns
-------
rvs : ndarray
            Random variates of shape (`size`) + (`dim`, `dim`), where `dim` is
the dimension of the scale matrix.
Notes
-----
%(_doc_callparams_note)s
"""
n, shape = self._process_size(size)
dim, df, scale = self._process_parameters(df, scale)
# Invert the scale
eye = np.eye(dim)
L, lower = scipy.linalg.cho_factor(scale, lower=True)
inv_scale = scipy.linalg.cho_solve((L, lower), eye)
# Cholesky decomposition of inverted scale
C = scipy.linalg.cholesky(inv_scale, lower=True)
out = self._rvs(n, shape, dim, df, C, random_state)
return _squeeze_output(out)
def entropy(self):
# Need to find reference for inverse Wishart entropy
raise AttributeError
invwishart = invwishart_gen()
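# Minimal usage sketch for the instance created above (illustrative only; the
# numbers are arbitrary and the calls mirror the class docstring):
#
#     S = invwishart.rvs(df=5, scale=np.eye(3), random_state=0)   # one 3x3 draw
#     invwishart.pdf(S, df=5, scale=np.eye(3))                    # density at that draw
#     invwishart(df=5, scale=np.eye(3)).mean()                    # frozen variant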
class invwishart_frozen(multi_rv_frozen):
def __init__(self, df, scale, seed=None):
"""
Create a frozen inverse Wishart distribution.
Parameters
----------
df : array_like
Degrees of freedom of the distribution
scale : array_like
Scale matrix of the distribution
seed : None or int or np.random.RandomState instance, optional
This parameter defines the RandomState object to use for drawing
random variates.
If None (or np.random), the global np.random state is used.
If integer, it is used to seed the local RandomState instance
Default is None.
"""
self._dist = invwishart_gen(seed)
self.dim, self.df, self.scale = self._dist._process_parameters(
df, scale
)
# Get the determinant via Cholesky factorization
C, lower = scipy.linalg.cho_factor(self.scale, lower=True)
self.log_det_scale = 2 * np.sum(np.log(C.diagonal()))
# Get the inverse using the Cholesky factorization
eye = np.eye(self.dim)
self.inv_scale = scipy.linalg.cho_solve((C, lower), eye)
# Get the Cholesky factorization of the inverse scale
self.C = scipy.linalg.cholesky(self.inv_scale, lower=True)
def logpdf(self, x):
x = self._dist._process_quantiles(x, self.dim)
out = self._dist._logpdf(x, self.dim, self.df, self.scale,
self.log_det_scale)
return _squeeze_output(out)
def pdf(self, x):
return np.exp(self.logpdf(x))
def mean(self):
out = self._dist._mean(self.dim, self.df, self.scale)
return _squeeze_output(out) if out is not None else out
def mode(self):
out = self._dist._mode(self.dim, self.df, self.scale)
return _squeeze_output(out)
def var(self):
out = self._dist._var(self.dim, self.df, self.scale)
return _squeeze_output(out) if out is not None else out
def rvs(self, size=1, random_state=None):
n, shape = self._dist._process_size(size)
out = self._dist._rvs(n, shape, self.dim, self.df,
self.C, random_state)
return _squeeze_output(out)
def entropy(self):
# Need to find reference for inverse Wishart entropy
raise AttributeError
# Set frozen generator docstrings from corresponding docstrings in
# inverse Wishart and fill in default strings in class docstrings
for name in ['logpdf', 'pdf', 'mean', 'mode', 'var', 'rvs']:
method = invwishart_gen.__dict__[name]
    method_frozen = invwishart_frozen.__dict__[name]
method_frozen.__doc__ = doccer.docformat(
method.__doc__, wishart_docdict_noparams)
method.__doc__ = doccer.docformat(method.__doc__, wishart_docdict_params)
| apache-2.0 |
harshaneelhg/scikit-learn | sklearn/ensemble/tests/test_partial_dependence.py | 365 | 6996 | """
Testing for the partial dependence module.
"""
import numpy as np
from numpy.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import if_matplotlib
from sklearn.ensemble.partial_dependence import partial_dependence
from sklearn.ensemble.partial_dependence import plot_partial_dependence
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import GradientBoostingRegressor
from sklearn import datasets
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the boston dataset
boston = datasets.load_boston()
# also load the iris dataset
iris = datasets.load_iris()
def test_partial_dependence_classifier():
# Test partial dependence for classifier
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
pdp, axes = partial_dependence(clf, [0], X=X, grid_resolution=5)
# only 4 grid points instead of 5 because only 4 unique X[:,0] vals
assert pdp.shape == (1, 4)
assert axes[0].shape[0] == 4
# now with our own grid
X_ = np.asarray(X)
grid = np.unique(X_[:, 0])
pdp_2, axes = partial_dependence(clf, [0], grid=grid)
assert axes is None
assert_array_equal(pdp, pdp_2)
def test_partial_dependence_multiclass():
# Test partial dependence for multi-class classifier
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, iris.target)
grid_resolution = 25
n_classes = clf.n_classes_
pdp, axes = partial_dependence(
clf, [0], X=iris.data, grid_resolution=grid_resolution)
assert pdp.shape == (n_classes, grid_resolution)
assert len(axes) == 1
assert axes[0].shape[0] == grid_resolution
def test_partial_dependence_regressor():
# Test partial dependence for regressor
clf = GradientBoostingRegressor(n_estimators=10, random_state=1)
clf.fit(boston.data, boston.target)
grid_resolution = 25
pdp, axes = partial_dependence(
clf, [0], X=boston.data, grid_resolution=grid_resolution)
assert pdp.shape == (1, grid_resolution)
assert axes[0].shape[0] == grid_resolution
def test_partial_dependence_input():
# Test input validation of partial dependence.
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
assert_raises(ValueError, partial_dependence,
clf, [0], grid=None, X=None)
assert_raises(ValueError, partial_dependence,
clf, [0], grid=[0, 1], X=X)
# first argument must be an instance of BaseGradientBoosting
assert_raises(ValueError, partial_dependence,
{}, [0], X=X)
# Gradient boosting estimator must be fit
assert_raises(ValueError, partial_dependence,
GradientBoostingClassifier(), [0], X=X)
assert_raises(ValueError, partial_dependence, clf, [-1], X=X)
assert_raises(ValueError, partial_dependence, clf, [100], X=X)
# wrong ndim for grid
grid = np.random.rand(10, 2, 1)
assert_raises(ValueError, partial_dependence, clf, [0], grid=grid)
@if_matplotlib
def test_plot_partial_dependence():
# Test partial dependence plot function.
clf = GradientBoostingRegressor(n_estimators=10, random_state=1)
clf.fit(boston.data, boston.target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, boston.data, [0, 1, (0, 1)],
grid_resolution=grid_resolution,
feature_names=boston.feature_names)
assert len(axs) == 3
assert all(ax.has_data for ax in axs)
# check with str features and array feature names
fig, axs = plot_partial_dependence(clf, boston.data, ['CRIM', 'ZN',
('CRIM', 'ZN')],
grid_resolution=grid_resolution,
feature_names=boston.feature_names)
assert len(axs) == 3
assert all(ax.has_data for ax in axs)
# check with list feature_names
feature_names = boston.feature_names.tolist()
fig, axs = plot_partial_dependence(clf, boston.data, ['CRIM', 'ZN',
('CRIM', 'ZN')],
grid_resolution=grid_resolution,
feature_names=feature_names)
assert len(axs) == 3
assert all(ax.has_data for ax in axs)
@if_matplotlib
def test_plot_partial_dependence_input():
# Test partial dependence plot function input checks.
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
# not fitted yet
assert_raises(ValueError, plot_partial_dependence,
clf, X, [0])
clf.fit(X, y)
assert_raises(ValueError, plot_partial_dependence,
clf, np.array(X)[:, :0], [0])
# first argument must be an instance of BaseGradientBoosting
assert_raises(ValueError, plot_partial_dependence,
{}, X, [0])
# must be larger than -1
assert_raises(ValueError, plot_partial_dependence,
clf, X, [-1])
# too large feature value
assert_raises(ValueError, plot_partial_dependence,
clf, X, [100])
# str feature but no feature_names
assert_raises(ValueError, plot_partial_dependence,
clf, X, ['foobar'])
# not valid features value
assert_raises(ValueError, plot_partial_dependence,
clf, X, [{'foo': 'bar'}])
@if_matplotlib
def test_plot_partial_dependence_multiclass():
# Test partial dependence plot function on multi-class input.
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, iris.target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, iris.data, [0, 1],
label=0,
grid_resolution=grid_resolution)
assert len(axs) == 2
assert all(ax.has_data for ax in axs)
# now with symbol labels
target = iris.target_names[iris.target]
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, iris.data, [0, 1],
label='setosa',
grid_resolution=grid_resolution)
assert len(axs) == 2
assert all(ax.has_data for ax in axs)
# label not in gbrt.classes_
assert_raises(ValueError, plot_partial_dependence,
clf, iris.data, [0, 1], label='foobar',
grid_resolution=grid_resolution)
# label not provided
assert_raises(ValueError, plot_partial_dependence,
clf, iris.data, [0, 1],
grid_resolution=grid_resolution)
| bsd-3-clause |
ischwabacher/seaborn | seaborn/algorithms.py | 35 | 6889 | """Algorithms to support fitting routines in seaborn plotting functions."""
from __future__ import division
import numpy as np
from scipy import stats
from .external.six.moves import range
def bootstrap(*args, **kwargs):
"""Resample one or more arrays with replacement and store aggregate values.
Positional arguments are a sequence of arrays to bootstrap along the first
axis and pass to a summary function.
Keyword arguments:
n_boot : int, default 10000
Number of iterations
axis : int, default None
Will pass axis to ``func`` as a keyword argument.
units : array, default None
Array of sampling unit IDs. When used the bootstrap resamples units
and then observations within units instead of individual
datapoints.
smooth : bool, default False
If True, performs a smoothed bootstrap (draws samples from a kernel
        density estimate); only works for one-dimensional inputs and cannot
        be used if `units` is present.
func : callable, default np.mean
Function to call on the args that are passed in.
random_seed : int | None, default None
Seed for the random number generator; useful if you want
reproducible resamples.
Returns
-------
boot_dist: array
array of bootstrapped statistic values
"""
# Ensure list of arrays are same length
if len(np.unique(list(map(len, args)))) > 1:
raise ValueError("All input arrays must have the same length")
n = len(args[0])
# Default keyword arguments
n_boot = kwargs.get("n_boot", 10000)
func = kwargs.get("func", np.mean)
axis = kwargs.get("axis", None)
units = kwargs.get("units", None)
smooth = kwargs.get("smooth", False)
random_seed = kwargs.get("random_seed", None)
if axis is None:
func_kwargs = dict()
else:
func_kwargs = dict(axis=axis)
# Initialize the resampler
rs = np.random.RandomState(random_seed)
# Coerce to arrays
args = list(map(np.asarray, args))
if units is not None:
units = np.asarray(units)
# Do the bootstrap
if smooth:
return _smooth_bootstrap(args, n_boot, func, func_kwargs)
if units is not None:
return _structured_bootstrap(args, n_boot, units, func,
func_kwargs, rs)
boot_dist = []
for i in range(int(n_boot)):
resampler = rs.randint(0, n, n)
sample = [a.take(resampler, axis=0) for a in args]
boot_dist.append(func(*sample, **func_kwargs))
return np.array(boot_dist)
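# Usage sketch (hypothetical data; only the public signature documented above
# is assumed):
#
#     x = np.random.randn(100)
#     boots = bootstrap(x, func=np.median, n_boot=1000, random_seed=0)
#     ci = np.percentile(boots, [2.5, 97.5])   # 95% bootstrap interval for the median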
def _structured_bootstrap(args, n_boot, units, func, func_kwargs, rs):
"""Resample units instead of datapoints."""
unique_units = np.unique(units)
n_units = len(unique_units)
args = [[a[units == unit] for unit in unique_units] for a in args]
boot_dist = []
for i in range(int(n_boot)):
resampler = rs.randint(0, n_units, n_units)
sample = [np.take(a, resampler, axis=0) for a in args]
lengths = map(len, sample[0])
resampler = [rs.randint(0, n, n) for n in lengths]
sample = [[c.take(r, axis=0) for c, r in zip(a, resampler)]
for a in sample]
sample = list(map(np.concatenate, sample))
boot_dist.append(func(*sample, **func_kwargs))
return np.array(boot_dist)
def _smooth_bootstrap(args, n_boot, func, func_kwargs):
"""Bootstrap by resampling from a kernel density estimate."""
n = len(args[0])
boot_dist = []
kde = [stats.gaussian_kde(np.transpose(a)) for a in args]
for i in range(int(n_boot)):
sample = [a.resample(n).T for a in kde]
boot_dist.append(func(*sample, **func_kwargs))
return np.array(boot_dist)
def randomize_corrmat(a, tail="both", corrected=True, n_iter=1000,
random_seed=None, return_dist=False):
"""Test the significance of set of correlations with permutations.
By default this corrects for multiple comparisons across one side
of the matrix.
Parameters
----------
a : n_vars x n_obs array
array with variables as rows
tail : both | upper | lower
whether test should be two-tailed, or which tail to integrate over
corrected : boolean
if True reports p values with respect to the max stat distribution
n_iter : int
number of permutation iterations
random_seed : int or None
seed for RNG
    return_dist : bool
        if True, also return the full null distribution (n_vars x n_vars x n_iter)
    Returns
    -------
    p_mat : array
        array of probabilities for the actual correlations from the null CDF
"""
if tail not in ["upper", "lower", "both"]:
raise ValueError("'tail' must be 'upper', 'lower', or 'both'")
rs = np.random.RandomState(random_seed)
a = np.asarray(a, np.float)
flat_a = a.ravel()
n_vars, n_obs = a.shape
# Do the permutations to establish a null distribution
null_dist = np.empty((n_vars, n_vars, n_iter))
for i_i in range(n_iter):
perm_i = np.concatenate([rs.permutation(n_obs) + (v * n_obs)
for v in range(n_vars)])
a_i = flat_a[perm_i].reshape(n_vars, n_obs)
null_dist[..., i_i] = np.corrcoef(a_i)
# Get the observed correlation values
real_corr = np.corrcoef(a)
# Figure out p values based on the permutation distribution
p_mat = np.zeros((n_vars, n_vars))
upper_tri = np.triu_indices(n_vars, 1)
if corrected:
if tail == "both":
max_dist = np.abs(null_dist[upper_tri]).max(axis=0)
elif tail == "lower":
max_dist = null_dist[upper_tri].min(axis=0)
elif tail == "upper":
max_dist = null_dist[upper_tri].max(axis=0)
cdf = lambda x: stats.percentileofscore(max_dist, x) / 100.
for i, j in zip(*upper_tri):
observed = real_corr[i, j]
if tail == "both":
p_ij = 1 - cdf(abs(observed))
elif tail == "lower":
p_ij = cdf(observed)
elif tail == "upper":
p_ij = 1 - cdf(observed)
p_mat[i, j] = p_ij
else:
for i, j in zip(*upper_tri):
null_corrs = null_dist[i, j]
cdf = lambda x: stats.percentileofscore(null_corrs, x) / 100.
observed = real_corr[i, j]
if tail == "both":
p_ij = 2 * (1 - cdf(abs(observed)))
elif tail == "lower":
p_ij = cdf(observed)
elif tail == "upper":
p_ij = 1 - cdf(observed)
p_mat[i, j] = p_ij
    # Make p matrix symmetrical with nans on the diagonal
p_mat += p_mat.T
p_mat[np.diag_indices(n_vars)] = np.nan
if return_dist:
return p_mat, null_dist
return p_mat
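# Sketch of a call (hypothetical data; variables are rows, observations are
# columns):
#
#     a = np.random.randn(4, 50)                        # 4 variables, 50 observations
#     p = randomize_corrmat(a, n_iter=200, random_seed=0)
#     # p is a symmetric (4, 4) array of p values with NaN on the diagonal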
| bsd-3-clause |
OpenNeuroLab/brainspell-neo | archive/sprite/brainsprite.py | 2 | 7440 | # Christian Dansereau 2016 Copyright
import os
import numpy as np
import nibabel as nib
from PIL import Image
import json
from nilearn.image import resample_img
import hashlib, time
import matplotlib.pyplot as plt
from shutil import copyfile
def _load_json_template():
data_file = """{
"canvas": "3Dviewer",
"sprite": "spriteImg",
"flagCoordinates": true,
"nbSlice": {
"Y": 233,
"Z": 189
},
"colorBackground": "#000",
"colorFont": "#FFF",
"overlay": {
"sprite": "overlayImg",
"nbSlice": {
"Y": 233,
"Z": 189
},
"opacity": 0.7
},
"colorMap": {
"img": "colorMap",
"min": 0.2,
"max": 0.66
}
}
"""
data = json.loads(data_file)
return data
def _load_notebook_html(canvas_id, bkg_path, overlay_path, tmp_path, json_data):
html = """
<!DOCTYPE html>
<html>
<head>
</head>
<body>
<div id="div_viewer">
<canvas id="{0}"> <!-- this is the canvas that will feature the brain slices -->
<img id="spriteImg" class="hidden" src="{1}"> <!-- load a hidden version of the sprite image that includes all (sagital) brain slices -->
<img id="overlayImg" class="hidden" src="{2}"> <!-- another sprite image, with an overlay-->
</div>
<script type="text/javascript" src="{3}jquery.min.js"></script> <!-- JQuery is used in this example, line 18, but is not actually used in brainsprite.js -->
<script type="text/javascript" src="{3}brainsprite.js"></script>
<script>
// On load: build all figures
$( "{0}" ).ready(function() {{
var brain = brainsprite({4});
}});
</script>
</body>
</html>
"""
return html.format(canvas_id, bkg_path, overlay_path, tmp_path, json_data)
def _loadVolume(source_file):
img = nib.load(source_file)
vol = img.get_data()
# check if its a nii file
ext = _getExt(source_file)
if ext == ".nii":
vol = np.swapaxes(vol, 0, 2)
return vol
def _getspec(vol):
nx, ny, nz = vol.shape
nrows = int(np.ceil(np.sqrt(nz)))
ncolumns = int(np.ceil(nz / (1. * nrows)))
return nrows, ncolumns, nx, ny, nz
def _getExt(source_file):
# Getting the extension
if os.path.splitext(source_file)[1] == '.gz':
extension = os.path.splitext(os.path.splitext(source_file)[0])[1]
else:
extension = os.path.splitext(source_file)[1]
return extension
def _montage(vol):
nrows, ncolumns, nx, ny, nz = _getspec(vol)
mosaic = np.zeros((nrows * nx, ncolumns * ny))
indx, indy = np.where(np.ones((nrows, ncolumns)))
for ii in range(vol.shape[2]):
# we need to flip the image in the x axis
mosaic[(indx[ii] * nx):((indx[ii] + 1) * nx), (indy[ii] * ny):((indy[ii] + 1) * ny)] = vol[::-1, :, ii]
return mosaic
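# Example of the tiling performed by _montage (sketch; the shapes are made up):
# a (nx, ny, nz) volume is laid out as ceil(sqrt(nz)) rows of x-flipped slices,
# so a (10, 12, 9) volume becomes a single (30, 36) mosaic image.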
def _saveMosaic(mosaic, output_path, overlay=False, overlay_threshold=0.1):
if overlay:
mosaic[mosaic < overlay_threshold] = 0
im = Image.fromarray(np.uint8(plt.cm.hot(mosaic) * 255))
mask = Image.fromarray(np.uint8(mosaic > 0) * 255).convert("L")
im.putalpha(mask)
else:
im = Image.fromarray(mosaic).convert('RGB')
# if im.mode != 'RGBA':
# im = im.convert('RGBA')
im.save(output_path)
def transform_package(img_path, output_folder, overlay_path=''):
if overlay_path == '':
transform(img_path, output_folder + 'bkg_mosaic.jpg', output_folder + 'params.js')
else:
transform(img_path, output_folder + 'bkg_mosaic.jpg', output_folder + 'params.js', overlay_path,
output_folder + 'overlay_mosaic.png')
def transform(source_bkg_path, out_bkg_path, out_json, source_overlay_path='', out_overlay_path='',
overlay_threshold=0.1, return_json=False, overlay_interpolation='continuous'):
# load data
bkg_vol = _loadVolume(source_bkg_path)
bkg_vol = (bkg_vol / float(bkg_vol.max())) * 255.
# populate json
params = _load_json_template()
params['nbSlice']['Y'] = bkg_vol.shape[1]
params['nbSlice']['Z'] = bkg_vol.shape[0]
# make bkg montage save
mosa_bkg = _montage(bkg_vol)
_saveMosaic(mosa_bkg, out_bkg_path)
if source_overlay_path != '':
# load data
bkimg = nib.load(source_bkg_path)
overimg = nib.load(source_overlay_path)
# transform slice order and resample to fit bkimg
# check if its a nii file
ext = _getExt(source_overlay_path)
ext_bkg = _getExt(source_bkg_path)
if ext == ".nii":
if ext_bkg == ".mnc":
bkimg.affine[:, [0, 2]] = bkimg.affine[:, [2, 0]]
overimg = resample_img(overimg, bkimg.affine, bkimg.shape[::-1], interpolation=overlay_interpolation)
overlay_vol = np.swapaxes(overimg.get_data(), 0, 2)
else:
            overimg = nib.nifti1.Nifti1Image(overimg.get_data(), overimg.get_affine())
overimg = resample_img(overimg, bkimg.affine, bkimg.shape)
overlay_vol = overimg.get_data()
# populate json
params['overlay']['nbSlice']['Y'] = overlay_vol.shape[1]
params['overlay']['nbSlice']['Z'] = overlay_vol.shape[0]
# make overlay montage and save
mosa_overlay = _montage(overlay_vol)
_saveMosaic(mosa_overlay, out_overlay_path, overlay=True, overlay_threshold=overlay_threshold)
else:
del params['overlay']
del params['colorMap']
if out_json[-3:] == '.js':
with open(out_json, 'w') as outfile:
data = "var jsonParams = '" + json.dumps(params) + "';"
outfile.write(data)
else:
with open(out_json, 'w') as outfile:
data = json.dumps(params)
outfile.write(data)
if return_json:
return json.dumps(params)
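# Usage sketch for transform() (the file paths below are placeholders):
#
#     transform('anat.nii.gz', 'bkg_mosaic.jpg', 'params.json',
#               'overlay.nii.gz', 'overlay_mosaic.png', overlay_threshold=0.3)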
def show_sprite(bkg_img, overlay_img, tmp_path):
# make a tmp folder
tmp_path = tmp_path + '/brainsprite_tmp/'
_make_folder(tmp_path)
copyfile('../brainsprite.js', tmp_path+'brainsprite.js')
copyfile('../assets/jquery-1.9.1/jquery.min.js', tmp_path + 'jquery.min.js')
hash = _gen_file_name()
bkgimg_ = tmp_path + hash + '_bkg.jpg'
overlayimg_ = tmp_path + hash + '_overlay_mosaic.png'
json_data = transform(bkg_img, bkgimg_, tmp_path + hash + '_params.json', overlay_img, overlayimg_, overlay_threshold=0.3, return_json=True)
json_data = json_data.replace("3Dviewer", "canvas" + hash)
    print(json_data)
html_code = _load_notebook_html('canvas' + hash, 'brainsprite_tmp/' + hash + '_bkg.jpg', 'brainsprite_tmp/' + hash + '_overlay_mosaic.png', 'brainsprite_tmp/', json_data)
return html_code
def _make_folder(path):
if not os.path.exists(path):
os.makedirs(path)
return True
return False
def _gen_file_name():
hash_ = hashlib.sha1()
hash_.update(str(time.time()).encode('utf-8'))
return hash_.hexdigest()
def test_mosaic():
# custom path
background = "test_anat.mnc.gz"
overlay = "DMN.nii.gz"
output_folder = "/home/cdansereau/virenv/"
#background = "t2.nii.gz"
#overlay = "t2_seg.nii.gz"
#output_folder = "/home/cdansereau/t2/"
# transform data
transform(output_folder + background, output_folder + 'bkg_mosaic.jpg', output_folder + 'params.json',
output_folder + overlay, output_folder + 'overlay_mosaic.png', overlay_threshold=0.3)
| mit |
djgagne/scikit-learn | examples/svm/plot_svm_anova.py | 250 | 2000 | """
=================================================
SVM-Anova: SVM with univariate feature selection
=================================================
This example shows how to perform univariate feature before running a SVC
(support vector classifier) to improve the classification scores.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets, feature_selection, cross_validation
from sklearn.pipeline import Pipeline
###############################################################################
# Import some data to play with
digits = datasets.load_digits()
y = digits.target
# Throw away data, to be in the curse of dimension settings
y = y[:200]
X = digits.data[:200]
n_samples = len(y)
X = X.reshape((n_samples, -1))
# add 200 non-informative features
X = np.hstack((X, 2 * np.random.random((n_samples, 200))))
###############################################################################
# Create a feature-selection transform and an instance of SVM that we
# combine together to have an full-blown estimator
transform = feature_selection.SelectPercentile(feature_selection.f_classif)
clf = Pipeline([('anova', transform), ('svc', svm.SVC(C=1.0))])
###############################################################################
# Plot the cross-validation score as a function of percentile of features
score_means = list()
score_stds = list()
percentiles = (1, 3, 6, 10, 15, 20, 30, 40, 60, 80, 100)
for percentile in percentiles:
clf.set_params(anova__percentile=percentile)
# Compute cross-validation score using all CPUs
this_scores = cross_validation.cross_val_score(clf, X, y, n_jobs=1)
score_means.append(this_scores.mean())
score_stds.append(this_scores.std())
plt.errorbar(percentiles, score_means, np.array(score_stds))
plt.title(
'Performance of the SVM-Anova varying the percentile of features selected')
plt.xlabel('Percentile')
plt.ylabel('Prediction rate')
plt.axis('tight')
plt.show()
| bsd-3-clause |
WillisXChen/django-oscar | oscar/lib/python2.7/site-packages/IPython/kernel/zmq/eventloops.py | 4 | 8652 | # encoding: utf-8
"""Event loop integration for the ZeroMQ-based kernels.
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2011 The IPython Development Team
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import os
import sys
# System library imports
import zmq
# Local imports
from IPython.config.application import Application
from IPython.utils import io
#------------------------------------------------------------------------------
# Eventloops for integrating the Kernel into different GUIs
#------------------------------------------------------------------------------
def _on_os_x_10_9():
import platform
from distutils.version import LooseVersion as V
return sys.platform == 'darwin' and V(platform.mac_ver()[0]) >= V('10.9')
def _notify_stream_qt(kernel, stream):
from IPython.external.qt_for_kernel import QtCore
if _on_os_x_10_9() and kernel._darwin_app_nap:
from IPython.external.appnope import nope_scope as context
else:
from IPython.core.interactiveshell import NoOpContext as context
def process_stream_events():
while stream.getsockopt(zmq.EVENTS) & zmq.POLLIN:
with context():
kernel.do_one_iteration()
fd = stream.getsockopt(zmq.FD)
notifier = QtCore.QSocketNotifier(fd, QtCore.QSocketNotifier.Read, kernel.app)
notifier.activated.connect(process_stream_events)
def loop_qt4(kernel):
"""Start a kernel with PyQt4 event loop integration."""
from IPython.lib.guisupport import get_app_qt4, start_event_loop_qt4
kernel.app = get_app_qt4([" "])
kernel.app.setQuitOnLastWindowClosed(False)
for s in kernel.shell_streams:
_notify_stream_qt(kernel, s)
start_event_loop_qt4(kernel.app)
def loop_qt5(kernel):
"""Start a kernel with PyQt5 event loop integration"""
os.environ['QT_API'] = 'pyqt5'
return loop_qt4(kernel)
def loop_wx(kernel):
"""Start a kernel with wx event loop support."""
import wx
from IPython.lib.guisupport import start_event_loop_wx
if _on_os_x_10_9() and kernel._darwin_app_nap:
# we don't hook up App Nap contexts for Wx,
# just disable it outright.
from IPython.external.appnope import nope
nope()
doi = kernel.do_one_iteration
# Wx uses milliseconds
poll_interval = int(1000*kernel._poll_interval)
# We have to put the wx.Timer in a wx.Frame for it to fire properly.
# We make the Frame hidden when we create it in the main app below.
class TimerFrame(wx.Frame):
def __init__(self, func):
wx.Frame.__init__(self, None, -1)
self.timer = wx.Timer(self)
# Units for the timer are in milliseconds
self.timer.Start(poll_interval)
self.Bind(wx.EVT_TIMER, self.on_timer)
self.func = func
def on_timer(self, event):
self.func()
# We need a custom wx.App to create our Frame subclass that has the
# wx.Timer to drive the ZMQ event loop.
class IPWxApp(wx.App):
def OnInit(self):
self.frame = TimerFrame(doi)
self.frame.Show(False)
return True
# The redirect=False here makes sure that wx doesn't replace
# sys.stdout/stderr with its own classes.
kernel.app = IPWxApp(redirect=False)
# The import of wx on Linux sets the handler for signal.SIGINT
# to 0. This is a bug in wx or gtk. We fix by just setting it
# back to the Python default.
import signal
if not callable(signal.getsignal(signal.SIGINT)):
signal.signal(signal.SIGINT, signal.default_int_handler)
start_event_loop_wx(kernel.app)
def loop_tk(kernel):
"""Start a kernel with the Tk event loop."""
try:
from tkinter import Tk # Py 3
except ImportError:
from Tkinter import Tk # Py 2
doi = kernel.do_one_iteration
# Tk uses milliseconds
poll_interval = int(1000*kernel._poll_interval)
# For Tkinter, we create a Tk object and call its withdraw method.
class Timer(object):
def __init__(self, func):
self.app = Tk()
self.app.withdraw()
self.func = func
def on_timer(self):
self.func()
self.app.after(poll_interval, self.on_timer)
def start(self):
self.on_timer() # Call it once to get things going.
self.app.mainloop()
kernel.timer = Timer(doi)
kernel.timer.start()
def loop_gtk(kernel):
"""Start the kernel, coordinating with the GTK event loop"""
from .gui.gtkembed import GTKEmbed
gtk_kernel = GTKEmbed(kernel)
gtk_kernel.start()
def loop_cocoa(kernel):
"""Start the kernel, coordinating with the Cocoa CFRunLoop event loop
via the matplotlib MacOSX backend.
"""
import matplotlib
if matplotlib.__version__ < '1.1.0':
kernel.log.warn(
"MacOSX backend in matplotlib %s doesn't have a Timer, "
"falling back on Tk for CFRunLoop integration. Note that "
"even this won't work if Tk is linked against X11 instead of "
"Cocoa (e.g. EPD). To use the MacOSX backend in the kernel, "
"you must use matplotlib >= 1.1.0, or a native libtk."
)
return loop_tk(kernel)
from matplotlib.backends.backend_macosx import TimerMac, show
# scale interval for sec->ms
poll_interval = int(1000*kernel._poll_interval)
real_excepthook = sys.excepthook
def handle_int(etype, value, tb):
"""don't let KeyboardInterrupts look like crashes"""
if etype is KeyboardInterrupt:
io.raw_print("KeyboardInterrupt caught in CFRunLoop")
else:
real_excepthook(etype, value, tb)
# add doi() as a Timer to the CFRunLoop
def doi():
# restore excepthook during IPython code
sys.excepthook = real_excepthook
kernel.do_one_iteration()
# and back:
sys.excepthook = handle_int
t = TimerMac(poll_interval)
t.add_callback(doi)
t.start()
# but still need a Poller for when there are no active windows,
# during which time mainloop() returns immediately
poller = zmq.Poller()
if kernel.control_stream:
poller.register(kernel.control_stream.socket, zmq.POLLIN)
for stream in kernel.shell_streams:
poller.register(stream.socket, zmq.POLLIN)
while True:
try:
# double nested try/except, to properly catch KeyboardInterrupt
# due to pyzmq Issue #130
try:
# don't let interrupts during mainloop invoke crash_handler:
sys.excepthook = handle_int
show.mainloop()
sys.excepthook = real_excepthook
# use poller if mainloop returned (no windows)
# scale by extra factor of 10, since it's a real poll
poller.poll(10*poll_interval)
kernel.do_one_iteration()
except:
raise
except KeyboardInterrupt:
# Ctrl-C shouldn't crash the kernel
io.raw_print("KeyboardInterrupt caught in kernel")
finally:
# ensure excepthook is restored
sys.excepthook = real_excepthook
# mapping of keys to loop functions
loop_map = {
'qt' : loop_qt4,
'qt4': loop_qt4,
'qt5': loop_qt5,
'inline': None,
'nbagg': None,
'osx': loop_cocoa,
'wx' : loop_wx,
'tk' : loop_tk,
'gtk': loop_gtk,
None : None,
}
def enable_gui(gui, kernel=None):
"""Enable integration with a given GUI"""
if gui not in loop_map:
e = "Invalid GUI request %r, valid ones are:%s" % (gui, loop_map.keys())
raise ValueError(e)
if kernel is None:
if Application.initialized():
kernel = getattr(Application.instance(), 'kernel', None)
if kernel is None:
raise RuntimeError("You didn't specify a kernel,"
" and no IPython Application with a kernel appears to be running."
)
loop = loop_map[gui]
if loop and kernel.eventloop is not None and kernel.eventloop is not loop:
raise RuntimeError("Cannot activate multiple GUI eventloops")
kernel.eventloop = loop
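# Typical call from kernel-side code (sketch; assumes a running IPython kernel
# application, otherwise a RuntimeError is raised as above):
#
#     enable_gui('qt4')          # hook the Qt4 event loop into the running kernel
#     enable_gui('tk', kernel)   # or pass an explicit kernel instance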
| bsd-3-clause |
iamaziz/simpleai | simpleai/machine_learning/reinforcement_learning.py | 5 | 6345 | # -*- coding: utf-8 -*-
from collections import defaultdict, Counter
import math
import random
from simpleai.search.utils import argmax
import pickle
try:
import matplotlib.pyplot as plt
import numpy
except:
plt = None # lint:ok
numpy = None # lint:ok
def make_at_least_n_times(optimistic_reward, min_n):
def at_least_n_times_exploration(actions, utilities, temperature, action_counter):
utilities = [utilities[x] for x in actions]
for i, utility in enumerate(utilities):
if action_counter[actions[i]] < min_n:
utilities[i] = optimistic_reward
d = dict(zip(actions, utilities))
uf = lambda action: d[action]
return argmax(actions, uf)
return at_least_n_times_exploration
def boltzmann_exploration(actions, utilities, temperature, action_counter):
'''returns an action with a probability depending on utilities and temperature'''
utilities = [utilities[x] for x in actions]
temperature = max(temperature, 0.01)
_max = max(utilities)
_min = min(utilities)
if _max == _min:
return random.choice(actions)
utilities = [math.exp(((u - _min) / (_max - _min)) / temperature) for u in utilities]
probs = [u / sum(utilities) for u in utilities]
i = 0
tot = probs[i]
r = random.random()
while i < len(actions) and r >= tot:
i += 1
tot += probs[i]
return actions[i]
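# Illustrative call (hypothetical utilities and an empty action counter; the
# counter argument is unused by this particular strategy):
#
#     actions = ['left', 'right']
#     utilities = {'left': 1.0, 'right': 2.0}
#     boltzmann_exploration(actions, utilities, temperature=0.5,
#                           action_counter=Counter())
#     # low temperature favours 'right'; high temperature approaches uniform choice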
def make_exponential_temperature(initial_temperature, alpha):
'''returns a function like initial / exp(n * alpha)'''
def _function(n):
try:
return initial_temperature / math.exp(n * alpha)
except OverflowError:
return 0.01
return _function
class PerformanceCounter(object):
def __init__(self, learners, names=None):
self.learners = learners
for i, learner in enumerate(learners):
self.update_set_reward(learner)
learner.accumulated_rewards = []
learner.known_states = []
learner.temperatures = []
if names is None:
learner.name = 'Learner %d' % i
else:
learner.name = names[i]
def update_set_reward(self, learner):
def set_reward(reward, terminal=False):
if terminal:
if len(learner.accumulated_rewards) > 0:
learner.accumulated_rewards.append(learner.accumulated_rewards[-1] + reward)
else:
learner.accumulated_rewards.append(reward)
learner.known_states.append(len(learner.Q))
learner.temperatures.append(learner.temperature_function(learner.trials))
learner.old_set_reward(reward, terminal)
learner.old_set_reward = learner.set_reward
learner.set_reward = set_reward
def _make_plot(self, ax, data_name):
for learner in self.learners:
data = numpy.array(getattr(learner, data_name))
ax.plot(numpy.arange(len(data)), data, label=learner.name)
nice_name = data_name.replace('_', ' ').capitalize()
ax.set_title(nice_name)
ax.legend()
def show_statistics(self):
f, (ax1, ax2, ax3) = plt.subplots(3, 1, sharex=True)
self._make_plot(ax1, 'accumulated_rewards')
self._make_plot(ax2, 'known_states')
self._make_plot(ax3, 'temperatures')
plt.show()
class RLProblem(object):
def actions(self, state):
'''Returns the actions available to perform from `state`.
The returned value is an iterable over actions.
'''
raise NotImplementedError()
def update_state(self, percept, agent):
'Override this method if you need to clean perception to a given agent'
return percept
def inverse(n):
if n == 0:
return 1
return 1.0 / n
def state_default():
return defaultdict(int)
class QLearner(object):
def __init__(self, problem, temperature_function=inverse,
discount_factor=1,
exploration_function=boltzmann_exploration,
learning_rate=inverse):
self.Q = defaultdict(state_default)
self.problem = problem
self.discount_factor = discount_factor
self.temperature_function = temperature_function
self.exploration_function = exploration_function
self.learning_rate = learning_rate
self.last_state = None
self.last_action = None
self.last_reward = None
self.counter = defaultdict(Counter)
self.trials = 0
def set_reward(self, reward, terminal=False):
self.last_reward = reward
if terminal:
self.trials += 1
self.Q[self.last_state][self.last_action] = reward
def program(self, percept):
s = self.last_state
a = self.last_action
state = self.problem.update_state(percept, self)
actions = self.problem.actions(state)
if len(actions) > 0:
current_action = self.exploration_function(actions, self.Q[state],
self.temperature_function(self.trials),
self.counter[state])
else:
current_action = None
if s is not None and current_action:
self.counter[s][a] += 1
self.update_rule(s, a, self.last_reward, state, current_action)
self.last_state = state
self.last_action = current_action
return current_action
def update_rule(self, s, a, r, cs, ca):
raise NotImplementedError
def dump(self, path):
self.temperature_function = inverse
with open(path, 'wb') as f:
pickle.dump(self, f)
@classmethod
def load(self, path):
with open(path, 'rb') as f:
return pickle.load(f)
class TDQLearner(QLearner):
def update_rule(self, s, a, r, cs, ca):
lr = self.learning_rate(self.counter[s][a])
self.Q[s][a] += lr * (r + self.discount_factor * max(self.Q[cs].values()) - self.Q[s][a])
class SARSALearner(QLearner):
def update_rule(self, s, a, r, cs, ca):
lr = self.learning_rate(self.counter[s][a])
self.Q[s][a] += lr * (r + self.discount_factor * self.Q[cs][ca] - self.Q[s][a])
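# The two update rules above are the standard temporal-difference targets:
#   Q-learning (off-policy): Q(s,a) += lr * (r + gamma * max_a' Q(s',a') - Q(s,a))
#   SARSA (on-policy):       Q(s,a) += lr * (r + gamma * Q(s',a')        - Q(s,a))
# where gamma is `discount_factor` and, for SARSA, a' is the action actually
# selected in the successor state.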
| mit |
2baOrNot2ba/AntPat | scripts/viewJonespat_dual.py | 1 | 2897 | #!/usr/bin/env python
"""A simple viewer for Jones patterns for dual-polarized representations.
"""
import argparse
import numpy
import matplotlib.pyplot as plt
from antpat.reps.sphgridfun.pntsonsphere import ZenHemisphGrid
from antpat.dualpolelem import DualPolElem, jones2gIXR, IXRJ2IXRM
from antpat.reps.hamaker import convLOFARcc2DPE
import antpat.io.filetypes as antfiles
def plotJonesCanonical(theta, phi, jones, dpelemname):
normalize = True
dbscale = True
polarplt = True
IXRTYPE = 'IXR_J' # Can be IXR_J or IXR_M
g, IXRJ = jones2gIXR(jones)
IXRM = IXRJ2IXRM(IXRJ)
if IXRTYPE == 'IXR_J':
IXR = IXRJ
    elif IXRTYPE == 'IXR_M':
IXR = IXRM
else:
raise RuntimeError("""Error: IXR type {} unknown.
Known types are IXR_J, IXR_M.""".format(IXRTYPE))
fig = plt.figure()
fig.suptitle(dpelemname)
plt.subplot(121, polar=polarplt)
if normalize:
g_max = numpy.max(g)
g = g/g_max
if dbscale:
g = 20*numpy.log10(g)
# nrlvls = 5
# g_lvls = numpy.max(g) - 3.0*numpy.arange(nrlvls)
plt.pcolormesh(phi, numpy.rad2deg(theta), g)
# plt.contour( phi, numpy.rad2deg(theta), g_dress, levels = g_lvls)
plt.colorbar()
plt.title('Amp gain')
plt.subplot(122, polar=polarplt)
plt.pcolormesh(phi, numpy.rad2deg(theta), 10*numpy.log10(IXR))
plt.colorbar()
plt.title('IXR_J')
plt.show()
def plotFFpat():
from antpat.reps.sphgridfun import tvecfun
for polchan in [0, 1]:
E_th = jones[:, :, polchan, 0].squeeze()
E_ph = jones[:, :, polchan, 1].squeeze()
tvecfun.plotvfonsph(THETA, PHI, E_th, E_ph, args.freq,
vcoordlist=['Ludwig3'], projection='orthographic')
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("freq", type=float,
help="Frequency in Hertz")
parser.add_argument("filename", help="""
Filename of dual-polarization FF, Hamaker-Arts format,
or a single-polarization FF (p-channel)""")
parser.add_argument("filename_q", nargs='?',
help="""
Filename of second (q-channel) single-polarization FF.
""")
args = parser.parse_args()
if args.filename.endswith(antfiles.HamArtsuffix):
hp = convLOFARcc2DPE(args.filename, [args.freq])
elif args.filename.endswith(antfiles.FEKOsuffix):
hp = DualPolElem()
hp.load_ffes(args.filename, args.filename_q)
else:
raise RuntimeError("dual-pol pattern file type not known")
THETA, PHI = ZenHemisphGrid()
jones = hp.getJonesAlong([args.freq], (THETA, PHI))
plotFFpat()
# plotJonesCanonical(THETA, PHI, jones, os.path.basename(args.filename)
# + ' (' + str(args.freq/1e6) + ' MHz)')
| isc |
metaml/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/lines.py | 69 | 48233 | """
This module contains all the 2D line class which can draw with a
variety of line styles, markers and colors.
"""
# TODO: expose cap and join style attrs
from __future__ import division
import numpy as np
from numpy import ma
from matplotlib import verbose
import artist
from artist import Artist
from cbook import iterable, is_string_like, is_numlike, ls_mapper, dedent,\
flatten
from colors import colorConverter
from path import Path
from transforms import Affine2D, Bbox, TransformedPath, IdentityTransform
from matplotlib import rcParams
# special-purpose marker identifiers:
(TICKLEFT, TICKRIGHT, TICKUP, TICKDOWN,
CARETLEFT, CARETRIGHT, CARETUP, CARETDOWN) = range(8)
# COVERAGE NOTE: Never called internally or from examples
def unmasked_index_ranges(mask, compressed = True):
warnings.warn("Import this directly from matplotlib.cbook",
DeprecationWarning)
# Warning added 2008/07/22
from matplotlib.cbook import unmasked_index_ranges as _unmasked_index_ranges
return _unmasked_index_ranges(mask, compressed=compressed)
def segment_hits(cx, cy, x, y, radius):
"""
Determine if any line segments are within radius of a
point. Returns the list of line segments that are within that
radius.
"""
# Process single points specially
if len(x) < 2:
res, = np.nonzero( (cx - x)**2 + (cy - y)**2 <= radius**2 )
return res
# We need to lop the last element off a lot.
xr,yr = x[:-1],y[:-1]
# Only look at line segments whose nearest point to C on the line
# lies within the segment.
dx,dy = x[1:]-xr, y[1:]-yr
Lnorm_sq = dx**2+dy**2 # Possibly want to eliminate Lnorm==0
u = ( (cx-xr)*dx + (cy-yr)*dy )/Lnorm_sq
candidates = (u>=0) & (u<=1)
#if any(candidates): print "candidates",xr[candidates]
# Note that there is a little area near one side of each point
# which will be near neither segment, and another which will
# be near both, depending on the angle of the lines. The
# following radius test eliminates these ambiguities.
point_hits = (cx - x)**2 + (cy - y)**2 <= radius**2
#if any(point_hits): print "points",xr[candidates]
candidates = candidates & ~(point_hits[:-1] | point_hits[1:])
# For those candidates which remain, determine how far they lie away
# from the line.
px,py = xr+u*dx,yr+u*dy
line_hits = (cx-px)**2 + (cy-py)**2 <= radius**2
#if any(line_hits): print "lines",xr[candidates]
line_hits = line_hits & candidates
points, = point_hits.ravel().nonzero()
lines, = line_hits.ravel().nonzero()
#print points,lines
return np.concatenate((points,lines))
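# Worked example of the hit test above (sketch, arbitrary numbers): for a
# polyline through (0, 0) and (10, 0), a cursor at (5, 1) with radius 2
# projects onto the first segment at u = 0.5, lies within the radius, and is
# not inside either endpoint's disc, so segment index 0 is returned.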
class Line2D(Artist):
"""
A line - the line can have both a solid linestyle connecting all
the vertices, and a marker at each vertex. Additionally, the
drawing of the solid line is influenced by the drawstyle, eg one
can create "stepped" lines in various styles.
"""
lineStyles = _lineStyles = { # hidden names deprecated
'-' : '_draw_solid',
'--' : '_draw_dashed',
'-.' : '_draw_dash_dot',
':' : '_draw_dotted',
'None' : '_draw_nothing',
' ' : '_draw_nothing',
'' : '_draw_nothing',
}
_drawStyles_l = {
'default' : '_draw_lines',
'steps-mid' : '_draw_steps_mid',
'steps-pre' : '_draw_steps_pre',
'steps-post' : '_draw_steps_post',
}
_drawStyles_s = {
'steps' : '_draw_steps_pre',
}
drawStyles = {}
drawStyles.update(_drawStyles_l)
drawStyles.update(_drawStyles_s)
markers = _markers = { # hidden names deprecated
'.' : '_draw_point',
',' : '_draw_pixel',
'o' : '_draw_circle',
'v' : '_draw_triangle_down',
'^' : '_draw_triangle_up',
'<' : '_draw_triangle_left',
'>' : '_draw_triangle_right',
'1' : '_draw_tri_down',
'2' : '_draw_tri_up',
'3' : '_draw_tri_left',
'4' : '_draw_tri_right',
's' : '_draw_square',
'p' : '_draw_pentagon',
'*' : '_draw_star',
'h' : '_draw_hexagon1',
'H' : '_draw_hexagon2',
'+' : '_draw_plus',
'x' : '_draw_x',
'D' : '_draw_diamond',
'd' : '_draw_thin_diamond',
'|' : '_draw_vline',
'_' : '_draw_hline',
TICKLEFT : '_draw_tickleft',
TICKRIGHT : '_draw_tickright',
TICKUP : '_draw_tickup',
TICKDOWN : '_draw_tickdown',
CARETLEFT : '_draw_caretleft',
CARETRIGHT : '_draw_caretright',
CARETUP : '_draw_caretup',
CARETDOWN : '_draw_caretdown',
'None' : '_draw_nothing',
' ' : '_draw_nothing',
'' : '_draw_nothing',
}
filled_markers = ('o', '^', 'v', '<', '>',
's', 'd', 'D', 'h', 'H', 'p', '*')
zorder = 2
validCap = ('butt', 'round', 'projecting')
validJoin = ('miter', 'round', 'bevel')
def __str__(self):
if self._label != "":
return "Line2D(%s)"%(self._label)
elif hasattr(self, '_x') and len(self._x) > 3:
return "Line2D((%g,%g),(%g,%g),...,(%g,%g))"\
                %(self._x[0],self._y[0],self._x[1],self._y[1],self._x[-1],self._y[-1])
elif hasattr(self, '_x'):
return "Line2D(%s)"\
%(",".join(["(%g,%g)"%(x,y) for x,y in zip(self._x,self._y)]))
else:
return "Line2D()"
def __init__(self, xdata, ydata,
linewidth = None, # all Nones default to rc
linestyle = None,
color = None,
marker = None,
markersize = None,
markeredgewidth = None,
markeredgecolor = None,
markerfacecolor = None,
antialiased = None,
dash_capstyle = None,
solid_capstyle = None,
dash_joinstyle = None,
solid_joinstyle = None,
pickradius = 5,
drawstyle = None,
**kwargs
):
"""
Create a :class:`~matplotlib.lines.Line2D` instance with *x*
and *y* data in sequences *xdata*, *ydata*.
The kwargs are :class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
        See :meth:`set_linestyle` for a description of the line styles,
:meth:`set_marker` for a description of the markers, and
:meth:`set_drawstyle` for a description of the draw styles.
"""
Artist.__init__(self)
#convert sequences to numpy arrays
if not iterable(xdata):
raise RuntimeError('xdata must be a sequence')
if not iterable(ydata):
raise RuntimeError('ydata must be a sequence')
if linewidth is None : linewidth=rcParams['lines.linewidth']
if linestyle is None : linestyle=rcParams['lines.linestyle']
if marker is None : marker=rcParams['lines.marker']
if color is None : color=rcParams['lines.color']
if markersize is None : markersize=rcParams['lines.markersize']
if antialiased is None : antialiased=rcParams['lines.antialiased']
if dash_capstyle is None : dash_capstyle=rcParams['lines.dash_capstyle']
if dash_joinstyle is None : dash_joinstyle=rcParams['lines.dash_joinstyle']
if solid_capstyle is None : solid_capstyle=rcParams['lines.solid_capstyle']
if solid_joinstyle is None : solid_joinstyle=rcParams['lines.solid_joinstyle']
if drawstyle is None : drawstyle='default'
self.set_dash_capstyle(dash_capstyle)
self.set_dash_joinstyle(dash_joinstyle)
self.set_solid_capstyle(solid_capstyle)
self.set_solid_joinstyle(solid_joinstyle)
self.set_linestyle(linestyle)
self.set_drawstyle(drawstyle)
self.set_linewidth(linewidth)
self.set_color(color)
self.set_marker(marker)
self.set_antialiased(antialiased)
self.set_markersize(markersize)
self._dashSeq = None
self.set_markerfacecolor(markerfacecolor)
self.set_markeredgecolor(markeredgecolor)
self.set_markeredgewidth(markeredgewidth)
self._point_size_reduction = 0.5
self.verticalOffset = None
# update kwargs before updating data to give the caller a
# chance to init axes (and hence unit support)
self.update(kwargs)
self.pickradius = pickradius
if is_numlike(self._picker):
self.pickradius = self._picker
self._xorig = np.asarray([])
self._yorig = np.asarray([])
self._invalid = True
self.set_data(xdata, ydata)
def contains(self, mouseevent):
"""
Test whether the mouse event occurred on the line. The pick
radius determines the precision of the location test (usually
within five points of the value). Use
:meth:`~matplotlib.lines.Line2D.get_pickradius` or
:meth:`~matplotlib.lines.Line2D.set_pickradius` to view or
modify it.
Returns *True* if any values are within the radius along with
``{'ind': pointlist}``, where *pointlist* is the set of points
within the radius.
TODO: sort returned indices by distance
"""
if callable(self._contains): return self._contains(self,mouseevent)
if not is_numlike(self.pickradius):
raise ValueError,"pick radius should be a distance"
# Make sure we have data to plot
if self._invalid:
self.recache()
if len(self._xy)==0: return False,{}
# Convert points to pixels
path, affine = self._transformed_path.get_transformed_path_and_affine()
path = affine.transform_path(path)
xy = path.vertices
xt = xy[:, 0]
yt = xy[:, 1]
# Convert pick radius from points to pixels
        if self.figure is None:
            warnings.warn('no figure set when checking if mouse is on line')
pixels = self.pickradius
else:
pixels = self.figure.dpi/72. * self.pickradius
# Check for collision
if self._linestyle in ['None',None]:
# If no line, return the nearby point(s)
d = (xt-mouseevent.x)**2 + (yt-mouseevent.y)**2
ind, = np.nonzero(np.less_equal(d, pixels**2))
else:
# If line, return the nearby segment(s)
ind = segment_hits(mouseevent.x,mouseevent.y,xt,yt,pixels)
# Debugging message
if False and self._label != u'':
print "Checking line",self._label,"at",mouseevent.x,mouseevent.y
print 'xt', xt
print 'yt', yt
#print 'dx,dy', (xt-mouseevent.x)**2., (yt-mouseevent.y)**2.
print 'ind',ind
# Return the point(s) within radius
return len(ind)>0,dict(ind=ind)
def get_pickradius(self):
'return the pick radius used for containment tests'
return self.pickradius
def setpickradius(self,d):
"""Sets the pick radius used for containment tests
ACCEPTS: float distance in points
"""
self.pickradius = d
def set_picker(self,p):
"""Sets the event picker details for the line.
ACCEPTS: float distance in points or callable pick function
``fn(artist, event)``
"""
if callable(p):
self._contains = p
else:
self.pickradius = p
self._picker = p
def get_window_extent(self, renderer):
bbox = Bbox.unit()
bbox.update_from_data_xy(self.get_transform().transform(self.get_xydata()),
ignore=True)
# correct for marker size, if any
if self._marker is not None:
ms = (self._markersize / 72.0 * self.figure.dpi) * 0.5
bbox = bbox.padded(ms)
return bbox
def set_axes(self, ax):
Artist.set_axes(self, ax)
if ax.xaxis is not None:
self._xcid = ax.xaxis.callbacks.connect('units', self.recache)
if ax.yaxis is not None:
self._ycid = ax.yaxis.callbacks.connect('units', self.recache)
set_axes.__doc__ = Artist.set_axes.__doc__
def set_data(self, *args):
"""
Set the x and y data
ACCEPTS: 2D array
"""
if len(args)==1:
x, y = args[0]
else:
x, y = args
not_masked = 0
if not ma.isMaskedArray(x):
x = np.asarray(x)
not_masked += 1
if not ma.isMaskedArray(y):
y = np.asarray(y)
not_masked += 1
if (not_masked < 2 or
(x is not self._xorig and
(x.shape != self._xorig.shape or np.any(x != self._xorig))) or
(y is not self._yorig and
(y.shape != self._yorig.shape or np.any(y != self._yorig)))):
self._xorig = x
self._yorig = y
self._invalid = True
def recache(self):
#if self.axes is None: print 'recache no axes'
#else: print 'recache units', self.axes.xaxis.units, self.axes.yaxis.units
if ma.isMaskedArray(self._xorig) or ma.isMaskedArray(self._yorig):
x = ma.asarray(self.convert_xunits(self._xorig), float)
y = ma.asarray(self.convert_yunits(self._yorig), float)
x = ma.ravel(x)
y = ma.ravel(y)
else:
x = np.asarray(self.convert_xunits(self._xorig), float)
y = np.asarray(self.convert_yunits(self._yorig), float)
x = np.ravel(x)
y = np.ravel(y)
if len(x)==1 and len(y)>1:
x = x * np.ones(y.shape, float)
if len(y)==1 and len(x)>1:
y = y * np.ones(x.shape, float)
if len(x) != len(y):
raise RuntimeError('xdata and ydata must be the same length')
x = x.reshape((len(x), 1))
y = y.reshape((len(y), 1))
if ma.isMaskedArray(x) or ma.isMaskedArray(y):
self._xy = ma.concatenate((x, y), 1)
else:
self._xy = np.concatenate((x, y), 1)
self._x = self._xy[:, 0] # just a view
self._y = self._xy[:, 1] # just a view
# Masked arrays are now handled by the Path class itself
self._path = Path(self._xy)
self._transformed_path = TransformedPath(self._path, self.get_transform())
self._invalid = False
def set_transform(self, t):
"""
set the Transformation instance used by this artist
ACCEPTS: a :class:`matplotlib.transforms.Transform` instance
"""
Artist.set_transform(self, t)
self._invalid = True
# self._transformed_path = TransformedPath(self._path, self.get_transform())
def _is_sorted(self, x):
"return true if x is sorted"
if len(x)<2: return 1
return np.alltrue(x[1:]-x[0:-1]>=0)
def draw(self, renderer):
if self._invalid:
self.recache()
renderer.open_group('line2d')
if not self._visible: return
gc = renderer.new_gc()
self._set_gc_clip(gc)
gc.set_foreground(self._color)
gc.set_antialiased(self._antialiased)
gc.set_linewidth(self._linewidth)
gc.set_alpha(self._alpha)
if self.is_dashed():
cap = self._dashcapstyle
join = self._dashjoinstyle
else:
cap = self._solidcapstyle
join = self._solidjoinstyle
gc.set_joinstyle(join)
gc.set_capstyle(cap)
gc.set_snap(self.get_snap())
funcname = self._lineStyles.get(self._linestyle, '_draw_nothing')
if funcname != '_draw_nothing':
tpath, affine = self._transformed_path.get_transformed_path_and_affine()
self._lineFunc = getattr(self, funcname)
funcname = self.drawStyles.get(self._drawstyle, '_draw_lines')
drawFunc = getattr(self, funcname)
drawFunc(renderer, gc, tpath, affine.frozen())
if self._marker is not None:
gc = renderer.new_gc()
self._set_gc_clip(gc)
gc.set_foreground(self.get_markeredgecolor())
gc.set_linewidth(self._markeredgewidth)
gc.set_alpha(self._alpha)
funcname = self._markers.get(self._marker, '_draw_nothing')
if funcname != '_draw_nothing':
tpath, affine = self._transformed_path.get_transformed_points_and_affine()
markerFunc = getattr(self, funcname)
markerFunc(renderer, gc, tpath, affine.frozen())
renderer.close_group('line2d')
def get_antialiased(self): return self._antialiased
def get_color(self): return self._color
def get_drawstyle(self): return self._drawstyle
def get_linestyle(self): return self._linestyle
def get_linewidth(self): return self._linewidth
def get_marker(self): return self._marker
def get_markeredgecolor(self):
if (is_string_like(self._markeredgecolor) and
self._markeredgecolor == 'auto'):
if self._marker in self.filled_markers:
return 'k'
else:
return self._color
else:
return self._markeredgecolor
def get_markeredgewidth(self): return self._markeredgewidth
def get_markerfacecolor(self):
if (self._markerfacecolor is None or
(is_string_like(self._markerfacecolor) and
self._markerfacecolor.lower()=='none') ):
return self._markerfacecolor
elif (is_string_like(self._markerfacecolor) and
self._markerfacecolor.lower() == 'auto'):
return self._color
else:
return self._markerfacecolor
def get_markersize(self): return self._markersize
def get_data(self, orig=True):
"""
Return the xdata, ydata.
If *orig* is *True*, return the original data
"""
return self.get_xdata(orig=orig), self.get_ydata(orig=orig)
def get_xdata(self, orig=True):
"""
Return the xdata.
If *orig* is *True*, return the original data, else the
processed data.
"""
if orig:
return self._xorig
if self._invalid:
self.recache()
return self._x
def get_ydata(self, orig=True):
"""
Return the ydata.
If *orig* is *True*, return the original data, else the
processed data.
"""
if orig:
return self._yorig
if self._invalid:
self.recache()
return self._y
def get_path(self):
"""
Return the :class:`~matplotlib.path.Path` object associated
with this line.
"""
if self._invalid:
self.recache()
return self._path
def get_xydata(self):
"""
Return the *xy* data as a Nx2 numpy array.
"""
if self._invalid:
self.recache()
return self._xy
def set_antialiased(self, b):
"""
        True if line should be drawn with antialiased rendering
ACCEPTS: [True | False]
"""
self._antialiased = b
def set_color(self, color):
"""
Set the color of the line
ACCEPTS: any matplotlib color
"""
self._color = color
def set_drawstyle(self, drawstyle):
"""
Set the drawstyle of the plot
'default' connects the points with lines. The steps variants
produce step-plots. 'steps' is equivalent to 'steps-pre' and
is maintained for backward-compatibility.
ACCEPTS: [ 'default' | 'steps' | 'steps-pre' | 'steps-mid' | 'steps-post' ]
"""
self._drawstyle = drawstyle
def set_linewidth(self, w):
"""
Set the line width in points
ACCEPTS: float value in points
"""
self._linewidth = w
def set_linestyle(self, linestyle):
"""
Set the linestyle of the line (also accepts drawstyles)
================ =================
linestyle description
================ =================
'-' solid
'--' dashed
'-.' dash_dot
':' dotted
'None' draw nothing
' ' draw nothing
'' draw nothing
================ =================
'steps' is equivalent to 'steps-pre' and is maintained for
backward-compatibility.
.. seealso::
:meth:`set_drawstyle`
ACCEPTS: [ '-' | '--' | '-.' | ':' | 'None' | ' ' | '' ] and
any drawstyle in combination with a linestyle, e.g. 'steps--'.
"""
# handle long drawstyle names before short ones !
for ds in flatten([k.keys() for k in (self._drawStyles_l,
self._drawStyles_s)], is_string_like):
if linestyle.startswith(ds):
self.set_drawstyle(ds)
if len(linestyle) > len(ds):
linestyle = linestyle[len(ds):]
else:
linestyle = '-'
if linestyle not in self._lineStyles:
if linestyle in ls_mapper:
linestyle = ls_mapper[linestyle]
else:
verbose.report('Unrecognized line style %s, %s' %
(linestyle, type(linestyle)))
if linestyle in [' ','']:
linestyle = 'None'
self._linestyle = linestyle
def set_marker(self, marker):
"""
Set the line marker
========== ==========================
marker description
========== ==========================
'.' point
',' pixel
'o' circle
'v' triangle_down
'^' triangle_up
'<' triangle_left
'>' triangle_right
'1' tri_down
'2' tri_up
'3' tri_left
'4' tri_right
's' square
'p' pentagon
'*' star
'h' hexagon1
'H' hexagon2
'+' plus
'x' x
'D' diamond
'd' thin_diamond
'|' vline
'_' hline
TICKLEFT tickleft
TICKRIGHT tickright
TICKUP tickup
TICKDOWN tickdown
CARETLEFT caretleft
CARETRIGHT caretright
CARETUP caretup
CARETDOWN caretdown
'None' nothing
' ' nothing
'' nothing
========== ==========================
ACCEPTS: [ '+' | '*' | ',' | '.' | '1' | '2' | '3' | '4'
| '<' | '>' | 'D' | 'H' | '^' | '_' | 'd'
| 'h' | 'o' | 'p' | 's' | 'v' | 'x' | '|'
| TICKUP | TICKDOWN | TICKLEFT | TICKRIGHT
| 'None' | ' ' | '' ]
"""
if marker not in self._markers:
verbose.report('Unrecognized marker style %s, %s' %
(marker, type(marker)))
if marker in [' ','']:
marker = 'None'
self._marker = marker
self._markerFunc = self._markers[marker]
def set_markeredgecolor(self, ec):
"""
Set the marker edge color
ACCEPTS: any matplotlib color
"""
if ec is None :
ec = 'auto'
self._markeredgecolor = ec
def set_markeredgewidth(self, ew):
"""
Set the marker edge width in points
ACCEPTS: float value in points
"""
if ew is None :
ew = rcParams['lines.markeredgewidth']
self._markeredgewidth = ew
def set_markerfacecolor(self, fc):
"""
Set the marker face color
ACCEPTS: any matplotlib color
"""
if fc is None :
fc = 'auto'
self._markerfacecolor = fc
def set_markersize(self, sz):
"""
Set the marker size in points
ACCEPTS: float
"""
self._markersize = sz
def set_xdata(self, x):
"""
Set the data np.array for x
ACCEPTS: 1D array
"""
x = np.asarray(x)
self.set_data(x, self._yorig)
def set_ydata(self, y):
"""
Set the data np.array for y
ACCEPTS: 1D array
"""
y = np.asarray(y)
self.set_data(self._xorig, y)
def set_dashes(self, seq):
"""
Set the dash sequence, sequence of dashes with on off ink in
points. If seq is empty or if seq = (None, None), the
linestyle will be set to solid.
ACCEPTS: sequence of on/off ink in points
"""
if seq == (None, None) or len(seq)==0:
self.set_linestyle('-')
else:
self.set_linestyle('--')
self._dashSeq = seq # TODO: offset ignored for now
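    # Example usage (illustrative): with the on/off ink semantics above,
    # ``line.set_dashes([4, 2, 1, 2])`` draws a 4pt dash, 2pt gap, 1pt dash,
    # 2pt gap, repeating, while ``line.set_dashes([])`` falls back to a solid
    # linestyle as handled in set_dashes().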
def _draw_lines(self, renderer, gc, path, trans):
self._lineFunc(renderer, gc, path, trans)
def _draw_steps_pre(self, renderer, gc, path, trans):
vertices = self._xy
steps = ma.zeros((2*len(vertices)-1, 2), np.float_)
steps[0::2, 0], steps[1::2, 0] = vertices[:, 0], vertices[:-1, 0]
steps[0::2, 1], steps[1:-1:2, 1] = vertices[:, 1], vertices[1:, 1]
path = Path(steps)
path = path.transformed(self.get_transform())
self._lineFunc(renderer, gc, path, IdentityTransform())
def _draw_steps_post(self, renderer, gc, path, trans):
vertices = self._xy
steps = ma.zeros((2*len(vertices)-1, 2), np.float_)
steps[::2, 0], steps[1:-1:2, 0] = vertices[:, 0], vertices[1:, 0]
steps[0::2, 1], steps[1::2, 1] = vertices[:, 1], vertices[:-1, 1]
path = Path(steps)
path = path.transformed(self.get_transform())
self._lineFunc(renderer, gc, path, IdentityTransform())
def _draw_steps_mid(self, renderer, gc, path, trans):
vertices = self._xy
steps = ma.zeros((2*len(vertices), 2), np.float_)
steps[1:-1:2, 0] = 0.5 * (vertices[:-1, 0] + vertices[1:, 0])
steps[2::2, 0] = 0.5 * (vertices[:-1, 0] + vertices[1:, 0])
steps[0, 0] = vertices[0, 0]
steps[-1, 0] = vertices[-1, 0]
steps[0::2, 1], steps[1::2, 1] = vertices[:, 1], vertices[:, 1]
path = Path(steps)
path = path.transformed(self.get_transform())
self._lineFunc(renderer, gc, path, IdentityTransform())
def _draw_nothing(self, *args, **kwargs):
pass
def _draw_solid(self, renderer, gc, path, trans):
gc.set_linestyle('solid')
renderer.draw_path(gc, path, trans)
def _draw_dashed(self, renderer, gc, path, trans):
gc.set_linestyle('dashed')
if self._dashSeq is not None:
gc.set_dashes(0, self._dashSeq)
renderer.draw_path(gc, path, trans)
def _draw_dash_dot(self, renderer, gc, path, trans):
gc.set_linestyle('dashdot')
renderer.draw_path(gc, path, trans)
def _draw_dotted(self, renderer, gc, path, trans):
gc.set_linestyle('dotted')
renderer.draw_path(gc, path, trans)
def _draw_point(self, renderer, gc, path, path_trans):
w = renderer.points_to_pixels(self._markersize) * \
self._point_size_reduction * 0.5
gc.set_snap(renderer.points_to_pixels(self._markersize) > 3.0)
rgbFace = self._get_rgb_face()
transform = Affine2D().scale(w)
renderer.draw_markers(
gc, Path.unit_circle(), transform, path, path_trans,
rgbFace)
_draw_pixel_transform = Affine2D().translate(-0.5, -0.5)
def _draw_pixel(self, renderer, gc, path, path_trans):
rgbFace = self._get_rgb_face()
gc.set_snap(False)
renderer.draw_markers(gc, Path.unit_rectangle(),
self._draw_pixel_transform,
path, path_trans, rgbFace)
def _draw_circle(self, renderer, gc, path, path_trans):
w = renderer.points_to_pixels(self._markersize) * 0.5
gc.set_snap(renderer.points_to_pixels(self._markersize) > 3.0)
rgbFace = self._get_rgb_face()
transform = Affine2D().scale(w, w)
renderer.draw_markers(
gc, Path.unit_circle(), transform, path, path_trans,
rgbFace)
_triangle_path = Path([[0.0, 1.0], [-1.0, -1.0], [1.0, -1.0], [0.0, 1.0]])
def _draw_triangle_up(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0)
offset = 0.5*renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset, offset)
rgbFace = self._get_rgb_face()
renderer.draw_markers(gc, self._triangle_path, transform,
path, path_trans, rgbFace)
def _draw_triangle_down(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0)
offset = 0.5*renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset, -offset)
rgbFace = self._get_rgb_face()
renderer.draw_markers(gc, self._triangle_path, transform,
path, path_trans, rgbFace)
def _draw_triangle_left(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0)
offset = 0.5*renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset, offset).rotate_deg(90)
rgbFace = self._get_rgb_face()
renderer.draw_markers(gc, self._triangle_path, transform,
path, path_trans, rgbFace)
def _draw_triangle_right(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0)
offset = 0.5*renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset, offset).rotate_deg(-90)
rgbFace = self._get_rgb_face()
renderer.draw_markers(gc, self._triangle_path, transform,
path, path_trans, rgbFace)
def _draw_square(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 2.0)
side = renderer.points_to_pixels(self._markersize)
transform = Affine2D().translate(-0.5, -0.5).scale(side)
rgbFace = self._get_rgb_face()
renderer.draw_markers(gc, Path.unit_rectangle(), transform,
path, path_trans, rgbFace)
def _draw_diamond(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0)
side = renderer.points_to_pixels(self._markersize)
transform = Affine2D().translate(-0.5, -0.5).rotate_deg(45).scale(side)
rgbFace = self._get_rgb_face()
renderer.draw_markers(gc, Path.unit_rectangle(), transform,
path, path_trans, rgbFace)
def _draw_thin_diamond(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 3.0)
offset = renderer.points_to_pixels(self._markersize)
transform = Affine2D().translate(-0.5, -0.5) \
.rotate_deg(45).scale(offset * 0.6, offset)
rgbFace = self._get_rgb_face()
renderer.draw_markers(gc, Path.unit_rectangle(), transform,
path, path_trans, rgbFace)
def _draw_pentagon(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0)
offset = 0.5 * renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset)
rgbFace = self._get_rgb_face()
renderer.draw_markers(gc, Path.unit_regular_polygon(5), transform,
path, path_trans, rgbFace)
def _draw_star(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0)
offset = 0.5 * renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset)
rgbFace = self._get_rgb_face()
_starpath = Path.unit_regular_star(5, innerCircle=0.381966)
renderer.draw_markers(gc, _starpath, transform,
path, path_trans, rgbFace)
def _draw_hexagon1(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0)
offset = 0.5 * renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset)
rgbFace = self._get_rgb_face()
renderer.draw_markers(gc, Path.unit_regular_polygon(6), transform,
path, path_trans, rgbFace)
def _draw_hexagon2(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0)
offset = 0.5 * renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset).rotate_deg(30)
rgbFace = self._get_rgb_face()
renderer.draw_markers(gc, Path.unit_regular_polygon(6), transform,
path, path_trans, rgbFace)
_line_marker_path = Path([[0.0, -1.0], [0.0, 1.0]])
def _draw_vline(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 1.0)
offset = 0.5*renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset)
renderer.draw_markers(gc, self._line_marker_path, transform,
path, path_trans)
def _draw_hline(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 1.0)
offset = 0.5*renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset).rotate_deg(90)
renderer.draw_markers(gc, self._line_marker_path, transform,
path, path_trans)
_tickhoriz_path = Path([[0.0, 0.0], [1.0, 0.0]])
def _draw_tickleft(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 1.0)
offset = renderer.points_to_pixels(self._markersize)
marker_transform = Affine2D().scale(-offset, 1.0)
renderer.draw_markers(gc, self._tickhoriz_path, marker_transform,
path, path_trans)
def _draw_tickright(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 1.0)
offset = renderer.points_to_pixels(self._markersize)
marker_transform = Affine2D().scale(offset, 1.0)
renderer.draw_markers(gc, self._tickhoriz_path, marker_transform,
path, path_trans)
_tickvert_path = Path([[-0.0, 0.0], [-0.0, 1.0]])
def _draw_tickup(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 1.0)
offset = renderer.points_to_pixels(self._markersize)
marker_transform = Affine2D().scale(1.0, offset)
renderer.draw_markers(gc, self._tickvert_path, marker_transform,
path, path_trans)
def _draw_tickdown(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 1.0)
offset = renderer.points_to_pixels(self._markersize)
marker_transform = Affine2D().scale(1.0, -offset)
renderer.draw_markers(gc, self._tickvert_path, marker_transform,
path, path_trans)
_plus_path = Path([[-1.0, 0.0], [1.0, 0.0],
[0.0, -1.0], [0.0, 1.0]],
[Path.MOVETO, Path.LINETO,
Path.MOVETO, Path.LINETO])
def _draw_plus(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 3.0)
offset = 0.5*renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset)
renderer.draw_markers(gc, self._plus_path, transform,
path, path_trans)
_tri_path = Path([[0.0, 0.0], [0.0, -1.0],
[0.0, 0.0], [0.8, 0.5],
[0.0, 0.0], [-0.8, 0.5]],
[Path.MOVETO, Path.LINETO,
Path.MOVETO, Path.LINETO,
Path.MOVETO, Path.LINETO])
def _draw_tri_down(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0)
offset = 0.5*renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset)
renderer.draw_markers(gc, self._tri_path, transform,
path, path_trans)
def _draw_tri_up(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0)
offset = 0.5*renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset).rotate_deg(180)
renderer.draw_markers(gc, self._tri_path, transform,
path, path_trans)
def _draw_tri_left(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0)
offset = 0.5*renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset).rotate_deg(90)
renderer.draw_markers(gc, self._tri_path, transform,
path, path_trans)
def _draw_tri_right(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0)
offset = 0.5*renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset).rotate_deg(270)
renderer.draw_markers(gc, self._tri_path, transform,
path, path_trans)
_caret_path = Path([[-1.0, 1.5], [0.0, 0.0], [1.0, 1.5]])
def _draw_caretdown(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 3.0)
offset = 0.5*renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset)
renderer.draw_markers(gc, self._caret_path, transform,
path, path_trans)
def _draw_caretup(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 3.0)
offset = 0.5*renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset).rotate_deg(180)
renderer.draw_markers(gc, self._caret_path, transform,
path, path_trans)
def _draw_caretleft(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 3.0)
offset = 0.5*renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset).rotate_deg(270)
renderer.draw_markers(gc, self._caret_path, transform,
path, path_trans)
def _draw_caretright(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 3.0)
offset = 0.5*renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset).rotate_deg(90)
renderer.draw_markers(gc, self._caret_path, transform,
path, path_trans)
_x_path = Path([[-1.0, -1.0], [1.0, 1.0],
[-1.0, 1.0], [1.0, -1.0]],
[Path.MOVETO, Path.LINETO,
Path.MOVETO, Path.LINETO])
def _draw_x(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 3.0)
offset = 0.5*renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset)
renderer.draw_markers(gc, self._x_path, transform,
path, path_trans)
def update_from(self, other):
'copy properties from other to self'
Artist.update_from(self, other)
self._linestyle = other._linestyle
self._linewidth = other._linewidth
self._color = other._color
self._markersize = other._markersize
self._markerfacecolor = other._markerfacecolor
self._markeredgecolor = other._markeredgecolor
self._markeredgewidth = other._markeredgewidth
self._dashSeq = other._dashSeq
self._dashcapstyle = other._dashcapstyle
self._dashjoinstyle = other._dashjoinstyle
self._solidcapstyle = other._solidcapstyle
self._solidjoinstyle = other._solidjoinstyle
self._linestyle = other._linestyle
self._marker = other._marker
self._drawstyle = other._drawstyle
def _get_rgb_face(self):
facecolor = self.get_markerfacecolor()
if is_string_like(facecolor) and facecolor.lower()=='none':
rgbFace = None
else:
rgbFace = colorConverter.to_rgb(facecolor)
return rgbFace
# some aliases....
def set_aa(self, val):
'alias for set_antialiased'
self.set_antialiased(val)
def set_c(self, val):
'alias for set_color'
self.set_color(val)
def set_ls(self, val):
'alias for set_linestyle'
self.set_linestyle(val)
def set_lw(self, val):
'alias for set_linewidth'
self.set_linewidth(val)
def set_mec(self, val):
'alias for set_markeredgecolor'
self.set_markeredgecolor(val)
def set_mew(self, val):
'alias for set_markeredgewidth'
self.set_markeredgewidth(val)
def set_mfc(self, val):
'alias for set_markerfacecolor'
self.set_markerfacecolor(val)
def set_ms(self, val):
'alias for set_markersize'
self.set_markersize(val)
def get_aa(self):
'alias for get_antialiased'
return self.get_antialiased()
def get_c(self):
'alias for get_color'
return self.get_color()
def get_ls(self):
'alias for get_linestyle'
return self.get_linestyle()
def get_lw(self):
'alias for get_linewidth'
return self.get_linewidth()
def get_mec(self):
'alias for get_markeredgecolor'
return self.get_markeredgecolor()
def get_mew(self):
'alias for get_markeredgewidth'
return self.get_markeredgewidth()
def get_mfc(self):
'alias for get_markerfacecolor'
return self.get_markerfacecolor()
def get_ms(self):
'alias for get_markersize'
return self.get_markersize()
def set_dash_joinstyle(self, s):
"""
Set the join style for dashed linestyles
ACCEPTS: ['miter' | 'round' | 'bevel']
"""
s = s.lower()
if s not in self.validJoin:
raise ValueError('set_dash_joinstyle passed "%s";\n' % (s,)
+ 'valid joinstyles are %s' % (self.validJoin,))
self._dashjoinstyle = s
def set_solid_joinstyle(self, s):
"""
Set the join style for solid linestyles
ACCEPTS: ['miter' | 'round' | 'bevel']
"""
s = s.lower()
if s not in self.validJoin:
raise ValueError('set_solid_joinstyle passed "%s";\n' % (s,)
+ 'valid joinstyles are %s' % (self.validJoin,))
self._solidjoinstyle = s
def get_dash_joinstyle(self):
"""
Get the join style for dashed linestyles
"""
return self._dashjoinstyle
def get_solid_joinstyle(self):
"""
Get the join style for solid linestyles
"""
return self._solidjoinstyle
def set_dash_capstyle(self, s):
"""
Set the cap style for dashed linestyles
ACCEPTS: ['butt' | 'round' | 'projecting']
"""
s = s.lower()
if s not in self.validCap:
raise ValueError('set_dash_capstyle passed "%s";\n' % (s,)
+ 'valid capstyles are %s' % (self.validCap,))
self._dashcapstyle = s
def set_solid_capstyle(self, s):
"""
Set the cap style for solid linestyles
ACCEPTS: ['butt' | 'round' | 'projecting']
"""
s = s.lower()
if s not in self.validCap:
raise ValueError('set_solid_capstyle passed "%s";\n' % (s,)
+ 'valid capstyles are %s' % (self.validCap,))
self._solidcapstyle = s
def get_dash_capstyle(self):
"""
Get the cap style for dashed linestyles
"""
return self._dashcapstyle
def get_solid_capstyle(self):
"""
Get the cap style for solid linestyles
"""
return self._solidcapstyle
def is_dashed(self):
'return True if line is dashstyle'
return self._linestyle in ('--', '-.', ':')
class VertexSelector:
"""
Manage the callbacks to maintain a list of selected vertices for
:class:`matplotlib.lines.Line2D`. Derived classes should override
:meth:`~matplotlib.lines.VertexSelector.process_selected` to do
something with the picks.
Here is an example which highlights the selected verts with red
circles::
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.lines as lines
class HighlightSelected(lines.VertexSelector):
def __init__(self, line, fmt='ro', **kwargs):
lines.VertexSelector.__init__(self, line)
self.markers, = self.axes.plot([], [], fmt, **kwargs)
def process_selected(self, ind, xs, ys):
self.markers.set_data(xs, ys)
self.canvas.draw()
fig = plt.figure()
ax = fig.add_subplot(111)
x, y = np.random.rand(2, 30)
line, = ax.plot(x, y, 'bs-', picker=5)
selector = HighlightSelected(line)
plt.show()
"""
def __init__(self, line):
"""
Initialize the class with a :class:`matplotlib.lines.Line2D`
instance. The line should already be added to some
:class:`matplotlib.axes.Axes` instance and should have the
picker property set.
"""
if not hasattr(line, 'axes'):
raise RuntimeError('You must first add the line to the Axes')
if line.get_picker() is None:
raise RuntimeError('You must first set the picker property of the line')
self.axes = line.axes
self.line = line
self.canvas = self.axes.figure.canvas
self.cid = self.canvas.mpl_connect('pick_event', self.onpick)
self.ind = set()
def process_selected(self, ind, xs, ys):
"""
Default "do nothing" implementation of the
:meth:`process_selected` method.
*ind* are the indices of the selected vertices. *xs* and *ys*
are the coordinates of the selected vertices.
"""
pass
def onpick(self, event):
        'When the line is picked, update the set of selected indices.'
if event.artist is not self.line: return
for i in event.ind:
if i in self.ind:
self.ind.remove(i)
else:
self.ind.add(i)
ind = list(self.ind)
ind.sort()
xdata, ydata = self.line.get_data()
self.process_selected(ind, xdata[ind], ydata[ind])
lineStyles = Line2D._lineStyles
lineMarkers = Line2D._markers
drawStyles = Line2D.drawStyles
artist.kwdocd['Line2D'] = artist.kwdoc(Line2D)
# You can not set the docstring of an instancemethod,
# but you can on the underlying function. Go figure.
Line2D.__init__.im_func.__doc__ = dedent(Line2D.__init__.__doc__) % artist.kwdocd
| agpl-3.0 |
khkaminska/bokeh | bokeh/_legacy_charts/builder/tests/test_histogram_builder.py | 6 | 4247 | """ This is the Bokeh charts testing interface.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
from collections import OrderedDict
import unittest
from mock import patch
import numpy as np
from numpy.testing import assert_array_equal, assert_array_almost_equal
import pandas as pd
from bokeh._legacy_charts import Histogram
from ._utils import create_chart
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
class TestHistogram(unittest.TestCase):
def test_supported_input(self):
normal = [1, 2, 3, 1]
lognormal = [5, 4, 4, 1]
xyvalues = OrderedDict(normal=normal, lognormal=lognormal)
xyvaluesdf = pd.DataFrame(xyvalues)
        expected = dict(
leftnormal=[1., 1.4, 1.8, 2.2, 2.6],
rightnormal=[1.4, 1.8, 2.2, 2.6, 3.],
lognormal=[5, 4, 4, 1],
edgeslognormal=[1., 1.8, 2.6, 3.4, 4.2, 5.],
bottomlognormal=[0, 0, 0, 0, 0],
bottomnormal=[0, 0, 0, 0, 0],
edgesnormal=[1., 1.4, 1.8, 2.2, 2.6, 3.],
histlognormal=[0.3125, 0., 0., 0.625, 0.3125],
leftlognormal=[1., 1.8, 2.6, 3.4, 4.2],
normal=[1, 2, 3, 1],
rightlognormal=[1.8, 2.6, 3.4, 4.2, 5.],
histnormal=[1.25, 0., 0.625, 0., 0.625],
)
for i, _xy in enumerate([xyvalues, xyvaluesdf]):
hm = create_chart(Histogram, _xy, bins=5)
builder = hm._builders[0]
self.assertEqual(sorted(builder._groups), sorted(list(xyvalues.keys())))
            for key, expected_v in expected.items():
assert_array_almost_equal(builder._data[key], expected_v, decimal=2)
lvalues = [[1, 2, 3, 1], [5, 4, 4, 1]]
for i, _xy in enumerate([lvalues, np.array(lvalues)]):
hm = create_chart(Histogram, _xy, bins=5)
builder = hm._builders[0]
self.assertEqual(builder._groups, ['0', '1'])
            for key, expected_v in expected.items():
# replace the keys because we have 0, 1 instead of normal and lognormal
key = key.replace('lognormal', '1').replace('normal', '0')
assert_array_almost_equal(builder._data[key], expected_v, decimal=2)
@patch('bokeh._legacy_charts.builder.histogram_builder.np.histogram', return_value=([1, 3, 4], [2.4, 4]))
def test_histogram_params(self, histogram_mock):
inputs = [[5, 0, 0.5, True], [3, 1, 0, False]]
normal = [1, 2, 3, 1]
lognormal = [5, 4, 4, 1]
xyvalues = OrderedDict()
xyvalues['normal'] = normal
xyvalues['lognormal'] = lognormal
for (bins, mu, sigma, dens) in inputs:
histogram_mock.reset_mock()
kws = dict(bins=bins, mu=mu, sigma=sigma, density=dens)
hm = create_chart(Histogram, xyvalues, compute_values=False, **kws)
builder = hm._builders[0]
# ensure all class attributes have been correctly set
for key, value in kws.items():
self.assertEqual(getattr(builder, key), value)
builder._process_data()
# ensure we are calling numpy.histogram with the right args
calls = histogram_mock.call_args_list
assert_array_equal(calls[0][0][0], np.array([1, 2, 3, 1]))
assert_array_equal(calls[1][0][0], np.array([5, 4, 4, 1]))
self.assertEqual(calls[0][1]['bins'], bins)
self.assertEqual(calls[1][1]['bins'], bins)
self.assertEqual(calls[0][1]['density'], dens)
self.assertEqual(calls[1][1]['density'], dens)
| bsd-3-clause |
arrow-/simQuad | ground_station/gyro_scope.py | 2 | 5471 | '''
-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
IMPORTANT!!
It is suggested you run this script with mpu_level2.ino first to see and understand
its operation.
Basically this script EXPECTS:
Arduino is providing space separated gyro readings @ ~5ms intervals (via MPU Interrupt).
* Each serial packet must be ASCII and look like:
[x_gyro]<space>[y_gyro]<space>[z_gyro]<newline>
+ You need to specify correct Serial port
+ You need to set the Y-limits of the plot axis.
+ You need to use correct value of "dt".
+ You need to set the correct conversion factor for Gyro readings.
Mode 0 1 2 3
Range +-250 +-500 +-1000 +-2000
Conv. 131 65.5 32.75 16.375
AND it DELIVERS:
* 3 axis loss-less Gyro readings plot (almost real time).
* 3D visualisation of current orientation based on gyro vals
If you want to just plot data in ~real time use {oscilloscope.py}.
-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
'''
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import serial, time
def rotate(v, axis, theta):
'''
Rotates "v" vector about "axis" vector by "theta" radians, returns vector
'''
c = np.cos(theta)
s = np.sin(theta)
t = 1-c
mat = np.array([ [c+axis[0]*axis[0]*t, axis[0]*axis[1]*t-axis[2]*s, axis[0]*axis[2]*t+axis[1]*s],
[axis[0]*axis[1]*t+axis[2]*s, c+axis[1]*axis[1]*t, axis[1]*axis[2]*t-axis[0]*s],
[axis[0]*axis[2]*t-axis[1]*s, axis[1]*axis[2]*t+axis[0]*s, c+axis[2]*axis[2]*t] ])
return mat.dot(v.T)
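# Quick illustrative check of rotate() (pure numpy, no side effects): turning
# the x unit vector about the z axis by 90 degrees (pi/2 rad) should give the
# y unit vector. Assumes unit-length axis vectors, as used throughout below.
_rot_check = rotate(np.array([1., 0., 0.]), np.array([0., 0., 1.]), np.pi/2)
assert np.allclose(_rot_check, np.array([0., 1., 0.]))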
def calcPose(omega):
'''
Helper function. Finds the "d"-theta, then calls rotate.
Omega must be in ** degrees per second **
'''
theta = omega*dt*np.pi/180 #theta is "d-theta" in radians
rpy[1] = rotate(rpy[1], rpy[0], theta[0])
rpy[0] = rotate(rpy[0], rpy[1], theta[1])
rpy[2] = np.cross(rpy[0], rpy[1])
rpy[1] = rotate(rpy[1], rpy[2], theta[2])
rpy[0] = rotate(rpy[0], rpy[2], theta[2])
plt.ion()
# SET CORRECT PORT NUM HERE
arduino = serial.Serial('/dev/ttyACM0', 57600)
# dt is found experimentally. Contact Ananya for details. Basically this the time between
# 2 MPU(gyro) interrupts. The np.pi/180 converts deg/sec to rad/sec.
# SET CORRECT dt HERE. TIME IN SECONDS BETWEEN TWO SENSOR PACKETS AS RECVD. BY ARDUINO.
dt = .005 # 5msec
# rpy is original orientation. These vectors are updated by calcPose()
rpy = np.eye(3)
fig = plt.figure(figsize=(16,6))
axes = fig.add_subplot(121)
a3d = fig.add_subplot(122, projection='3d')
a3d.set_xlim(-1.2,1.2)
a3d.set_ylim(-1.2,1.2)
a3d.set_zlim(-1.2,1.2)
a3d.scatter([0], [0], [0], s=40)
r, = a3d.plot([0,1], [0,0], [0,0], lw=2, c='black')
p, = a3d.plot([0,0], [0,1], [0,0], lw=2, c='red')
a3d.plot([0,2], [0,0], [0,0], c='cyan')
a3d.plot([0,0], [0,2], [0,0], c='brown')
a3d.plot([0,0], [0,0], [0,2], c='green')
a3d.plot([0,-2], [0,0], [0,0], ls='--', c='cyan')
a3d.plot([0,0], [0,-2], [0,0], ls='--', c='brown')
a3d.plot([0,0], [0,0], [0,-2], ls='--', c='green')
num_samples = 0
buff = 0
# "buff" counts till 50. Every time it reaches fifty, plt.draw() is called, since
# plt.draw() is a costly operation. Normal list append and pose calculations are fast.
# So, do those diligently, for every sample, but update display
# rarely (while ensuring smooth animation).
gyro_x = [0]
gyro_y = [0] # gyro data lists. I use them like queues.
gyro_z = [0]
t = [0]
# scopes is a list of 3 matplotlib.Line_2D objects.
scopes = [axes.plot(t, gyro_x, label=r'$\omega_x$')[0], axes.plot(t, gyro_y, label=r'$\omega_y$')[0], axes.plot(t, gyro_z, label=r'$\omega_z$')[0]]
axes.legend(prop=dict(size=14))
plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
ncol=3, mode="expand", borderaxespad=0.)
axes.set_ylim(-505, 505) # SET CORRECT Y-LIM HERE
conversion = 65.5 #Gyro 500 SET CORRECT CONV FACTOR HERE
# Refer datasheet. Convert ADC result into a Physical measurement.
# If you don't understand this, pls. leave project.
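# Worked example of the conversion: in Mode 1 (+-500 deg/s, conversion 65.5),
# a raw reading of 655 maps to 655/65.5 = 10 deg/s; over one dt = 5 ms sample
# that is a 10*0.005 = 0.05 degree step handed to calcPose().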
print 'Me Ready'
time.sleep(2)
#Handshake MAY BE REDUNDANT
print arduino.inWaiting()
arduino.flushInput()
arduino.write('e')
print 'Sent Request...'
data = [0]*6
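# The 12 raw bytes read below form 6 big-endian int16 values; readings with the
# high bit set are sign-extended by subtracting 0x10000 (two's complement).
# Only data[3:], the three gyro axes, is used by this script.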
while True:
try:
num = arduino.read(12)
num = [ord(x) for x in num]
except:
print 'Serial error!'
raise RuntimeError
_ind=0 #this var is connected to for loop below!!
for i in range(0,12, 2):
data[_ind] = (num[i]<<8)|num[i+1]
if data[_ind] & 0x8000:
data[_ind] = data[_ind] - 0x10000
_ind += 1
#print data[3:]
datas = np.array([float(data[3])/conversion, float(data[4])/conversion, float(data[5])/conversion])
gyro_x.append(datas[0])
gyro_y.append(datas[1])
gyro_z.append(datas[2])
num_samples += 1
t.append(num_samples)
calcPose(datas) #This function updates the global variable: "rpy"
if num_samples>200:
del t[0]
del gyro_x[0]
del gyro_y[0]
del gyro_z[0]
axes.set_xlim(t[0], num_samples)
scopes[0].set_data(t, gyro_x)
scopes[1].set_data(t, gyro_y)
scopes[2].set_data(t, gyro_z)
# pose matrix is just an easier way of giving input to the .set_data()
# and .set_3d_properties() methods. You see, line needs 2 (end) points:
# the rpy entries AND THE ORIGIN. pose matrix does just that: specifies
# BOTH end points.
pose = np.array([np.array([np.zeros(3), rpy[0]]).T, np.array([np.zeros(3), rpy[1]]).T, np.array([np.zeros(3), rpy[2]]).T])
r.set_data(pose[0][:2])
r.set_3d_properties(pose[0][2])
p.set_data(pose[1][:2])
p.set_3d_properties(pose[1][2])
if buff>25:
buff=0
plt.draw()
buff += 1
plt.ioff()
plt.show() | gpl-2.0 |
pulinagrawal/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/projections/__init__.py | 69 | 2179 | from geo import AitoffAxes, HammerAxes, LambertAxes
from polar import PolarAxes
from matplotlib import axes
class ProjectionRegistry(object):
"""
Manages the set of projections available to the system.
"""
def __init__(self):
self._all_projection_types = {}
def register(self, *projections):
"""
Register a new set of projection(s).
"""
for projection in projections:
name = projection.name
self._all_projection_types[name] = projection
def get_projection_class(self, name):
"""
Get a projection class from its *name*.
"""
return self._all_projection_types[name]
def get_projection_names(self):
"""
Get a list of the names of all projections currently
registered.
"""
names = self._all_projection_types.keys()
names.sort()
return names
projection_registry = ProjectionRegistry()
projection_registry.register(
axes.Axes,
PolarAxes,
AitoffAxes,
HammerAxes,
LambertAxes)
def register_projection(cls):
projection_registry.register(cls)
def get_projection_class(projection=None):
"""
Get a projection class from its name.
If *projection* is None, a standard rectilinear projection is
returned.
"""
if projection is None:
projection = 'rectilinear'
try:
return projection_registry.get_projection_class(projection)
except KeyError:
raise ValueError("Unknown projection '%s'" % projection)
def projection_factory(projection, figure, rect, **kwargs):
"""
Get a new projection instance.
*projection* is a projection name.
*figure* is a figure to add the axes to.
*rect* is a :class:`~matplotlib.transforms.Bbox` object specifying
the location of the axes within the figure.
Any other kwargs are passed along to the specific projection
constructor being used.
"""
return get_projection_class(projection)(figure, rect, **kwargs)
def get_projection_names():
"""
Get a list of acceptable projection names.
"""
return projection_registry.get_projection_names()
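# Illustrative usage (not executed here): any Axes subclass exposing a ``name``
# attribute can be registered and then looked up by that name, e.g.
#
#     class MyAxes(axes.Axes):
#         name = 'my_projection'
#     register_projection(MyAxes)
#     get_projection_class('my_projection')       # -> MyAxes
#     'my_projection' in get_projection_names()   # -> True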
| agpl-3.0 |
yarikoptic/pystatsmodels | statsmodels/graphics/tests/test_functional.py | 3 | 2815 | import numpy as np
from numpy.testing import dec, assert_equal, assert_almost_equal
from statsmodels.graphics.functional import \
banddepth, fboxplot, rainbowplot
try:
import matplotlib.pyplot as plt
import matplotlib
if matplotlib.__version__ < '1':
raise
have_matplotlib = True
except:
have_matplotlib = False
def test_banddepth_BD2():
xx = np.arange(500) / 150.
y1 = 1 + 0.5 * np.sin(xx)
y2 = 0.3 + np.sin(xx + np.pi/6)
y3 = -0.5 + np.sin(xx + np.pi/6)
y4 = -1 + 0.3 * np.cos(xx + np.pi/6)
data = np.asarray([y1, y2, y3, y4])
depth = banddepth(data, method='BD2')
expected_depth = [0.5, 5./6, 5./6, 0.5]
assert_almost_equal(depth, expected_depth)
## Plot to visualize why we expect this output
#fig = plt.figure()
#ax = fig.add_subplot(111)
#for ii, yy in enumerate([y1, y2, y3, y4]):
# ax.plot(xx, yy, label="y%s" % ii)
#ax.legend()
#plt.show()
def test_banddepth_MBD():
xx = np.arange(5001) / 5000.
y1 = np.zeros(xx.shape)
y2 = 2 * xx - 1
y3 = np.ones(xx.shape) * 0.5
y4 = np.ones(xx.shape) * -0.25
data = np.asarray([y1, y2, y3, y4])
depth = banddepth(data, method='MBD')
expected_depth = [5./6, (2*(0.75-3./8)+3)/6, 3.5/6, (2*3./8+3)/6]
assert_almost_equal(depth, expected_depth, decimal=4)
@dec.skipif(not have_matplotlib)
def test_fboxplot_rainbowplot():
"""Test fboxplot and rainbowplot together, is much faster."""
def harmfunc(t):
"""Test function, combination of a few harmonic terms."""
# Constant, 0 with p=0.9, 1 with p=1 - for creating outliers
ci = int(np.random.random() > 0.9)
a1i = np.random.random() * 0.05
a2i = np.random.random() * 0.05
b1i = (0.15 - 0.1) * np.random.random() + 0.1
b2i = (0.15 - 0.1) * np.random.random() + 0.1
func = (1 - ci) * (a1i * np.sin(t) + a2i * np.cos(t)) + \
ci * (b1i * np.sin(t) + b2i * np.cos(t))
return func
np.random.seed(1234567)
# Some basic test data, Model 6 from Sun and Genton.
t = np.linspace(0, 2 * np.pi, 250)
data = []
for ii in range(20):
data.append(harmfunc(t))
# fboxplot test
fig = plt.figure()
ax = fig.add_subplot(111)
_, depth, ix_depth, ix_outliers = fboxplot(data, wfactor=2, ax=ax)
ix_expected = np.array([13, 4, 15, 19, 8, 6, 3, 16, 9, 7, 1, 5, 2,
12, 17, 11, 14, 10, 0, 18])
assert_equal(ix_depth, ix_expected)
ix_expected2 = np.array([2, 11, 17, 18])
assert_equal(ix_outliers, ix_expected2)
plt.close(fig)
# rainbowplot test (re-uses depth variable)
xdata = np.arange(data[0].size)
fig = rainbowplot(data, xdata=xdata, depth=depth, cmap=plt.cm.rainbow)
plt.close(fig)
| bsd-3-clause |
ammarkhann/FinalSeniorCode | lib/python2.7/site-packages/pandas/core/reshape/merge.py | 3 | 53839 | """
SQL-style merge routines
"""
import copy
import warnings
import string
import numpy as np
from pandas.compat import range, lzip, zip, map, filter
import pandas.compat as compat
from pandas import (Categorical, Series, DataFrame,
Index, MultiIndex, Timedelta)
from pandas.core.frame import _merge_doc
from pandas.core.dtypes.common import (
is_datetime64tz_dtype,
is_datetime64_dtype,
needs_i8_conversion,
is_int64_dtype,
is_categorical_dtype,
is_integer_dtype,
is_float_dtype,
is_numeric_dtype,
is_integer,
is_int_or_datetime_dtype,
is_dtype_equal,
is_bool,
is_list_like,
_ensure_int64,
_ensure_float64,
_ensure_object,
_get_dtype)
from pandas.core.dtypes.missing import na_value_for_dtype
from pandas.core.internals import (items_overlap_with_suffix,
concatenate_block_managers)
from pandas.util._decorators import Appender, Substitution
from pandas.core.sorting import is_int64_overflow_possible
import pandas.core.algorithms as algos
import pandas.core.common as com
from pandas._libs import hashtable as libhashtable, join as libjoin, lib
@Substitution('\nleft : DataFrame')
@Appender(_merge_doc, indents=0)
def merge(left, right, how='inner', on=None, left_on=None, right_on=None,
left_index=False, right_index=False, sort=False,
suffixes=('_x', '_y'), copy=True, indicator=False):
op = _MergeOperation(left, right, how=how, on=on, left_on=left_on,
right_on=right_on, left_index=left_index,
right_index=right_index, sort=sort, suffixes=suffixes,
copy=copy, indicator=indicator)
return op.get_result()
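# Illustrative usage sketch (frame and column names are made up for the example):
#
#     left = DataFrame({'key': ['a', 'b', 'c'], 'lval': [1, 2, 3]})
#     right = DataFrame({'key': ['a', 'b', 'd'], 'rval': [4, 5, 6]})
#     merge(left, right, on='key', how='inner')   # keeps keys 'a' and 'b' only
#     merge(left, right, on='key', how='outer')   # keeps 'a', 'b', 'c' and 'd'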
if __debug__:
merge.__doc__ = _merge_doc % '\nleft : DataFrame'
class MergeError(ValueError):
pass
def _groupby_and_merge(by, on, left, right, _merge_pieces,
check_duplicates=True):
"""
groupby & merge; we are always performing a left-by type operation
Parameters
----------
by: field to group
on: duplicates field
left: left frame
right: right frame
_merge_pieces: function for merging
check_duplicates: boolean, default True
should we check & clean duplicates
"""
pieces = []
if not isinstance(by, (list, tuple)):
by = [by]
lby = left.groupby(by, sort=False)
# if we can groupby the rhs
# then we can get vastly better perf
try:
# we will check & remove duplicates if indicated
if check_duplicates:
if on is None:
on = []
elif not isinstance(on, (list, tuple)):
on = [on]
if right.duplicated(by + on).any():
right = right.drop_duplicates(by + on, keep='last')
rby = right.groupby(by, sort=False)
except KeyError:
rby = None
for key, lhs in lby:
if rby is None:
rhs = right
else:
try:
rhs = right.take(rby.indices[key])
except KeyError:
                # key doesn't exist in right
lcols = lhs.columns.tolist()
cols = lcols + [r for r in right.columns
if r not in set(lcols)]
merged = lhs.reindex(columns=cols)
merged.index = range(len(merged))
pieces.append(merged)
continue
merged = _merge_pieces(lhs, rhs)
# make sure join keys are in the merged
# TODO, should _merge_pieces do this?
for k in by:
try:
if k in merged:
merged[k] = key
except:
pass
pieces.append(merged)
# preserve the original order
# if we have a missing piece this can be reset
from pandas.core.reshape.concat import concat
result = concat(pieces, ignore_index=True)
result = result.reindex(columns=pieces[0].columns, copy=False)
return result, lby
def ordered_merge(left, right, on=None,
left_on=None, right_on=None,
left_by=None, right_by=None,
fill_method=None, suffixes=('_x', '_y')):
warnings.warn("ordered_merge is deprecated and replaced by merge_ordered",
FutureWarning, stacklevel=2)
return merge_ordered(left, right, on=on,
left_on=left_on, right_on=right_on,
left_by=left_by, right_by=right_by,
fill_method=fill_method, suffixes=suffixes)
def merge_ordered(left, right, on=None,
left_on=None, right_on=None,
left_by=None, right_by=None,
fill_method=None, suffixes=('_x', '_y'),
how='outer'):
"""Perform merge with optional filling/interpolation designed for ordered
data like time series data. Optionally perform group-wise merge (see
examples)
Parameters
----------
left : DataFrame
right : DataFrame
on : label or list
Field names to join on. Must be found in both DataFrames.
left_on : label or list, or array-like
Field names to join on in left DataFrame. Can be a vector or list of
vectors of the length of the DataFrame to use a particular vector as
the join key instead of columns
right_on : label or list, or array-like
Field names to join on in right DataFrame or vector/list of vectors per
left_on docs
left_by : column name or list of column names
Group left DataFrame by group columns and merge piece by piece with
right DataFrame
right_by : column name or list of column names
Group right DataFrame by group columns and merge piece by piece with
left DataFrame
fill_method : {'ffill', None}, default None
Interpolation method for data
suffixes : 2-length sequence (tuple, list, ...)
Suffix to apply to overlapping column names in the left and right
side, respectively
how : {'left', 'right', 'outer', 'inner'}, default 'outer'
* left: use only keys from left frame (SQL: left outer join)
* right: use only keys from right frame (SQL: right outer join)
* outer: use union of keys from both frames (SQL: full outer join)
* inner: use intersection of keys from both frames (SQL: inner join)
.. versionadded:: 0.19.0
Examples
--------
>>> A >>> B
key lvalue group key rvalue
0 a 1 a 0 b 1
1 c 2 a 1 c 2
2 e 3 a 2 d 3
3 a 1 b
4 c 2 b
5 e 3 b
>>> ordered_merge(A, B, fill_method='ffill', left_by='group')
key lvalue group rvalue
0 a 1 a NaN
1 b 1 a 1
2 c 2 a 2
3 d 2 a 3
4 e 3 a 3
5 f 3 a 4
6 a 1 b NaN
7 b 1 b 1
8 c 2 b 2
9 d 2 b 3
10 e 3 b 3
11 f 3 b 4
Returns
-------
merged : DataFrame
    The output type will be the same as 'left', if it is a subclass
of DataFrame.
See also
--------
merge
merge_asof
"""
def _merger(x, y):
# perform the ordered merge operation
op = _OrderedMerge(x, y, on=on, left_on=left_on, right_on=right_on,
suffixes=suffixes, fill_method=fill_method,
how=how)
return op.get_result()
if left_by is not None and right_by is not None:
raise ValueError('Can only group either left or right frames')
elif left_by is not None:
result, _ = _groupby_and_merge(left_by, on, left, right,
lambda x, y: _merger(x, y),
check_duplicates=False)
elif right_by is not None:
result, _ = _groupby_and_merge(right_by, on, right, left,
lambda x, y: _merger(y, x),
check_duplicates=False)
else:
result = _merger(left, right)
return result
ordered_merge.__doc__ = merge_ordered.__doc__
def merge_asof(left, right, on=None,
left_on=None, right_on=None,
left_index=False, right_index=False,
by=None, left_by=None, right_by=None,
suffixes=('_x', '_y'),
tolerance=None,
allow_exact_matches=True,
direction='backward'):
"""Perform an asof merge. This is similar to a left-join except that we
match on nearest key rather than equal keys.
Both DataFrames must be sorted by the key.
For each row in the left DataFrame:
- A "backward" search selects the last row in the right DataFrame whose
'on' key is less than or equal to the left's key.
- A "forward" search selects the first row in the right DataFrame whose
'on' key is greater than or equal to the left's key.
- A "nearest" search selects the row in the right DataFrame whose 'on'
key is closest in absolute distance to the left's key.
The default is "backward" and is compatible in versions below 0.20.0.
The direction parameter was added in version 0.20.0 and introduces
"forward" and "nearest".
Optionally match on equivalent keys with 'by' before searching with 'on'.
.. versionadded:: 0.19.0
Parameters
----------
left : DataFrame
right : DataFrame
on : label
Field name to join on. Must be found in both DataFrames.
The data MUST be ordered. Furthermore this must be a numeric column,
such as datetimelike, integer, or float. On or left_on/right_on
must be given.
left_on : label
Field name to join on in left DataFrame.
right_on : label
Field name to join on in right DataFrame.
left_index : boolean
Use the index of the left DataFrame as the join key.
.. versionadded:: 0.19.2
right_index : boolean
Use the index of the right DataFrame as the join key.
.. versionadded:: 0.19.2
by : column name or list of column names
Match on these columns before performing merge operation.
left_by : column name
Field names to match on in the left DataFrame.
.. versionadded:: 0.19.2
right_by : column name
Field names to match on in the right DataFrame.
.. versionadded:: 0.19.2
suffixes : 2-length sequence (tuple, list, ...)
Suffix to apply to overlapping column names in the left and right
side, respectively.
tolerance : integer or Timedelta, optional, default None
Select asof tolerance within this range; must be compatible
with the merge index.
allow_exact_matches : boolean, default True
- If True, allow matching with the same 'on' value
(i.e. less-than-or-equal-to / greater-than-or-equal-to)
- If False, don't match the same 'on' value
      (i.e., strictly less-than / strictly greater-than)
direction : 'backward' (default), 'forward', or 'nearest'
Whether to search for prior, subsequent, or closest matches.
.. versionadded:: 0.20.0
Returns
-------
merged : DataFrame
Examples
--------
>>> left
a left_val
0 1 a
1 5 b
2 10 c
>>> right
a right_val
0 1 1
1 2 2
2 3 3
3 6 6
4 7 7
>>> pd.merge_asof(left, right, on='a')
a left_val right_val
0 1 a 1
1 5 b 3
2 10 c 7
>>> pd.merge_asof(left, right, on='a', allow_exact_matches=False)
a left_val right_val
0 1 a NaN
1 5 b 3.0
2 10 c 7.0
>>> pd.merge_asof(left, right, on='a', direction='forward')
a left_val right_val
0 1 a 1.0
1 5 b 6.0
2 10 c NaN
>>> pd.merge_asof(left, right, on='a', direction='nearest')
a left_val right_val
0 1 a 1
1 5 b 6
2 10 c 7
We can use indexed DataFrames as well.
>>> left
left_val
1 a
5 b
10 c
>>> right
right_val
1 1
2 2
3 3
6 6
7 7
>>> pd.merge_asof(left, right, left_index=True, right_index=True)
left_val right_val
1 a 1
5 b 3
10 c 7
Here is a real-world times-series example
>>> quotes
time ticker bid ask
0 2016-05-25 13:30:00.023 GOOG 720.50 720.93
1 2016-05-25 13:30:00.023 MSFT 51.95 51.96
2 2016-05-25 13:30:00.030 MSFT 51.97 51.98
3 2016-05-25 13:30:00.041 MSFT 51.99 52.00
4 2016-05-25 13:30:00.048 GOOG 720.50 720.93
5 2016-05-25 13:30:00.049 AAPL 97.99 98.01
6 2016-05-25 13:30:00.072 GOOG 720.50 720.88
7 2016-05-25 13:30:00.075 MSFT 52.01 52.03
>>> trades
time ticker price quantity
0 2016-05-25 13:30:00.023 MSFT 51.95 75
1 2016-05-25 13:30:00.038 MSFT 51.95 155
2 2016-05-25 13:30:00.048 GOOG 720.77 100
3 2016-05-25 13:30:00.048 GOOG 720.92 100
4 2016-05-25 13:30:00.048 AAPL 98.00 100
By default we are taking the asof of the quotes
>>> pd.merge_asof(trades, quotes,
... on='time',
... by='ticker')
time ticker price quantity bid ask
0 2016-05-25 13:30:00.023 MSFT 51.95 75 51.95 51.96
1 2016-05-25 13:30:00.038 MSFT 51.95 155 51.97 51.98
2 2016-05-25 13:30:00.048 GOOG 720.77 100 720.50 720.93
3 2016-05-25 13:30:00.048 GOOG 720.92 100 720.50 720.93
4 2016-05-25 13:30:00.048 AAPL 98.00 100 NaN NaN
    We only asof within 2ms between the quote time and the trade time
>>> pd.merge_asof(trades, quotes,
... on='time',
... by='ticker',
... tolerance=pd.Timedelta('2ms'))
time ticker price quantity bid ask
0 2016-05-25 13:30:00.023 MSFT 51.95 75 51.95 51.96
1 2016-05-25 13:30:00.038 MSFT 51.95 155 NaN NaN
2 2016-05-25 13:30:00.048 GOOG 720.77 100 720.50 720.93
3 2016-05-25 13:30:00.048 GOOG 720.92 100 720.50 720.93
4 2016-05-25 13:30:00.048 AAPL 98.00 100 NaN NaN
    We only asof within 10ms between the quote time and the trade time
    and we exclude exact matches on time. However *prior* data will
    propagate forward
>>> pd.merge_asof(trades, quotes,
... on='time',
... by='ticker',
... tolerance=pd.Timedelta('10ms'),
... allow_exact_matches=False)
time ticker price quantity bid ask
0 2016-05-25 13:30:00.023 MSFT 51.95 75 NaN NaN
1 2016-05-25 13:30:00.038 MSFT 51.95 155 51.97 51.98
2 2016-05-25 13:30:00.048 GOOG 720.77 100 720.50 720.93
3 2016-05-25 13:30:00.048 GOOG 720.92 100 720.50 720.93
4 2016-05-25 13:30:00.048 AAPL 98.00 100 NaN NaN
See also
--------
merge
merge_ordered
"""
op = _AsOfMerge(left, right,
on=on, left_on=left_on, right_on=right_on,
left_index=left_index, right_index=right_index,
by=by, left_by=left_by, right_by=right_by,
suffixes=suffixes,
how='asof', tolerance=tolerance,
allow_exact_matches=allow_exact_matches,
direction=direction)
return op.get_result()
# TODO: transformations??
# TODO: only copy DataFrames when modification necessary
class _MergeOperation(object):
"""
Perform a database (SQL) merge operation between two DataFrame objects
using either columns as keys or their row indexes
"""
_merge_type = 'merge'
def __init__(self, left, right, how='inner', on=None,
left_on=None, right_on=None, axis=1,
left_index=False, right_index=False, sort=True,
suffixes=('_x', '_y'), copy=True, indicator=False):
self.left = self.orig_left = left
self.right = self.orig_right = right
self.how = how
self.axis = axis
self.on = com._maybe_make_list(on)
self.left_on = com._maybe_make_list(left_on)
self.right_on = com._maybe_make_list(right_on)
self.copy = copy
self.suffixes = suffixes
self.sort = sort
self.left_index = left_index
self.right_index = right_index
self.indicator = indicator
if isinstance(self.indicator, compat.string_types):
self.indicator_name = self.indicator
elif isinstance(self.indicator, bool):
self.indicator_name = '_merge' if self.indicator else None
else:
raise ValueError(
'indicator option can only accept boolean or string arguments')
if not isinstance(left, DataFrame):
raise ValueError(
'can not merge DataFrame with instance of '
'type {0}'.format(type(left)))
if not isinstance(right, DataFrame):
raise ValueError(
'can not merge DataFrame with instance of '
'type {0}'.format(type(right)))
if not is_bool(left_index):
raise ValueError(
'left_index parameter must be of type bool, not '
'{0}'.format(type(left_index)))
if not is_bool(right_index):
raise ValueError(
'right_index parameter must be of type bool, not '
'{0}'.format(type(right_index)))
# warn user when merging between different levels
if left.columns.nlevels != right.columns.nlevels:
msg = ('merging between different levels can give an unintended '
'result ({0} levels on the left, {1} on the right)')
msg = msg.format(left.columns.nlevels, right.columns.nlevels)
warnings.warn(msg, UserWarning)
self._validate_specification()
# note this function has side effects
(self.left_join_keys,
self.right_join_keys,
self.join_names) = self._get_merge_keys()
# validate the merge keys dtypes. We may need to coerce
# to avoid incompat dtypes
self._maybe_coerce_merge_keys()
def get_result(self):
if self.indicator:
self.left, self.right = self._indicator_pre_merge(
self.left, self.right)
join_index, left_indexer, right_indexer = self._get_join_info()
ldata, rdata = self.left._data, self.right._data
lsuf, rsuf = self.suffixes
llabels, rlabels = items_overlap_with_suffix(ldata.items, lsuf,
rdata.items, rsuf)
lindexers = {1: left_indexer} if left_indexer is not None else {}
rindexers = {1: right_indexer} if right_indexer is not None else {}
result_data = concatenate_block_managers(
[(ldata, lindexers), (rdata, rindexers)],
axes=[llabels.append(rlabels), join_index],
concat_axis=0, copy=self.copy)
typ = self.left._constructor
result = typ(result_data).__finalize__(self, method=self._merge_type)
if self.indicator:
result = self._indicator_post_merge(result)
self._maybe_add_join_keys(result, left_indexer, right_indexer)
return result
def _indicator_pre_merge(self, left, right):
columns = left.columns.union(right.columns)
for i in ['_left_indicator', '_right_indicator']:
if i in columns:
raise ValueError("Cannot use `indicator=True` option when "
"data contains a column named {}".format(i))
if self.indicator_name in columns:
raise ValueError(
"Cannot use name of an existing column for indicator column")
left = left.copy()
right = right.copy()
left['_left_indicator'] = 1
left['_left_indicator'] = left['_left_indicator'].astype('int8')
right['_right_indicator'] = 2
right['_right_indicator'] = right['_right_indicator'].astype('int8')
return left, right
def _indicator_post_merge(self, result):
result['_left_indicator'] = result['_left_indicator'].fillna(0)
result['_right_indicator'] = result['_right_indicator'].fillna(0)
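        # The two int8 indicator columns encode row provenance: their sum is
        # 1 (left frame only), 2 (right frame only) or 3 (both), and those
        # codes are mapped onto the categorical labels renamed below.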
result[self.indicator_name] = Categorical((result['_left_indicator'] +
result['_right_indicator']),
categories=[1, 2, 3])
result[self.indicator_name] = (
result[self.indicator_name]
.cat.rename_categories(['left_only', 'right_only', 'both']))
result = result.drop(labels=['_left_indicator', '_right_indicator'],
axis=1)
return result
def _maybe_add_join_keys(self, result, left_indexer, right_indexer):
left_has_missing = None
right_has_missing = None
keys = zip(self.join_names, self.left_on, self.right_on)
for i, (name, lname, rname) in enumerate(keys):
if not _should_fill(lname, rname):
continue
take_left, take_right = None, None
if name in result:
if left_indexer is not None and right_indexer is not None:
if name in self.left:
if left_has_missing is None:
left_has_missing = (left_indexer == -1).any()
if left_has_missing:
take_right = self.right_join_keys[i]
if not is_dtype_equal(result[name].dtype,
self.left[name].dtype):
take_left = self.left[name]._values
elif name in self.right:
if right_has_missing is None:
right_has_missing = (right_indexer == -1).any()
if right_has_missing:
take_left = self.left_join_keys[i]
if not is_dtype_equal(result[name].dtype,
self.right[name].dtype):
take_right = self.right[name]._values
elif left_indexer is not None \
and isinstance(self.left_join_keys[i], np.ndarray):
take_left = self.left_join_keys[i]
take_right = self.right_join_keys[i]
if take_left is not None or take_right is not None:
if take_left is None:
lvals = result[name]._values
else:
lfill = na_value_for_dtype(take_left.dtype)
lvals = algos.take_1d(take_left, left_indexer,
fill_value=lfill)
if take_right is None:
rvals = result[name]._values
else:
rfill = na_value_for_dtype(take_right.dtype)
rvals = algos.take_1d(take_right, right_indexer,
fill_value=rfill)
# if we have an all missing left_indexer
# make sure to just use the right values
mask = left_indexer == -1
if mask.all():
key_col = rvals
else:
key_col = Index(lvals).where(~mask, rvals)
if name in result:
result[name] = key_col
else:
result.insert(i, name or 'key_%d' % i, key_col)
def _get_join_indexers(self):
""" return the join indexers """
return _get_join_indexers(self.left_join_keys,
self.right_join_keys,
sort=self.sort,
how=self.how)
def _get_join_info(self):
left_ax = self.left._data.axes[self.axis]
right_ax = self.right._data.axes[self.axis]
if self.left_index and self.right_index and self.how != 'asof':
join_index, left_indexer, right_indexer = \
left_ax.join(right_ax, how=self.how, return_indexers=True,
sort=self.sort)
elif self.right_index and self.how == 'left':
join_index, left_indexer, right_indexer = \
_left_join_on_index(left_ax, right_ax, self.left_join_keys,
sort=self.sort)
elif self.left_index and self.how == 'right':
join_index, right_indexer, left_indexer = \
_left_join_on_index(right_ax, left_ax, self.right_join_keys,
sort=self.sort)
else:
(left_indexer,
right_indexer) = self._get_join_indexers()
if self.right_index:
if len(self.left) > 0:
join_index = self.left.index.take(left_indexer)
else:
join_index = self.right.index.take(right_indexer)
left_indexer = np.array([-1] * len(join_index))
elif self.left_index:
if len(self.right) > 0:
join_index = self.right.index.take(right_indexer)
else:
join_index = self.left.index.take(left_indexer)
right_indexer = np.array([-1] * len(join_index))
else:
join_index = Index(np.arange(len(left_indexer)))
if len(join_index) == 0:
join_index = join_index.astype(object)
return join_index, left_indexer, right_indexer
def _get_merge_keys(self):
"""
Note: has side effects (copy/delete key columns)
Parameters
----------
left
right
on
Returns
-------
left_keys, right_keys
"""
left_keys = []
right_keys = []
join_names = []
right_drop = []
left_drop = []
left, right = self.left, self.right
is_lkey = lambda x: isinstance(
x, (np.ndarray, Series)) and len(x) == len(left)
is_rkey = lambda x: isinstance(
x, (np.ndarray, Series)) and len(x) == len(right)
# Note that pd.merge_asof() has separate 'on' and 'by' parameters. A
# user could, for example, request 'left_index' and 'left_by'. In a
# regular pd.merge(), users cannot specify both 'left_index' and
# 'left_on'. (Instead, users have a MultiIndex). That means the
# self.left_on in this function is always empty in a pd.merge(), but
# a pd.merge_asof(left_index=True, left_by=...) will result in a
# self.left_on array with a None in the middle of it. This requires
# a work-around as designated in the code below.
# See _validate_specification() for where this happens.
# ugh, spaghetti re #733
if _any(self.left_on) and _any(self.right_on):
for lk, rk in zip(self.left_on, self.right_on):
if is_lkey(lk):
left_keys.append(lk)
if is_rkey(rk):
right_keys.append(rk)
join_names.append(None) # what to do?
else:
if rk is not None:
right_keys.append(right[rk]._values)
join_names.append(rk)
else:
# work-around for merge_asof(right_index=True)
right_keys.append(right.index)
join_names.append(right.index.name)
else:
if not is_rkey(rk):
if rk is not None:
right_keys.append(right[rk]._values)
else:
# work-around for merge_asof(right_index=True)
right_keys.append(right.index)
if lk is not None and lk == rk:
# avoid key upcast in corner case (length-0)
if len(left) > 0:
right_drop.append(rk)
else:
left_drop.append(lk)
else:
right_keys.append(rk)
if lk is not None:
left_keys.append(left[lk]._values)
join_names.append(lk)
else:
# work-around for merge_asof(left_index=True)
left_keys.append(left.index)
join_names.append(left.index.name)
elif _any(self.left_on):
for k in self.left_on:
if is_lkey(k):
left_keys.append(k)
join_names.append(None)
else:
left_keys.append(left[k]._values)
join_names.append(k)
if isinstance(self.right.index, MultiIndex):
right_keys = [lev._values.take(lab)
for lev, lab in zip(self.right.index.levels,
self.right.index.labels)]
else:
right_keys = [self.right.index.values]
elif _any(self.right_on):
for k in self.right_on:
if is_rkey(k):
right_keys.append(k)
join_names.append(None)
else:
right_keys.append(right[k]._values)
join_names.append(k)
if isinstance(self.left.index, MultiIndex):
left_keys = [lev._values.take(lab)
for lev, lab in zip(self.left.index.levels,
self.left.index.labels)]
else:
left_keys = [self.left.index.values]
if left_drop:
self.left = self.left.drop(left_drop, axis=1)
if right_drop:
self.right = self.right.drop(right_drop, axis=1)
return left_keys, right_keys, join_names
def _maybe_coerce_merge_keys(self):
# we have valid mergee's but we may have to further
# coerce these if they are originally incompatible types
#
# for example if these are categorical, but are not dtype_equal
# or if we have object and integer dtypes
for lk, rk, name in zip(self.left_join_keys,
self.right_join_keys,
self.join_names):
if (len(lk) and not len(rk)) or (not len(lk) and len(rk)):
continue
# if either left or right is a categorical
# then the must match exactly in categories & ordered
if is_categorical_dtype(lk) and is_categorical_dtype(rk):
if lk.is_dtype_equal(rk):
continue
elif is_categorical_dtype(lk) or is_categorical_dtype(rk):
pass
elif is_dtype_equal(lk.dtype, rk.dtype):
continue
# if we are numeric, then allow differing
# kinds to proceed, eg. int64 and int8
# further if we are object, but we infer to
# the same, then proceed
if (is_numeric_dtype(lk) and is_numeric_dtype(rk)):
if lk.dtype.kind == rk.dtype.kind:
continue
# let's infer and see if we are ok
if lib.infer_dtype(lk) == lib.infer_dtype(rk):
continue
# Houston, we have a problem!
# let's coerce to object
if name in self.left.columns:
self.left = self.left.assign(
**{name: self.left[name].astype(object)})
if name in self.right.columns:
self.right = self.right.assign(
**{name: self.right[name].astype(object)})
def _validate_specification(self):
# Hm, any way to make this logic less complicated??
if self.on is None and self.left_on is None and self.right_on is None:
if self.left_index and self.right_index:
self.left_on, self.right_on = (), ()
elif self.left_index:
if self.right_on is None:
raise MergeError('Must pass right_on or right_index=True')
elif self.right_index:
if self.left_on is None:
raise MergeError('Must pass left_on or left_index=True')
else:
# use the common columns
common_cols = self.left.columns.intersection(
self.right.columns)
if len(common_cols) == 0:
raise MergeError('No common columns to perform merge on')
if not common_cols.is_unique:
raise MergeError("Data columns not unique: %s"
% repr(common_cols))
self.left_on = self.right_on = common_cols
elif self.on is not None:
if self.left_on is not None or self.right_on is not None:
raise MergeError('Can only pass argument "on" OR "left_on" '
'and "right_on", not a combination of both.')
self.left_on = self.right_on = self.on
elif self.left_on is not None:
n = len(self.left_on)
if self.right_index:
if len(self.left_on) != self.right.index.nlevels:
raise ValueError('len(left_on) must equal the number '
'of levels in the index of "right"')
self.right_on = [None] * n
elif self.right_on is not None:
n = len(self.right_on)
if self.left_index:
if len(self.right_on) != self.left.index.nlevels:
raise ValueError('len(right_on) must equal the number '
'of levels in the index of "left"')
self.left_on = [None] * n
if len(self.right_on) != len(self.left_on):
raise ValueError("len(right_on) must equal len(left_on)")
def _get_join_indexers(left_keys, right_keys, sort=False, how='inner',
**kwargs):
"""
Parameters
----------
left_keys: ndarray, Index, Series
right_keys: ndarray, Index, Series
sort: boolean, default False
how: string {'inner', 'outer', 'left', 'right'}, default 'inner'
Returns
-------
tuple of (left_indexer, right_indexer)
indexers into the left_keys, right_keys
"""
from functools import partial
assert len(left_keys) == len(right_keys), \
'left_key and right_keys must be the same length'
# bind `sort` arg. of _factorize_keys
fkeys = partial(_factorize_keys, sort=sort)
# get left & right join labels and num. of levels at each location
llab, rlab, shape = map(list, zip(* map(fkeys, left_keys, right_keys)))
# get flat i8 keys from label lists
lkey, rkey = _get_join_keys(llab, rlab, shape, sort)
# factorize keys to a dense i8 space
# `count` is the num. of unique keys
# set(lkey) | set(rkey) == range(count)
lkey, rkey, count = fkeys(lkey, rkey)
# preserve left frame order if how == 'left' and sort == False
kwargs = copy.copy(kwargs)
if how == 'left':
kwargs['sort'] = sort
join_func = _join_functions[how]
return join_func(lkey, rkey, count, **kwargs)
class _OrderedMerge(_MergeOperation):
_merge_type = 'ordered_merge'
def __init__(self, left, right, on=None, left_on=None, right_on=None,
left_index=False, right_index=False, axis=1,
suffixes=('_x', '_y'), copy=True,
fill_method=None, how='outer'):
self.fill_method = fill_method
_MergeOperation.__init__(self, left, right, on=on, left_on=left_on,
left_index=left_index,
right_index=right_index,
right_on=right_on, axis=axis,
how=how, suffixes=suffixes,
sort=True # factorize sorts
)
def get_result(self):
join_index, left_indexer, right_indexer = self._get_join_info()
# this is a bit kludgy
ldata, rdata = self.left._data, self.right._data
lsuf, rsuf = self.suffixes
llabels, rlabels = items_overlap_with_suffix(ldata.items, lsuf,
rdata.items, rsuf)
if self.fill_method == 'ffill':
left_join_indexer = libjoin.ffill_indexer(left_indexer)
right_join_indexer = libjoin.ffill_indexer(right_indexer)
else:
left_join_indexer = left_indexer
right_join_indexer = right_indexer
lindexers = {
1: left_join_indexer} if left_join_indexer is not None else {}
rindexers = {
1: right_join_indexer} if right_join_indexer is not None else {}
result_data = concatenate_block_managers(
[(ldata, lindexers), (rdata, rindexers)],
axes=[llabels.append(rlabels), join_index],
concat_axis=0, copy=self.copy)
typ = self.left._constructor
result = typ(result_data).__finalize__(self, method=self._merge_type)
self._maybe_add_join_keys(result, left_indexer, right_indexer)
return result
def _asof_function(direction, on_type):
return getattr(libjoin, 'asof_join_%s_%s' % (direction, on_type), None)
def _asof_by_function(direction, on_type, by_type):
return getattr(libjoin, 'asof_join_%s_%s_by_%s' %
(direction, on_type, by_type), None)
_type_casters = {
'int64_t': _ensure_int64,
'double': _ensure_float64,
'object': _ensure_object,
}
_cython_types = {
'uint8': 'uint8_t',
'uint32': 'uint32_t',
'uint16': 'uint16_t',
'uint64': 'uint64_t',
'int8': 'int8_t',
'int32': 'int32_t',
'int16': 'int16_t',
'int64': 'int64_t',
'float16': 'error',
'float32': 'float',
'float64': 'double',
}
def _get_cython_type(dtype):
""" Given a dtype, return a C name like 'int64_t' or 'double' """
type_name = _get_dtype(dtype).name
ctype = _cython_types.get(type_name, 'object')
if ctype == 'error':
raise MergeError('unsupported type: ' + type_name)
return ctype
def _get_cython_type_upcast(dtype):
""" Upcast a dtype to 'int64_t', 'double', or 'object' """
if is_integer_dtype(dtype):
return 'int64_t'
elif is_float_dtype(dtype):
return 'double'
else:
return 'object'
class _AsOfMerge(_OrderedMerge):
_merge_type = 'asof_merge'
def __init__(self, left, right, on=None, left_on=None, right_on=None,
left_index=False, right_index=False,
by=None, left_by=None, right_by=None,
axis=1, suffixes=('_x', '_y'), copy=True,
fill_method=None,
how='asof', tolerance=None,
allow_exact_matches=True,
direction='backward'):
self.by = by
self.left_by = left_by
self.right_by = right_by
self.tolerance = tolerance
self.allow_exact_matches = allow_exact_matches
self.direction = direction
_OrderedMerge.__init__(self, left, right, on=on, left_on=left_on,
right_on=right_on, left_index=left_index,
right_index=right_index, axis=axis,
how=how, suffixes=suffixes,
fill_method=fill_method)
def _validate_specification(self):
super(_AsOfMerge, self)._validate_specification()
# we only allow on to be a single item for on
if len(self.left_on) != 1 and not self.left_index:
raise MergeError("can only asof on a key for left")
if len(self.right_on) != 1 and not self.right_index:
raise MergeError("can only asof on a key for right")
if self.left_index and isinstance(self.left.index, MultiIndex):
raise MergeError("left can only have one index")
if self.right_index and isinstance(self.right.index, MultiIndex):
raise MergeError("right can only have one index")
# set 'by' columns
if self.by is not None:
if self.left_by is not None or self.right_by is not None:
raise MergeError('Can only pass by OR left_by '
'and right_by')
self.left_by = self.right_by = self.by
if self.left_by is None and self.right_by is not None:
raise MergeError('missing left_by')
if self.left_by is not None and self.right_by is None:
raise MergeError('missing right_by')
# add 'by' to our key-list so we can have it in the
# output as a key
if self.left_by is not None:
if not is_list_like(self.left_by):
self.left_by = [self.left_by]
if not is_list_like(self.right_by):
self.right_by = [self.right_by]
if len(self.left_by) != len(self.right_by):
raise MergeError('left_by and right_by must be same length')
self.left_on = self.left_by + list(self.left_on)
self.right_on = self.right_by + list(self.right_on)
# check 'direction' is valid
if self.direction not in ['backward', 'forward', 'nearest']:
raise MergeError('direction invalid: ' + self.direction)
@property
def _asof_key(self):
""" This is our asof key, the 'on' """
return self.left_on[-1]
def _get_merge_keys(self):
# note this function has side effects
(left_join_keys,
right_join_keys,
join_names) = super(_AsOfMerge, self)._get_merge_keys()
# validate index types are the same
for lk, rk in zip(left_join_keys, right_join_keys):
if not is_dtype_equal(lk.dtype, rk.dtype):
raise MergeError("incompatible merge keys, "
"must be the same type")
# validate tolerance; must be a Timedelta if we have a DTI
if self.tolerance is not None:
if self.left_index:
lt = self.left.index
else:
lt = left_join_keys[-1]
msg = "incompatible tolerance, must be compat " \
"with type {0}".format(type(lt))
if is_datetime64_dtype(lt) or is_datetime64tz_dtype(lt):
if not isinstance(self.tolerance, Timedelta):
raise MergeError(msg)
if self.tolerance < Timedelta(0):
raise MergeError("tolerance must be positive")
elif is_int64_dtype(lt):
if not is_integer(self.tolerance):
raise MergeError(msg)
if self.tolerance < 0:
raise MergeError("tolerance must be positive")
else:
raise MergeError("key must be integer or timestamp")
# validate allow_exact_matches
if not is_bool(self.allow_exact_matches):
raise MergeError("allow_exact_matches must be boolean, "
"passed {0}".format(self.allow_exact_matches))
return left_join_keys, right_join_keys, join_names
def _get_join_indexers(self):
""" return the join indexers """
def flip(xs):
""" unlike np.transpose, this returns an array of tuples """
labels = list(string.ascii_lowercase[:len(xs)])
dtypes = [x.dtype for x in xs]
labeled_dtypes = list(zip(labels, dtypes))
return np.array(lzip(*xs), labeled_dtypes)
# values to compare
left_values = (self.left.index.values if self.left_index else
self.left_join_keys[-1])
right_values = (self.right.index.values if self.right_index else
self.right_join_keys[-1])
tolerance = self.tolerance
# we required sortedness in the join keys
msg = " keys must be sorted"
if not Index(left_values).is_monotonic:
raise ValueError('left' + msg)
if not Index(right_values).is_monotonic:
raise ValueError('right' + msg)
# initial type conversion as needed
if needs_i8_conversion(left_values):
left_values = left_values.view('i8')
right_values = right_values.view('i8')
if tolerance is not None:
tolerance = tolerance.value
# a "by" parameter requires special handling
if self.left_by is not None:
# remove 'on' parameter from values if one existed
if self.left_index and self.right_index:
left_by_values = self.left_join_keys
right_by_values = self.right_join_keys
else:
left_by_values = self.left_join_keys[0:-1]
right_by_values = self.right_join_keys[0:-1]
# get tuple representation of values if more than one
if len(left_by_values) == 1:
left_by_values = left_by_values[0]
right_by_values = right_by_values[0]
else:
left_by_values = flip(left_by_values)
right_by_values = flip(right_by_values)
# upcast 'by' parameter because HashTable is limited
by_type = _get_cython_type_upcast(left_by_values.dtype)
by_type_caster = _type_casters[by_type]
left_by_values = by_type_caster(left_by_values)
right_by_values = by_type_caster(right_by_values)
# choose appropriate function by type
on_type = _get_cython_type(left_values.dtype)
func = _asof_by_function(self.direction, on_type, by_type)
return func(left_values,
right_values,
left_by_values,
right_by_values,
self.allow_exact_matches,
tolerance)
else:
# choose appropriate function by type
on_type = _get_cython_type(left_values.dtype)
func = _asof_function(self.direction, on_type)
return func(left_values,
right_values,
self.allow_exact_matches,
tolerance)
def _get_multiindex_indexer(join_keys, index, sort):
from functools import partial
# bind `sort` argument
fkeys = partial(_factorize_keys, sort=sort)
# left & right join labels and num. of levels at each location
rlab, llab, shape = map(list, zip(* map(fkeys, index.levels, join_keys)))
if sort:
rlab = list(map(np.take, rlab, index.labels))
else:
i8copy = lambda a: a.astype('i8', subok=False, copy=True)
rlab = list(map(i8copy, index.labels))
# fix right labels if there were any nulls
for i in range(len(join_keys)):
mask = index.labels[i] == -1
if mask.any():
# check if there already was any nulls at this location
# if there was, it is factorized to `shape[i] - 1`
a = join_keys[i][llab[i] == shape[i] - 1]
if a.size == 0 or not a[0] != a[0]:
shape[i] += 1
rlab[i][mask] = shape[i] - 1
# get flat i8 join keys
lkey, rkey = _get_join_keys(llab, rlab, shape, sort)
# factorize keys to a dense i8 space
lkey, rkey, count = fkeys(lkey, rkey)
return libjoin.left_outer_join(lkey, rkey, count, sort=sort)
def _get_single_indexer(join_key, index, sort=False):
left_key, right_key, count = _factorize_keys(join_key, index, sort=sort)
left_indexer, right_indexer = libjoin.left_outer_join(
_ensure_int64(left_key),
_ensure_int64(right_key),
count, sort=sort)
return left_indexer, right_indexer
def _left_join_on_index(left_ax, right_ax, join_keys, sort=False):
if len(join_keys) > 1:
if not ((isinstance(right_ax, MultiIndex) and
len(join_keys) == right_ax.nlevels)):
raise AssertionError("If more than one join key is given then "
"'right_ax' must be a MultiIndex and the "
"number of join keys must be the number of "
"levels in right_ax")
left_indexer, right_indexer = \
_get_multiindex_indexer(join_keys, right_ax, sort=sort)
else:
jkey = join_keys[0]
left_indexer, right_indexer = \
_get_single_indexer(jkey, right_ax, sort=sort)
if sort or len(left_ax) != len(left_indexer):
# if asked to sort or there are 1-to-many matches
join_index = left_ax.take(left_indexer)
return join_index, left_indexer, right_indexer
# left frame preserves order & length of its index
return left_ax, None, right_indexer
def _right_outer_join(x, y, max_groups):
right_indexer, left_indexer = libjoin.left_outer_join(y, x, max_groups)
return left_indexer, right_indexer
_join_functions = {
'inner': libjoin.inner_join,
'left': libjoin.left_outer_join,
'right': _right_outer_join,
'outer': libjoin.full_outer_join,
}
def _factorize_keys(lk, rk, sort=True):
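    # Factorize both key arrays into a shared dense int64 label space and
    # return (left_labels, right_labels, count_of_unique_keys). Illustrative
    # example (values made up): keys ['a', 'b'] and ['b', 'c'] factorize to
    # labels [0, 1] and [1, 2] with count == 3.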
if is_datetime64tz_dtype(lk) and is_datetime64tz_dtype(rk):
lk = lk.values
rk = rk.values
# if we exactly match in categories, allow us to use codes
if (is_categorical_dtype(lk) and
is_categorical_dtype(rk) and
lk.is_dtype_equal(rk)):
return lk.codes, rk.codes, len(lk.categories)
if is_int_or_datetime_dtype(lk) and is_int_or_datetime_dtype(rk):
klass = libhashtable.Int64Factorizer
lk = _ensure_int64(com._values_from_object(lk))
rk = _ensure_int64(com._values_from_object(rk))
else:
klass = libhashtable.Factorizer
lk = _ensure_object(lk)
rk = _ensure_object(rk)
rizer = klass(max(len(lk), len(rk)))
llab = rizer.factorize(lk)
rlab = rizer.factorize(rk)
count = rizer.get_count()
if sort:
uniques = rizer.uniques.to_array()
llab, rlab = _sort_labels(uniques, llab, rlab)
# NA group
lmask = llab == -1
lany = lmask.any()
rmask = rlab == -1
rany = rmask.any()
if lany or rany:
if lany:
np.putmask(llab, lmask, count)
if rany:
np.putmask(rlab, rmask, count)
count += 1
return llab, rlab, count
def _sort_labels(uniques, left, right):
if not isinstance(uniques, np.ndarray):
# tuplesafe
uniques = Index(uniques).values
l = len(left)
labels = np.concatenate([left, right])
_, new_labels = algos.safe_sort(uniques, labels, na_sentinel=-1)
new_labels = _ensure_int64(new_labels)
new_left, new_right = new_labels[:l], new_labels[l:]
return new_left, new_right
def _get_join_keys(llab, rlab, shape, sort):
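    # Collapse per-level integer labels into a single int64 key per row using
    # a mixed-radix (stride) encoding. If encoding every level at once could
    # overflow int64, only the first `nlev` levels are combined, the partial
    # keys are densified via _factorize_keys, and the function recurses.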
# how many levels can be done without overflow
pred = lambda i: not is_int64_overflow_possible(shape[:i])
nlev = next(filter(pred, range(len(shape), 0, -1)))
# get keys for the first `nlev` levels
stride = np.prod(shape[1:nlev], dtype='i8')
lkey = stride * llab[0].astype('i8', subok=False, copy=False)
rkey = stride * rlab[0].astype('i8', subok=False, copy=False)
for i in range(1, nlev):
stride //= shape[i]
lkey += llab[i] * stride
rkey += rlab[i] * stride
if nlev == len(shape): # all done!
return lkey, rkey
# densify current keys to avoid overflow
lkey, rkey, count = _factorize_keys(lkey, rkey, sort=sort)
llab = [lkey] + llab[nlev:]
rlab = [rkey] + rlab[nlev:]
shape = [count] + shape[nlev:]
return _get_join_keys(llab, rlab, shape, sort)
def _should_fill(lname, rname):
if (not isinstance(lname, compat.string_types) or
not isinstance(rname, compat.string_types)):
return True
return lname == rname
def _any(x):
return x is not None and len(x) > 0 and any([y is not None for y in x])
| mit |
PawarPawan/h2o-v3 | h2o-py/tests/testdir_algos/gbm/pyunit_weightsGBM.py | 2 | 6140 | import sys
sys.path.insert(1, "../../../")
import h2o
import random
import copy
def weights_check(ip,port):
def check_same(data1, data2, min_rows_scale):
gbm1_regression = h2o.gbm(x=data1[["displacement", "power", "weight", "acceleration", "year"]],
y="economy",
training_frame=data1,
min_rows=5,
ntrees=5,
max_depth=5)
gbm2_regression = h2o.gbm(x=data2[["displacement", "power", "weight", "acceleration", "year", "weights"]],
y=data2["economy"],
min_rows=5*min_rows_scale,
weights_column=data2["weights"],
ntrees=5,
max_depth=5)
gbm1_binomial = h2o.gbm(x=data1[["displacement", "power", "weight", "acceleration", "year"]],
y=data1["economy_20mpg"],
min_rows=5,
distribution="bernoulli",
ntrees=5,
max_depth=5)
gbm2_binomial = h2o.gbm(x=data2[["displacement", "power", "weight", "acceleration", "year", "weights"]],
y=data2["economy_20mpg"],
weights_column="weights",
training_frame=data2,
min_rows=5*min_rows_scale,
distribution="bernoulli",
ntrees=5,
max_depth=5)
gbm1_multinomial = h2o.gbm(x=data1[["displacement", "power", "weight", "acceleration", "year"]],
y=data1["cylinders"],
min_rows=5,
distribution="multinomial",
ntrees=5,
max_depth=5)
gbm2_multinomial = h2o.gbm(x=data2[["displacement", "power", "weight", "acceleration", "year", "weights"]],
y=data2["cylinders"],
weights_column="weights",
training_frame=data2,
min_rows=5*min_rows_scale,
distribution="multinomial",
ntrees=5,
max_depth=5)
reg1_mse = gbm1_regression.mse()
reg2_mse = gbm2_regression.mse()
bin1_auc = gbm1_binomial.auc()
bin2_auc = gbm2_binomial.auc()
mul1_mse = gbm1_multinomial.mse()
mul2_mse = gbm2_multinomial.mse()
print "MSE (regresson) no weights vs. weights: {0}, {1}".format(reg1_mse, reg2_mse)
print "AUC (binomial) no weights vs. weights: {0}, {1}".format(bin1_auc, bin2_auc)
print "MSE (multinomial) no weights vs. weights: {0}, {1}".format(mul1_mse, mul2_mse)
assert abs(reg1_mse - reg2_mse) < 1e-6 * reg1_mse, "Expected mse's to be the same, but got {0}, and {1}".format(reg1_mse, reg2_mse)
assert abs(bin1_auc - bin2_auc) < 3e-4 * bin1_auc, "Expected auc's to be the same, but got {0}, and {1}".format(bin1_auc, bin2_auc)
        assert abs(mul1_mse - mul2_mse) < 1e-6 * mul1_mse, "Expected mse's to be the same, but got {0}, and {1}".format(mul1_mse, mul2_mse)
h2o_cars_data = h2o.import_file(h2o.locate("smalldata/junit/cars_20mpg.csv"))
h2o_cars_data["economy_20mpg"] = h2o_cars_data["economy_20mpg"].asfactor()
h2o_cars_data["cylinders"] = h2o_cars_data["cylinders"].asfactor()
# uniform weights same as no weights
random.seed(2222)
weight = random.randint(1,10)
uniform_weights = [[weight] for r in range(406)]
h2o_uniform_weights = h2o.H2OFrame(python_obj=uniform_weights)
h2o_uniform_weights.setNames(["weights"])
h2o_data_uniform_weights = h2o_cars_data.cbind(h2o_uniform_weights)
print "Checking that using uniform weights is equivalent to no weights:"
print
check_same(h2o_cars_data, h2o_data_uniform_weights, weight)
# zero weights same as removed observations
zero_weights = [[0] if random.randint(0,1) else [1] for r in range(406)]
h2o_zero_weights = h2o.H2OFrame(python_obj=zero_weights)
h2o_zero_weights.setNames(["weights"])
h2o_data_zero_weights = h2o_cars_data.cbind(h2o_zero_weights)
h2o_data_zeros_removed = h2o_cars_data[h2o_zero_weights["weights"] == 1]
print "Checking that using some zero weights is equivalent to removing those observations:"
print
check_same(h2o_data_zeros_removed, h2o_data_zero_weights, 1)
# doubled weights same as doubled observations
doubled_weights = [[1] if random.randint(0,1) else [2] for r in range(406)]
h2o_doubled_weights = h2o.H2OFrame(python_obj=doubled_weights)
h2o_doubled_weights.setNames(["weights"])
h2o_data_doubled_weights = h2o_cars_data.cbind(h2o_doubled_weights)
doubled_data = h2o.as_list(h2o_cars_data, use_pandas=False)
colnames = doubled_data.pop(0)
for idx, w in enumerate(doubled_weights):
if w[0] == 2: doubled_data.append(doubled_data[idx])
h2o_data_doubled = h2o.H2OFrame(python_obj=doubled_data)
h2o_data_doubled.setNames(colnames)
h2o_data_doubled["economy_20mpg"] = h2o_data_doubled["economy_20mpg"].asfactor()
h2o_data_doubled["cylinders"] = h2o_data_doubled["cylinders"].asfactor()
h2o_data_doubled_weights["economy_20mpg"] = h2o_data_doubled_weights["economy_20mpg"].asfactor()
h2o_data_doubled_weights["cylinders"] = h2o_data_doubled_weights["cylinders"].asfactor()
print "Checking that doubling some weights is equivalent to doubling those observations:"
print
check_same(h2o_data_doubled, h2o_data_doubled_weights, 1)
# TODO: random weights
# TODO: all zero weights???
# TODO: negative weights???
if __name__ == "__main__":
h2o.run_test(sys.argv, weights_check)
| apache-2.0 |
wkfwkf/statsmodels | statsmodels/tsa/x13.py | 7 | 23281 | """
Run x12/x13-arima specs in a subprocess from Python and curry results back
into python.
Notes
-----
Many of the functions are called x12. However, they are also intended to work
for x13. If this is not the case, it's a bug.
"""
from __future__ import print_function
import os
import subprocess
import tempfile
import re
from warnings import warn
import pandas as pd
from statsmodels.compat.python import iteritems
from statsmodels.tools.tools import Bunch
from statsmodels.tools.sm_exceptions import (X13NotFoundError,
IOWarning, X13Error,
X13Warning)
__all__ = ["x13_arima_select_order", "x13_arima_analysis"]
_binary_names = ('x13as.exe', 'x13as', 'x12a.exe', 'x12a')
class _freq_to_period:
def __getitem__(self, key):
if key.startswith('M'):
return 12
elif key.startswith('Q'):
return 4
_freq_to_period = _freq_to_period()
_period_to_freq = {12 : 'M', 4 : 'Q'}
_log_to_x12 = {True : 'log', False : 'none', None : 'auto'}
_bool_to_yes_no = lambda x : 'yes' if x else 'no'
def _find_x12(x12path=None, prefer_x13=True):
"""
If x12path is not given, then either x13as[.exe] or x12a[.exe] must
be found on the PATH. Otherwise, the environmental variable X12PATH or
X13PATH must be defined. If prefer_x13 is True, only X13PATH is searched
for. If it is false, only X12PATH is searched for.
"""
global _binary_names
if x12path is not None and x12path.endswith(_binary_names):
# remove binary from path if given
x12path = os.path.dirname(x12path)
if not prefer_x13: # search for x12 first
_binary_names = _binary_names[::-1]
if x12path is None:
x12path = os.getenv("X12PATH", "")
if not x12path:
x12path = os.getenv("X13PATH", "")
elif x12path is None:
x12path = os.getenv("X13PATH", "")
if not x12path:
x12path = os.getenv("X12PATH", "")
for binary in _binary_names:
x12 = os.path.join(x12path, binary)
try:
subprocess.check_call(x12, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
return x12
except OSError:
pass
else:
return False
def _check_x12(x12path=None):
x12path = _find_x12(x12path)
if not x12path:
raise X13NotFoundError("x12a and x13as not found on path. Give the "
"path, put them on PATH, or set the "
"X12PATH or X13PATH environmental variable.")
return x12path
def _clean_order(order):
"""
Takes something like (1 1 0)(0 1 1) and returns a arma order, sarma
order tuple. Also accepts (1 1 0) and return arma order and (0, 0, 0)
"""
order = re.findall("\([0-9 ]*?\)", order)
clean = lambda x : tuple(map(int, re.sub("[()]", "", x).split(" ")))
if len(order) > 1:
order, sorder = map(clean, order)
else:
order = clean(order[0])
sorder = (0, 0, 0)
return order, sorder
def run_spec(x12path, specpath, outname=None, meta=False, datameta=False):
if meta and datameta:
raise ValueError("Cannot specify both meta and datameta.")
if meta:
args = [x12path, "-m " + specpath]
elif datameta:
args = [x12path, "-d " + specpath]
else:
args = [x12path, specpath]
if outname:
args += [outname]
return subprocess.Popen(args, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
def _make_automdl_options(maxorder, maxdiff, diff):
options = "\n"
options += "maxorder = ({0} {1})\n".format(maxorder[0], maxorder[1])
if maxdiff is not None: # maxdiff always takes precedence
options += "maxdiff = ({0} {1})\n".format(maxdiff[0], maxdiff[1])
else:
options += "diff = ({0} {1})\n".format(diff[0], diff[1])
return options
def _make_var_names(exog):
if hasattr(exog, "name"):
var_names = exog.name
elif hasattr(exog, "columns"):
var_names = exog.columns
else:
raise ValueError("exog is not a Series or DataFrame or is unnamed.")
try:
var_names = " ".join(var_names)
except TypeError: # cannot have names that are numbers, pandas default
from statsmodels.base.data import _make_exog_names
if exog.ndim == 1:
var_names = "x1"
else:
var_names = " ".join(_make_exog_names(exog))
return var_names
def _make_regression_options(trading, exog):
if not trading and exog is None: # start regression spec
return ""
reg_spec = "regression{\n"
if trading:
reg_spec += " variables = (td)\n"
if exog is not None:
var_names = _make_var_names(exog)
reg_spec += " user = ({0})\n".format(var_names)
reg_spec += " data = ({0})\n".format("\n".join(map(str,
exog.values.ravel().tolist())))
reg_spec += "}\n" # close out regression spec
return reg_spec
def _make_forecast_options(forecast_years):
if forecast_years is None:
return ""
forecast_spec = "forecast{\n"
forecast_spec += "maxlead = ({0})\n}}\n".format(forecast_years)
return forecast_spec
def _check_errors(errors):
errors = errors[errors.find("spc:")+4:].strip()
if errors and 'ERROR' in errors:
raise X13Error(errors)
elif errors and 'WARNING' in errors:
warn(errors, X13Warning)
def _convert_out_to_series(x, dates, name):
"""
Convert x to a DataFrame where x is a string in the format given by
x-13arima-seats output.
"""
from StringIO import StringIO
from pandas import read_table
out = read_table(StringIO(x), skiprows=2, header=None)
return out.set_index(dates).rename(columns={1 : name})[name]
def _open_and_read(fname):
# opens a file, reads it, and make sure it's closed
with open(fname, 'r') as fin:
fout = fin.read()
return fout
class Spec(object):
@property
def spec_name(self):
return self.__class__.__name__.replace("Spec", "")
def create_spec(self, **kwargs):
spec = """{name} {{
{options}
}}
"""
return spec.format(name=self.spec_name,
options=self.options)
def set_options(self, **kwargs):
options = ""
for key, value in kwargs.iteritems():
options += "{0}={1}\n".format(key, value)
self.__dict__.update({key : value})
self.options = options
class SeriesSpec(Spec):
"""
Parameters
----------
data
appendbcst : bool
appendfcst : bool
comptype
compwt
decimals
modelspan
name
period
precision
to_print
to_save
span
start
title
type
Notes
-----
Rarely used arguments
divpower
missingcode
missingval
saveprecision
trimzero
"""
def __init__(self, data, name='Unnamed Series', appendbcst=False,
appendfcst=False,
comptype=None, compwt=1, decimals=0, modelspan=(),
period=12, precision=0, to_print=[], to_save=[], span=(),
start=(1, 1), title='', series_type=None, divpower=None,
missingcode=-99999, missingval=1000000000):
appendbcst, appendfcst = map(_bool_to_yes_no, [appendbcst,
appendfcst,
])
series_name = "\"{0}\"".format(name[:64]) # trim to 64 characters
title = "\"{0}\"".format(title[:79]) # trim to 79 characters
self.set_options(data=data, appendbcst=appendbcst,
appendfcst=appendfcst, period=period, start=start,
title=title, name=series_name,
)
def pandas_to_series_spec(x):
# from statsmodels.tools.data import _check_period_index
# check_period_index(x)
if hasattr(x, 'columns'): # convert to series
if len(x.columns) > 1:
raise ValueError("Does not handle DataFrame with more than one "
"column")
x = x[x.columns[0]]
data = "({0})".format("\n".join(map(str, x.values.tolist())))
# get periodicity
# get start / first data
# give it a title
try:
period = _freq_to_period[x.index.freqstr]
except (AttributeError, ValueError):
from pandas.tseries.api import infer_freq
period = _freq_to_period[infer_freq(x.index)]
start_date = x.index[0]
if period == 12:
year, stperiod = start_date.year, start_date.month
elif period == 4:
year, stperiod = start_date.year, start_date.quarter
else: # pragma: no cover
raise ValueError("Only monthly and quarterly periods are supported."
" Please report or send a pull request if you want "
"this extended.")
if hasattr(x, 'name'):
name = x.name or "Unnamed Series"
else:
name = 'Unnamed Series'
series_spec = SeriesSpec(data=data, name=name, period=period,
title=name, start="{0}.{1}".format(year,
stperiod))
return series_spec
def x13_arima_analysis(endog, maxorder=(2, 1), maxdiff=(2, 1), diff=None,
exog=None, log=None, outlier=True, trading=False,
forecast_years=None, retspec=False,
speconly=False, start=None, freq=None,
print_stdout=False, x12path=None, prefer_x13=True):
"""
Perform x13-arima analysis for monthly or quarterly data.
Parameters
----------
endog : array-like, pandas.Series
The series to model. It is best to use a pandas object with a
DatetimeIndex or PeriodIndex. However, you can pass an array-like
object. If your object does not have a dates index then ``start`` and
``freq`` are not optional.
maxorder : tuple
The maximum order of the regular and seasonal ARMA polynomials to
examine during the model identification. The order for the regular
polynomial must be greater than zero and no larger than 4. The
        order for the seasonal polynomial may be 1 or 2.
maxdiff : tuple
The maximum orders for regular and seasonal differencing in the
automatic differencing procedure. Acceptable inputs for regular
differencing are 1 and 2. The maximum order for seasonal differencing
is 1. If ``diff`` is specified then ``maxdiff`` should be None.
Otherwise, ``diff`` will be ignored. See also ``diff``.
diff : tuple
Fixes the orders of differencing for the regular and seasonal
differencing. Regular differencing may be 0, 1, or 2. Seasonal
differencing may be 0 or 1. ``maxdiff`` must be None, otherwise
``diff`` is ignored.
exog : array-like
Exogenous variables.
log : bool or None
If None, it is automatically determined whether to log the series or
not. If False, logs are not taken. If True, logs are taken.
outlier : bool
Whether or not outliers are tested for and corrected, if detected.
trading : bool
Whether or not trading day effects are tested for.
forecast_years : int
Number of forecasts produced. The default is one year.
retspec : bool
Whether to return the created specification file. Can be useful for
debugging.
speconly : bool
Whether to create the specification file and then return it without
performing the analysis. Can be useful for debugging.
start : str, datetime
Must be given if ``endog`` does not have date information in its index.
Anything accepted by pandas.DatetimeIndex for the start value.
freq : str
        Must be given if ``endog`` does not have date information in its
        index. Anything accepted by pandas.DatetimeIndex for the freq value.
print_stdout : bool
The stdout from X12/X13 is suppressed. To print it out, set this
to True. Default is False.
x12path : str or None
The path to x12 or x13 binary. If None, the program will attempt
to find x13as or x12a on the PATH or by looking at X13PATH or
X12PATH depending on the value of prefer_x13.
prefer_x13 : bool
If True, will look for x13as first and will fallback to the X13PATH
environmental variable. If False, will look for x12a first and will
fallback to the X12PATH environmental variable. If x12path points
to the path for the X12/X13 binary, it does nothing.
Returns
-------
res : Bunch
A bunch object with the following attributes:
- results : str
The full output from the X12/X13 run.
- seasadj : pandas.Series
The final seasonally adjusted ``endog``
- trend : pandas.Series
The trend-cycle component of ``endog``
- irregular : pandas.Series
The final irregular component of ``endog``
- stdout : str
The captured stdout produced by x12/x13.
- spec : str, optional
Returned if ``retspec`` is True. The only thing returned if
``speconly`` is True.
Notes
-----
This works by creating a specification file, writing it to a temporary
directory, invoking X12/X13 in a subprocess, and reading the output
back in.
"""
x12path = _check_x12(x12path)
if not isinstance(endog, (pd.DataFrame, pd.Series)):
if start is None or freq is None:
raise ValueError("start and freq cannot be none if endog is not "
"a pandas object")
endog = pd.Series(endog, index=pd.DatetimeIndex(start=start,
periods=len(endog),
freq=freq))
spec_obj = pandas_to_series_spec(endog)
spec = spec_obj.create_spec()
spec += "transform{{function={0}}}\n".format(_log_to_x12[log])
if outlier:
spec += "outlier{}\n"
options = _make_automdl_options(maxorder, maxdiff, diff)
spec += "automdl{{{0}}}\n".format(options)
spec += _make_regression_options(trading, exog)
spec += _make_forecast_options(forecast_years)
spec += "x11{ save=(d11 d12 d13) }"
if speconly:
return spec
# write it to a tempfile
# TODO: make this more robust - give the user some control?
ftempin = tempfile.NamedTemporaryFile(delete=False, suffix='.spc')
ftempout = tempfile.NamedTemporaryFile(delete=False)
try:
ftempin.write(spec)
ftempin.close()
ftempout.close()
# call x12 arima
p = run_spec(x12path, ftempin.name[:-4], ftempout.name)
p.wait()
stdout = p.stdout.read()
if print_stdout:
print(p.stdout.read())
# check for errors
errors = _open_and_read(ftempout.name + '.err')
_check_errors(errors)
# read in results
results = _open_and_read(ftempout.name + '.out')
seasadj = _open_and_read(ftempout.name + '.d11')
trend = _open_and_read(ftempout.name + '.d12')
irregular = _open_and_read(ftempout.name + '.d13')
finally:
try: # sometimes this gives a permission denied error?
# not sure why. no process should have these open
os.remove(ftempin.name)
os.remove(ftempout.name)
except:
if os.path.exists(ftempin.name):
warn("Failed to delete resource {0}".format(ftempin.name),
IOWarning)
if os.path.exists(ftempout.name):
warn("Failed to delete resource {0}".format(ftempout.name),
IOWarning)
seasadj = _convert_out_to_series(seasadj, endog.index, 'seasadj')
trend = _convert_out_to_series(trend, endog.index, 'trend')
irregular = _convert_out_to_series(irregular, endog.index, 'irregular')
# NOTE: there isn't likely anything in stdout that's not in results
# so may be safe to just suppress and remove it
if not retspec:
res = X13ArimaAnalysisResult(observed=endog, results=results,
seasadj=seasadj, trend=trend,
irregular=irregular, stdout=stdout)
else:
res = X13ArimaAnalysisResult(observed=endog, results=results,
seasadj=seasadj, trend=trend,
irregular=irregular, stdout=stdout,
spec=spec)
return res
def x13_arima_select_order(endog, maxorder=(2, 1), maxdiff=(2, 1), diff=None,
exog=None, log=None, outlier=True, trading=False,
forecast_years=None,
start=None, freq=None, print_stdout=False,
x12path=None, prefer_x13=True):
"""
    Perform automatic seasonal ARIMA order identification using x12/x13 ARIMA.
Parameters
----------
endog : array-like, pandas.Series
The series to model. It is best to use a pandas object with a
DatetimeIndex or PeriodIndex. However, you can pass an array-like
object. If your object does not have a dates index then ``start`` and
``freq`` are not optional.
maxorder : tuple
The maximum order of the regular and seasonal ARMA polynomials to
examine during the model identification. The order for the regular
polynomial must be greater than zero and no larger than 4. The
        order for the seasonal polynomial may be 1 or 2.
maxdiff : tuple
The maximum orders for regular and seasonal differencing in the
automatic differencing procedure. Acceptable inputs for regular
differencing are 1 and 2. The maximum order for seasonal differencing
is 1. If ``diff`` is specified then ``maxdiff`` should be None.
Otherwise, ``diff`` will be ignored. See also ``diff``.
diff : tuple
Fixes the orders of differencing for the regular and seasonal
differencing. Regular differencing may be 0, 1, or 2. Seasonal
differencing may be 0 or 1. ``maxdiff`` must be None, otherwise
``diff`` is ignored.
exog : array-like
Exogenous variables.
log : bool or None
If None, it is automatically determined whether to log the series or
not. If False, logs are not taken. If True, logs are taken.
outlier : bool
Whether or not outliers are tested for and corrected, if detected.
trading : bool
Whether or not trading day effects are tested for.
forecast_years : int
Number of forecasts produced. The default is one year.
start : str, datetime
Must be given if ``endog`` does not have date information in its index.
Anything accepted by pandas.DatetimeIndex for the start value.
freq : str
        Must be given if ``endog`` does not have date information in its
        index. Anything accepted by pandas.DatetimeIndex for the freq value.
print_stdout : bool
The stdout from X12/X13 is suppressed. To print it out, set this
to True. Default is False.
x12path : str or None
The path to x12 or x13 binary. If None, the program will attempt
to find x13as or x12a on the PATH or by looking at X13PATH or X12PATH
depending on the value of prefer_x13.
prefer_x13 : bool
If True, will look for x13as first and will fallback to the X13PATH
environmental variable. If False, will look for x12a first and will
fallback to the X12PATH environmental variable. If x12path points
to the path for the X12/X13 binary, it does nothing.
Returns
-------
results : Bunch
A bunch object that has the following attributes:
- order : tuple
The regular order
- sorder : tuple
The seasonal order
- include_mean : bool
Whether to include a mean or not
- results : str
The full results from the X12/X13 analysis
- stdout : str
The captured stdout from the X12/X13 analysis
Notes
-----
This works by creating a specification file, writing it to a temporary
directory, invoking X12/X13 in a subprocess, and reading the output back
in.
"""
results = x13_arima_analysis(endog, x12path=x12path, exog=exog, log=log,
outlier=outlier, trading=trading,
forecast_years=forecast_years,
maxorder=maxorder, maxdiff=maxdiff, diff=diff,
start=start, freq=freq, prefer_x13=prefer_x13)
model = re.search("(?<=Final automatic model choice : ).*",
results.results)
order = model.group()
if re.search("Mean is not significant", results.results):
include_mean = False
elif re.search("Constant", results.results):
include_mean = True
else:
include_mean = False
order, sorder = _clean_order(order)
res = Bunch(order=order, sorder=sorder, include_mean=include_mean,
results=results.results, stdout=results.stdout)
return res
class X13ArimaAnalysisResult(object):
def __init__(self, **kwargs):
for key, value in iteritems(kwargs):
setattr(self, key, value)
def plot(self):
from statsmodels.graphics.utils import _import_mpl
plt = _import_mpl()
fig, axes = plt.subplots(4, 1, sharex=True)
self.observed.plot(ax=axes[0], legend=False)
axes[0].set_ylabel('Observed')
self.seasadj.plot(ax=axes[1], legend=False)
axes[1].set_ylabel('Seas. Adjusted')
self.trend.plot(ax=axes[2], legend=False)
axes[2].set_ylabel('Trend')
self.irregular.plot(ax=axes[3], legend=False)
axes[3].set_ylabel('Irregular')
fig.tight_layout()
return fig
if __name__ == "__main__":
import numpy as np
from statsmodels.tsa.arima_process import ArmaProcess
np.random.seed(123)
ar = [1, .35, .8]
ma = [1, .8]
arma = ArmaProcess(ar, ma, nobs=100)
assert arma.isstationary()
assert arma.isinvertible()
y = arma.generate_sample()
dates = pd.date_range("1/1/1990", periods=len(y), freq='M')
ts = pd.TimeSeries(y, index=dates)
xpath = "/home/skipper/src/x12arima/x12a"
try:
results = x13_arima_analysis(xpath, ts)
except:
print("Caught exception")
results = x13_arima_analysis(xpath, ts, log=False)
# import pandas as pd
# seas_y = pd.read_csv("usmelec.csv")
# seas_y = pd.TimeSeries(seas_y["usmelec"].values,
# index=pd.DatetimeIndex(seas_y["date"], freq="MS"))
# results = x13_arima_analysis(xpath, seas_y)
| bsd-3-clause |
sowravi/sp17-i524 | project/S17-IO-3017/code/projectearth/kmeansplot.py | 14 | 3018 | import requests
import time
import dblayer
import plotly
import plotly.graph_objs as go
import pandas as pd
import numpy as np
import random
from sklearn.cluster import KMeans
import testfile
NUM_CLUSTER = 3
def generate_color():
    # build one random '#rrggbb' colour; the format string needs exactly
    # three byte values, independent of NUM_CLUSTER
    color = '#{:02x}{:02x}{:02x}'.format(*(random.randint(0, 255) for _ in range(3)))
return color
# Create random colors in list
color_list = []
for i in range(NUM_CLUSTER):
color_list.append(generate_color())
def showMagnitudesInCluster(data):
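    # `data` is expected to be a 2-column array whose first column is depth
    # and second is magnitude, matching the axis labels used below; mkMag()
    # builds it from the MongoDB aggregation result.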
kmeans = KMeans(n_clusters=NUM_CLUSTER)
kmeans.fit(data)
labels = kmeans.labels_
centroids = kmeans.cluster_centers_
plot_data = []
for i in range(NUM_CLUSTER):
ds = data[np.where(labels == i)]
clustername = "Cluster " + str(i+1)
        trace = go.Scatter(x=ds[:, 0], y=ds[:, 1], mode='markers', showlegend=False, name=clustername, marker=dict(size=5, color=color_list[i]))
plot_data.append(trace)
# plot the centroids
trace = go.Scatter(x=centroids[i, 0], y=centroids[i, 1], mode='markers', marker=dict(size=10, color='black'))
plot_data.append(trace)
layout = go.Layout(title='Magnitude Vs. Depth - K-Means Clusters', titlefont=dict(family='Courier New, monospace',size=20,color='#7f7f7f'),
xaxis=dict(title='Depth of Earthquake', titlefont=dict(family='Courier New, monospace',size=18,color='#7f7f7f')),
yaxis=dict(title='Magnitude',titlefont=dict(family='Courier New, monospace',size=18,color='#7f7f7f'))
)
fig = go.Figure(data=plot_data, layout=layout)
div = plotly.offline.plot(fig, include_plotlyjs=True, output_type='div')
return div
def mkMag():
#### TME: Get start time
start_time = time.time()
####
sess = requests.Session()
dbobj = dblayer.classDBLayer()
projection = [
{"$project": {"_id": 0, "mag": "$properties.mag", "depth": {"$arrayElemAt": ["$geometry.coordinates", 2]}}}]
dframe_mag = pd.DataFrame(list(dbobj.doaggregate(projection)))
#### TME: Elapsed time taken to read data from MongoDB
fileobj = testfile.classFileWrite()
elapsed = time.time() - start_time
fileobj.writeline()
str1 = str(elapsed) + " secs required to read " + str(dframe_mag['depth'].count()) + " records from database."
fileobj.writelog("Reading Magnitude and Depth data")
fileobj.writelog(str1)
####
#### TME: Get start time
start_time = time.time()
####
div = showMagnitudesInCluster(dframe_mag.values)
response = """<html><title></title><head><meta charset=\"utf8\"> </head> <body>""" + div + """</body> </html>"""
#### TME: Elapsed time taken to cluster and plot data
elapsed = time.time() - start_time
fileobj.writeline()
str1 = "Applying K-Means clustering and plotting its output \n" + "Time taken: " + str(elapsed)
fileobj.writelog(str1)
fileobj.writeline()
fileobj.closefile()
dbobj.closedb()
return response
| apache-2.0 |
sternb0t/django-pandas | django_pandas/io.py | 1 | 3578 | import pandas as pd
from .utils import update_with_verbose
import django
def to_fields(qs, fieldnames):
for fieldname in fieldnames:
model = qs.model
for fieldname_part in fieldname.split('__'):
try:
field = model._meta.get_field(fieldname_part)
except django.db.models.fields.FieldDoesNotExist:
rels = model._meta.get_all_related_objects_with_model()
for relobj, _ in rels:
if relobj.get_accessor_name() == fieldname_part:
field = relobj.field
model = field.model
break
else:
if hasattr(field, "one_to_many") and field.one_to_many:
model = field.related_model
elif field.get_internal_type() in ('ForeignKey', 'OneToOneField', 'ManyToManyField'):
model = field.rel.to
yield field
def read_frame(qs, fieldnames=(), index_col=None, coerce_float=False,
verbose=True):
"""
Returns a dataframe from a QuerySet
Optionally specify the field names/columns to utilize and
a field as the index
Parameters
----------
qs: The Django QuerySet.
fieldnames: The model field names to use in creating the frame.
You can span a relationship in the usual Django way
by using double underscores to specify a related field
in another model
index_col: specify the field to use for the index. If the index
field is not in the field list it will be appended
coerce_float : boolean, default False
Attempt to convert values to non-string, non-numeric data (like
decimal.Decimal) to floating point, useful for SQL result sets
verbose: boolean If this is ``True`` then populate the DataFrame with the
human readable versions of any foreign key fields else use
the primary keys values.
The human readable version of the foreign key field is
defined in the ``__unicode__`` or ``__str__``
methods of the related class definition
"""
if fieldnames:
if index_col is not None and index_col not in fieldnames:
# Add it to the field names if not already there
fieldnames = tuple(fieldnames) + (index_col,)
fields = to_fields(qs, fieldnames)
elif isinstance(qs, django.db.models.query.ValuesQuerySet):
if django.VERSION < (1, 8):
annotation_field_names = qs.aggregate_names
else:
annotation_field_names = qs.annotation_names
fieldnames = qs.field_names + annotation_field_names + qs.extra_names
fields = [qs.model._meta.get_field(f) for f in qs.field_names] + \
[None] * (len(annotation_field_names) + len(qs.extra_names))
else:
fields = qs.model._meta.fields
fieldnames = [f.name for f in fields]
if isinstance(qs, django.db.models.query.ValuesQuerySet):
recs = list(qs)
else:
recs = list(qs.values_list(*fieldnames))
df = pd.DataFrame.from_records(recs, columns=fieldnames,
coerce_float=coerce_float)
if verbose:
update_with_verbose(df, fieldnames, fields)
if index_col is not None:
df.set_index(index_col, inplace=True)
return df
| bsd-3-clause |
HyperloopTeam/FullOpenMDAO | lib/python2.7/site-packages/matplotlib/figure.py | 10 | 58719 | """
The figure module provides the top-level
:class:`~matplotlib.artist.Artist`, the :class:`Figure`, which
contains all the plot elements. The following classes are defined
:class:`SubplotParams`
control the default spacing of the subplots
:class:`Figure`
top level container for all plot elements
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import warnings
from operator import itemgetter
import numpy as np
from matplotlib import rcParams
from matplotlib import docstring
from matplotlib import __version__ as _mpl_version
import matplotlib.artist as martist
from matplotlib.artist import Artist, allow_rasterization
import matplotlib.cbook as cbook
from matplotlib.cbook import Stack, iterable
from matplotlib import _image
from matplotlib.image import FigureImage
import matplotlib.colorbar as cbar
from matplotlib.axes import Axes, SubplotBase, subplot_class_factory
from matplotlib.blocking_input import BlockingMouseInput, BlockingKeyMouseInput
from matplotlib.legend import Legend
from matplotlib.patches import Rectangle
from matplotlib.projections import (get_projection_names,
process_projection_requirements)
from matplotlib.text import Text, _process_text_args
from matplotlib.transforms import (Affine2D, Bbox, BboxTransformTo,
TransformedBbox)
from matplotlib.backend_bases import NonGuiException
docstring.interpd.update(projection_names=get_projection_names())
class AxesStack(Stack):
"""
Specialization of the Stack to handle all tracking of Axes in a Figure.
This stack stores ``key, (ind, axes)`` pairs, where:
* **key** should be a hash of the args and kwargs
used in generating the Axes.
* **ind** is a serial number for tracking the order
in which axes were added.
The AxesStack is a callable, where ``ax_stack()`` returns
the current axes. Alternatively the :meth:`current_key_axes` will
return the current key and associated axes.
"""
def __init__(self):
Stack.__init__(self)
self._ind = 0
def as_list(self):
"""
Return a list of the Axes instances that have been added to the figure
"""
ia_list = [a for k, a in self._elements]
ia_list.sort()
return [a for i, a in ia_list]
def get(self, key):
"""
Return the Axes instance that was added with *key*.
If it is not present, return None.
"""
item = dict(self._elements).get(key)
if item is None:
return None
return item[1]
def _entry_from_axes(self, e):
ind, k = dict([(a, (ind, k)) for (k, (ind, a)) in self._elements])[e]
return (k, (ind, e))
def remove(self, a):
"""Remove the axes from the stack."""
Stack.remove(self, self._entry_from_axes(a))
def bubble(self, a):
"""
Move the given axes, which must already exist in the
stack, to the top.
"""
return Stack.bubble(self, self._entry_from_axes(a))
def add(self, key, a):
"""
Add Axes *a*, with key *key*, to the stack, and return the stack.
If *a* is already on the stack, don't add it again, but
return *None*.
"""
# All the error checking may be unnecessary; but this method
# is called so seldom that the overhead is negligible.
if not isinstance(a, Axes):
raise ValueError("second argument, %s, is not an Axes" % a)
try:
hash(key)
except TypeError:
raise ValueError("first argument, %s, is not a valid key" % key)
a_existing = self.get(key)
if a_existing is not None:
Stack.remove(self, (key, a_existing))
warnings.warn(
"key %s already existed; Axes is being replaced" % key)
# I don't think the above should ever happen.
if a in self:
return None
self._ind += 1
return Stack.push(self, (key, (self._ind, a)))
def current_key_axes(self):
"""
Return a tuple of ``(key, axes)`` for the active axes.
If no axes exists on the stack, then returns ``(None, None)``.
"""
if not len(self._elements):
return self._default, self._default
else:
key, (index, axes) = self._elements[self._pos]
return key, axes
def __call__(self):
return self.current_key_axes()[1]
def __contains__(self, a):
return a in self.as_list()
class SubplotParams:
"""
A class to hold the parameters for a subplot
"""
def __init__(self, left=None, bottom=None, right=None, top=None,
wspace=None, hspace=None):
"""
All dimensions are fraction of the figure width or height.
All values default to their rc params
The following attributes are available
*left* : 0.125
The left side of the subplots of the figure
*right* : 0.9
The right side of the subplots of the figure
*bottom* : 0.1
The bottom of the subplots of the figure
*top* : 0.9
The top of the subplots of the figure
*wspace* : 0.2
The amount of width reserved for blank space between subplots
*hspace* : 0.2
The amount of height reserved for white space between subplots
"""
self.validate = True
self.update(left, bottom, right, top, wspace, hspace)
def update(self, left=None, bottom=None, right=None, top=None,
wspace=None, hspace=None):
"""
Update the current values. If any kwarg is None, default to
the current value, if set, otherwise to rc
"""
thisleft = getattr(self, 'left', None)
thisright = getattr(self, 'right', None)
thistop = getattr(self, 'top', None)
thisbottom = getattr(self, 'bottom', None)
thiswspace = getattr(self, 'wspace', None)
thishspace = getattr(self, 'hspace', None)
self._update_this('left', left)
self._update_this('right', right)
self._update_this('bottom', bottom)
self._update_this('top', top)
self._update_this('wspace', wspace)
self._update_this('hspace', hspace)
def reset():
self.left = thisleft
self.right = thisright
self.top = thistop
self.bottom = thisbottom
self.wspace = thiswspace
self.hspace = thishspace
if self.validate:
if self.left >= self.right:
reset()
raise ValueError('left cannot be >= right')
if self.bottom >= self.top:
reset()
raise ValueError('bottom cannot be >= top')
def _update_this(self, s, val):
if val is None:
val = getattr(self, s, None)
if val is None:
key = 'figure.subplot.' + s
val = rcParams[key]
setattr(self, s, val)
class Figure(Artist):
"""
The Figure instance supports callbacks through a *callbacks*
attribute which is a :class:`matplotlib.cbook.CallbackRegistry`
    instance. The event you can connect to is 'dpi_changed', and
the callback will be called with ``func(fig)`` where fig is the
:class:`Figure` instance.
*patch*
The figure patch is drawn by a
:class:`matplotlib.patches.Rectangle` instance
*suppressComposite*
For multiple figure images, the figure will make composite
images depending on the renderer option_image_nocomposite
function. If suppressComposite is True|False, this will
override the renderer.
"""
def __str__(self):
return "Figure(%gx%g)" % tuple(self.bbox.size)
def __init__(self,
figsize=None, # defaults to rc figure.figsize
dpi=None, # defaults to rc figure.dpi
facecolor=None, # defaults to rc figure.facecolor
edgecolor=None, # defaults to rc figure.edgecolor
linewidth=0.0, # the default linewidth of the frame
frameon=None, # whether or not to draw the figure frame
subplotpars=None, # default to rc
tight_layout=None, # default to rc figure.autolayout
):
"""
*figsize*
w,h tuple in inches
*dpi*
Dots per inch
*facecolor*
The figure patch facecolor; defaults to rc ``figure.facecolor``
*edgecolor*
The figure patch edge color; defaults to rc ``figure.edgecolor``
*linewidth*
The figure patch edge linewidth; the default linewidth of the frame
*frameon*
If *False*, suppress drawing the figure frame
*subplotpars*
A :class:`SubplotParams` instance, defaults to rc
*tight_layout*
If *False* use *subplotpars*; if *True* adjust subplot
parameters using :meth:`tight_layout` with default padding.
When providing a dict containing the keys `pad`, `w_pad`, `h_pad`
and `rect`, the default :meth:`tight_layout` paddings will be
overridden.
Defaults to rc ``figure.autolayout``.
"""
Artist.__init__(self)
self.callbacks = cbook.CallbackRegistry()
if figsize is None:
figsize = rcParams['figure.figsize']
if dpi is None:
dpi = rcParams['figure.dpi']
if facecolor is None:
facecolor = rcParams['figure.facecolor']
if edgecolor is None:
edgecolor = rcParams['figure.edgecolor']
if frameon is None:
frameon = rcParams['figure.frameon']
self.dpi_scale_trans = Affine2D()
self.dpi = dpi
self.bbox_inches = Bbox.from_bounds(0, 0, *figsize)
self.bbox = TransformedBbox(self.bbox_inches, self.dpi_scale_trans)
self.frameon = frameon
self.transFigure = BboxTransformTo(self.bbox)
# the figurePatch name is deprecated
self.patch = self.figurePatch = Rectangle(
xy=(0, 0), width=1, height=1,
facecolor=facecolor, edgecolor=edgecolor,
linewidth=linewidth)
self._set_artist_props(self.patch)
self.patch.set_aa(False)
self._hold = rcParams['axes.hold']
self.canvas = None
self._suptitle = None
if subplotpars is None:
subplotpars = SubplotParams()
self.subplotpars = subplotpars
self.set_tight_layout(tight_layout)
self._axstack = AxesStack() # track all figure axes and current axes
self.clf()
self._cachedRenderer = None
# TODO: I'd like to dynamically add the _repr_html_ method
# to the figure in the right context, but then IPython doesn't
# use it, for some reason.
def _repr_html_(self):
# We can't use "isinstance" here, because then we'd end up importing
        # webagg unconditionally.
if (self.canvas is not None and
'WebAgg' in self.canvas.__class__.__name__):
from matplotlib.backends import backend_webagg
return backend_webagg.ipython_inline_display(self)
def show(self, warn=True):
"""
If using a GUI backend with pyplot, display the figure window.
If the figure was not created using
:func:`~matplotlib.pyplot.figure`, it will lack a
:class:`~matplotlib.backend_bases.FigureManagerBase`, and
will raise an AttributeError.
For non-GUI backends, this does nothing, in which case
a warning will be issued if *warn* is True (default).
"""
try:
manager = getattr(self.canvas, 'manager')
except AttributeError as err:
raise AttributeError("%s\n"
"Figure.show works only "
"for figures managed by pyplot, normally "
"created by pyplot.figure()." % err)
if manager is not None:
try:
manager.show()
return
except NonGuiException:
pass
if warn:
import warnings
warnings.warn(
"matplotlib is currently using a non-GUI backend, "
"so cannot show the figure")
def _get_axes(self):
return self._axstack.as_list()
axes = property(fget=_get_axes, doc="Read-only: list of axes in Figure")
def _get_dpi(self):
return self._dpi
def _set_dpi(self, dpi):
self._dpi = dpi
self.dpi_scale_trans.clear().scale(dpi, dpi)
self.callbacks.process('dpi_changed', self)
dpi = property(_get_dpi, _set_dpi)
def get_tight_layout(self):
"""
        Return the Boolean flag, True to use :meth:`tight_layout` when drawing.
"""
return self._tight
def set_tight_layout(self, tight):
"""
Set whether :meth:`tight_layout` is used upon drawing.
If None, the rcParams['figure.autolayout'] value will be set.
When providing a dict containing the keys `pad`, `w_pad`, `h_pad`
and `rect`, the default :meth:`tight_layout` paddings will be
overridden.
ACCEPTS: [True | False | dict | None ]
"""
if tight is None:
tight = rcParams['figure.autolayout']
self._tight = bool(tight)
self._tight_parameters = tight if isinstance(tight, dict) else {}
def autofmt_xdate(self, bottom=0.2, rotation=30, ha='right'):
"""
Date ticklabels often overlap, so it is useful to rotate them
and right align them. Also, a common use case is a number of
subplots with shared xaxes where the x-axis is date data. The
ticklabels are often long, and it helps to rotate them on the
bottom subplot and turn them off on other subplots, as well as
turn off xlabels.
*bottom*
The bottom of the subplots for :meth:`subplots_adjust`
*rotation*
The rotation of the xtick labels
*ha*
The horizontal alignment of the xticklabels
"""
allsubplots = np.alltrue([hasattr(ax, 'is_last_row') for ax
in self.axes])
if len(self.axes) == 1:
for label in self.axes[0].get_xticklabels():
label.set_ha(ha)
label.set_rotation(rotation)
else:
if allsubplots:
for ax in self.get_axes():
if ax.is_last_row():
for label in ax.get_xticklabels():
label.set_ha(ha)
label.set_rotation(rotation)
else:
for label in ax.get_xticklabels():
label.set_visible(False)
ax.set_xlabel('')
if allsubplots:
self.subplots_adjust(bottom=bottom)
def get_children(self):
'get a list of artists contained in the figure'
children = [self.patch]
children.extend(self.artists)
children.extend(self.axes)
children.extend(self.lines)
children.extend(self.patches)
children.extend(self.texts)
children.extend(self.images)
children.extend(self.legends)
return children
def contains(self, mouseevent):
"""
Test whether the mouse event occurred on the figure.
Returns True,{}
"""
if six.callable(self._contains):
return self._contains(self, mouseevent)
# inside = mouseevent.x >= 0 and mouseevent.y >= 0
inside = self.bbox.contains(mouseevent.x, mouseevent.y)
return inside, {}
def get_window_extent(self, *args, **kwargs):
'get the figure bounding box in display space; kwargs are void'
return self.bbox
def suptitle(self, t, **kwargs):
"""
Add a centered title to the figure.
kwargs are :class:`matplotlib.text.Text` properties. Using figure
coordinates, the defaults are:
*x* : 0.5
The x location of the text in figure coords
*y* : 0.98
The y location of the text in figure coords
*horizontalalignment* : 'center'
The horizontal alignment of the text
*verticalalignment* : 'top'
The vertical alignment of the text
A :class:`matplotlib.text.Text` instance is returned.
Example::
fig.suptitle('this is the figure title', fontsize=12)
"""
x = kwargs.pop('x', 0.5)
y = kwargs.pop('y', 0.98)
if ('horizontalalignment' not in kwargs) and ('ha' not in kwargs):
kwargs['horizontalalignment'] = 'center'
if ('verticalalignment' not in kwargs) and ('va' not in kwargs):
kwargs['verticalalignment'] = 'top'
sup = self.text(x, y, t, **kwargs)
if self._suptitle is not None:
self._suptitle.set_text(t)
self._suptitle.set_position((x, y))
self._suptitle.update_from(sup)
sup.remove()
else:
self._suptitle = sup
return self._suptitle
def set_canvas(self, canvas):
"""
        Set the canvas that contains the figure
ACCEPTS: a FigureCanvas instance
"""
self.canvas = canvas
def hold(self, b=None):
"""
Set the hold state. If hold is None (default), toggle the
hold state. Else set the hold state to boolean value b.
e.g.::
hold() # toggle hold
hold(True) # hold is on
hold(False) # hold is off
"""
if b is None:
self._hold = not self._hold
else:
self._hold = b
def figimage(self, X,
xo=0,
yo=0,
alpha=None,
norm=None,
cmap=None,
vmin=None,
vmax=None,
origin=None,
**kwargs):
"""
Adds a non-resampled image to the figure.
call signatures::
figimage(X, **kwargs)
adds a non-resampled array *X* to the figure.
::
figimage(X, xo, yo)
with pixel offsets *xo*, *yo*,
*X* must be a float array:
* If *X* is MxN, assume luminance (grayscale)
* If *X* is MxNx3, assume RGB
* If *X* is MxNx4, assume RGBA
Optional keyword arguments:
========= =========================================================
Keyword Description
========= =========================================================
xo or yo An integer, the *x* and *y* image offset in pixels
cmap a :class:`matplotlib.colors.Colormap` instance, e.g.,
cm.jet. If *None*, default to the rc ``image.cmap``
value
norm a :class:`matplotlib.colors.Normalize` instance. The
default is normalization(). This scales luminance -> 0-1
vmin|vmax are used to scale a luminance image to 0-1. If either
is *None*, the min and max of the luminance values will
be used. Note if you pass a norm instance, the settings
for *vmin* and *vmax* will be ignored.
alpha the alpha blending value, default is *None*
origin [ 'upper' | 'lower' ] Indicates where the [0,0] index of
the array is in the upper left or lower left corner of
the axes. Defaults to the rc image.origin value
========= =========================================================
figimage complements the axes image
(:meth:`~matplotlib.axes.Axes.imshow`) which will be resampled
to fit the current axes. If you want a resampled image to
fill the entire figure, you can define an
:class:`~matplotlib.axes.Axes` with size [0,1,0,1].
An :class:`matplotlib.image.FigureImage` instance is returned.
.. plot:: mpl_examples/pylab_examples/figimage_demo.py
Additional kwargs are Artist kwargs passed on to
:class:`~matplotlib.image.FigureImage`
"""
if not self._hold:
self.clf()
im = FigureImage(self, cmap, norm, xo, yo, origin, **kwargs)
im.set_array(X)
im.set_alpha(alpha)
if norm is None:
im.set_clim(vmin, vmax)
self.images.append(im)
im._remove_method = lambda h: self.images.remove(h)
return im
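    # Hedged usage sketch (illustration only, not original code)::
    #
    #     import numpy as np
    #     import matplotlib.pyplot as plt
    #
    #     data = np.random.random((200, 300))   # MxN array -> luminance image
    #     fig = plt.figure()
    #     fig.figimage(data, xo=20, yo=20, cmap='gray', origin='lower')
    #     plt.show()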
def set_size_inches(self, *args, **kwargs):
"""
set_size_inches(w,h, forward=False)
Set the figure size in inches (1in == 2.54cm)
Usage::
fig.set_size_inches(w,h) # OR
fig.set_size_inches((w,h) )
optional kwarg *forward=True* will cause the canvas size to be
automatically updated; e.g., you can resize the figure window
from the shell
ACCEPTS: a w,h tuple with w,h in inches
See Also
--------
matplotlib.Figure.get_size_inches
"""
forward = kwargs.get('forward', False)
if len(args) == 1:
w, h = args[0]
else:
w, h = args
dpival = self.dpi
self.bbox_inches.p1 = w, h
if forward:
dpival = self.dpi
canvasw = w * dpival
canvash = h * dpival
manager = getattr(self.canvas, 'manager', None)
if manager is not None:
manager.resize(int(canvasw), int(canvash))
def get_size_inches(self):
"""
Returns the current size of the figure in inches (1in == 2.54cm)
        as a numpy array.
Returns
-------
size : ndarray
The size of the figure in inches
See Also
--------
matplotlib.Figure.set_size_inches
"""
return np.array(self.bbox_inches.p1)
def get_edgecolor(self):
'Get the edge color of the Figure rectangle'
return self.patch.get_edgecolor()
def get_facecolor(self):
'Get the face color of the Figure rectangle'
return self.patch.get_facecolor()
def get_figwidth(self):
'Return the figwidth as a float'
return self.bbox_inches.width
def get_figheight(self):
'Return the figheight as a float'
return self.bbox_inches.height
def get_dpi(self):
'Return the dpi as a float'
return self.dpi
def get_frameon(self):
'get the boolean indicating frameon'
return self.frameon
def set_edgecolor(self, color):
"""
Set the edge color of the Figure rectangle
ACCEPTS: any matplotlib color - see help(colors)
"""
self.patch.set_edgecolor(color)
def set_facecolor(self, color):
"""
Set the face color of the Figure rectangle
ACCEPTS: any matplotlib color - see help(colors)
"""
self.patch.set_facecolor(color)
def set_dpi(self, val):
"""
Set the dots-per-inch of the figure
ACCEPTS: float
"""
self.dpi = val
def set_figwidth(self, val):
"""
Set the width of the figure in inches
ACCEPTS: float
"""
self.bbox_inches.x1 = val
def set_figheight(self, val):
"""
Set the height of the figure in inches
ACCEPTS: float
"""
self.bbox_inches.y1 = val
def set_frameon(self, b):
"""
Set whether the figure frame (background) is displayed or invisible
ACCEPTS: boolean
"""
self.frameon = b
def delaxes(self, a):
'remove a from the figure and update the current axes'
self._axstack.remove(a)
for func in self._axobservers:
func(self)
def _make_key(self, *args, **kwargs):
'make a hashable key out of args and kwargs'
def fixitems(items):
#items may have arrays and lists in them, so convert them
# to tuples for the key
ret = []
for k, v in items:
# some objects can define __getitem__ without being
# iterable and in those cases the conversion to tuples
# will fail. So instead of using the iterable(v) function
# we simply try and convert to a tuple, and proceed if not.
try:
v = tuple(v)
except Exception:
pass
ret.append((k, v))
return tuple(ret)
def fixlist(args):
ret = []
for a in args:
if iterable(a):
a = tuple(a)
ret.append(a)
return tuple(ret)
key = fixlist(args), fixitems(six.iteritems(kwargs))
return key
@docstring.dedent_interpd
def add_axes(self, *args, **kwargs):
"""
Add an axes at position *rect* [*left*, *bottom*, *width*,
*height*] where all quantities are in fractions of figure
width and height. kwargs are legal
:class:`~matplotlib.axes.Axes` kwargs plus *projection* which
sets the projection type of the axes. (For backward
compatibility, ``polar=True`` may also be provided, which is
equivalent to ``projection='polar'``). Valid values for
*projection* are: %(projection_names)s. Some of these
projections support additional kwargs, which may be provided
to :meth:`add_axes`. Typical usage::
rect = l,b,w,h
fig.add_axes(rect)
fig.add_axes(rect, frameon=False, axisbg='g')
fig.add_axes(rect, polar=True)
fig.add_axes(rect, projection='polar')
fig.add_axes(ax)
If the figure already has an axes with the same parameters,
then it will simply make that axes current and return it. If
you do not want this behavior, e.g., you want to force the
creation of a new Axes, you must use a unique set of args and
kwargs. The axes :attr:`~matplotlib.axes.Axes.label`
attribute has been exposed for this purpose. e.g., if you want
two axes that are otherwise identical to be added to the
figure, make sure you give them unique labels::
fig.add_axes(rect, label='axes1')
fig.add_axes(rect, label='axes2')
In rare circumstances, add_axes may be called with a single
argument, an Axes instance already created in the present
figure but not in the figure's list of axes. For example,
if an axes has been removed with :meth:`delaxes`, it can
be restored with::
fig.add_axes(ax)
In all cases, the :class:`~matplotlib.axes.Axes` instance
will be returned.
In addition to *projection*, the following kwargs are supported:
%(Axes)s
"""
if not len(args):
return
# shortcut the projection "key" modifications later on, if an axes
# with the exact args/kwargs exists, return it immediately.
key = self._make_key(*args, **kwargs)
ax = self._axstack.get(key)
if ax is not None:
self.sca(ax)
return ax
if isinstance(args[0], Axes):
a = args[0]
assert(a.get_figure() is self)
else:
rect = args[0]
projection_class, kwargs, key = process_projection_requirements(
self, *args, **kwargs)
# check that an axes of this type doesn't already exist, if it
# does, set it as active and return it
ax = self._axstack.get(key)
if ax is not None and isinstance(ax, projection_class):
self.sca(ax)
return ax
# create the new axes using the axes class given
a = projection_class(self, rect, **kwargs)
self._axstack.add(key, a)
self.sca(a)
return a
@docstring.dedent_interpd
def add_subplot(self, *args, **kwargs):
"""
Add a subplot. Examples::
fig.add_subplot(111)
# equivalent but more general
fig.add_subplot(1,1,1)
# add subplot with red background
fig.add_subplot(212, axisbg='r')
# add a polar subplot
fig.add_subplot(111, projection='polar')
# add Subplot instance sub
fig.add_subplot(sub)
*kwargs* are legal :class:`~matplotlib.axes.Axes` kwargs plus
*projection*, which chooses a projection type for the axes.
(For backward compatibility, *polar=True* may also be
provided, which is equivalent to *projection='polar'*). Valid
values for *projection* are: %(projection_names)s. Some of
these projections
support additional *kwargs*, which may be provided to
:meth:`add_axes`.
The :class:`~matplotlib.axes.Axes` instance will be returned.
If the figure already has a subplot with key (*args*,
*kwargs*) then it will simply make that subplot current and
return it.
.. seealso:: :meth:`~matplotlib.pyplot.subplot` for an
explanation of the args.
The following kwargs are supported:
%(Axes)s
"""
if not len(args):
return
if len(args) == 1 and isinstance(args[0], int):
args = tuple([int(c) for c in str(args[0])])
if len(args) != 3:
raise ValueError("Integer subplot specification must " +
"be a three digit number. " +
"Not {n:d}".format(n=len(args)))
if isinstance(args[0], SubplotBase):
a = args[0]
assert(a.get_figure() is self)
# make a key for the subplot (which includes the axes object id
# in the hash)
key = self._make_key(*args, **kwargs)
else:
projection_class, kwargs, key = process_projection_requirements(
self, *args, **kwargs)
# try to find the axes with this key in the stack
ax = self._axstack.get(key)
if ax is not None:
if isinstance(ax, projection_class):
# the axes already existed, so set it as active & return
self.sca(ax)
return ax
else:
# Undocumented convenience behavior:
# subplot(111); subplot(111, projection='polar')
# will replace the first with the second.
# Without this, add_subplot would be simpler and
# more similar to add_axes.
self._axstack.remove(ax)
a = subplot_class_factory(projection_class)(self, *args, **kwargs)
self._axstack.add(key, a)
self.sca(a)
return a
def clf(self, keep_observers=False):
"""
Clear the figure.
Set *keep_observers* to True if, for example,
a gui widget is tracking the axes in the figure.
"""
self.suppressComposite = None
self.callbacks = cbook.CallbackRegistry()
for ax in tuple(self.axes): # Iterate over the copy.
ax.cla()
self.delaxes(ax) # removes ax from self._axstack
toolbar = getattr(self.canvas, 'toolbar', None)
if toolbar is not None:
toolbar.update()
self._axstack.clear()
self.artists = []
self.lines = []
self.patches = []
self.texts = []
self.images = []
self.legends = []
if not keep_observers:
self._axobservers = []
self._suptitle = None
def clear(self):
"""
Clear the figure -- synonym for :meth:`clf`.
"""
self.clf()
@allow_rasterization
def draw(self, renderer):
"""
Render the figure using :class:`matplotlib.backend_bases.RendererBase`
instance *renderer*.
"""
# draw the figure bounding box, perhaps none for white figure
if not self.get_visible():
return
renderer.open_group('figure')
if self.get_tight_layout() and self.axes:
try:
self.tight_layout(renderer, **self._tight_parameters)
except ValueError:
pass
# ValueError can occur when resizing a window.
if self.frameon:
self.patch.draw(renderer)
# a list of (zorder, func_to_call, list_of_args)
dsu = []
for a in self.patches:
dsu.append((a.get_zorder(), a, a.draw, [renderer]))
for a in self.lines:
dsu.append((a.get_zorder(), a, a.draw, [renderer]))
for a in self.artists:
dsu.append((a.get_zorder(), a, a.draw, [renderer]))
# override the renderer default if self.suppressComposite
# is not None
not_composite = renderer.option_image_nocomposite()
if self.suppressComposite is not None:
not_composite = self.suppressComposite
if (len(self.images) <= 1 or not_composite or
not cbook.allequal([im.origin for im in self.images])):
for a in self.images:
dsu.append((a.get_zorder(), a, a.draw, [renderer]))
else:
# make a composite image blending alpha
# list of (_image.Image, ox, oy)
mag = renderer.get_image_magnification()
ims = [(im.make_image(mag), im.ox, im.oy, im.get_alpha())
for im in self.images]
im = _image.from_images(self.bbox.height * mag,
self.bbox.width * mag,
ims)
im.is_grayscale = False
l, b, w, h = self.bbox.bounds
def draw_composite():
gc = renderer.new_gc()
gc.set_clip_rectangle(self.bbox)
gc.set_clip_path(self.get_clip_path())
renderer.draw_image(gc, l, b, im)
gc.restore()
dsu.append((self.images[0].get_zorder(), self.images[0],
draw_composite, []))
# render the axes
for a in self.axes:
dsu.append((a.get_zorder(), a, a.draw, [renderer]))
# render the figure text
for a in self.texts:
dsu.append((a.get_zorder(), a, a.draw, [renderer]))
for a in self.legends:
dsu.append((a.get_zorder(), a, a.draw, [renderer]))
dsu = [row for row in dsu if not row[1].get_animated()]
dsu.sort(key=itemgetter(0))
for zorder, a, func, args in dsu:
func(*args)
renderer.close_group('figure')
self._cachedRenderer = renderer
self.canvas.draw_event(renderer)
def draw_artist(self, a):
"""
draw :class:`matplotlib.artist.Artist` instance *a* only --
this is available only after the figure is drawn
"""
assert self._cachedRenderer is not None
a.draw(self._cachedRenderer)
def get_axes(self):
return self.axes
def legend(self, handles, labels, *args, **kwargs):
"""
Place a legend in the figure. Labels are a sequence of
strings, handles is a sequence of
:class:`~matplotlib.lines.Line2D` or
:class:`~matplotlib.patches.Patch` instances, and loc can be a
string or an integer specifying the legend location
USAGE::
legend( (line1, line2, line3),
('label1', 'label2', 'label3'),
'upper right')
The *loc* location codes are::
'best' : 0, (currently not supported for figure legends)
'upper right' : 1,
'upper left' : 2,
'lower left' : 3,
'lower right' : 4,
'right' : 5,
'center left' : 6,
'center right' : 7,
'lower center' : 8,
'upper center' : 9,
'center' : 10,
*loc* can also be an (x,y) tuple in figure coords, which
specifies the lower left of the legend box. figure coords are
(0,0) is the left, bottom of the figure and 1,1 is the right,
top.
Keyword arguments:
*prop*: [ *None* | FontProperties | dict ]
A :class:`matplotlib.font_manager.FontProperties`
instance. If *prop* is a dictionary, a new instance will be
created with *prop*. If *None*, use rc settings.
*numpoints*: integer
The number of points in the legend line, default is 4
*scatterpoints*: integer
            The number of points in the legend for scatter plots, default is 4
*scatteryoffsets*: list of floats
a list of yoffsets for scatter symbols in legend
*markerscale*: [ *None* | scalar ]
The relative size of legend markers vs. original. If *None*, use rc
settings.
*fancybox*: [ *None* | *False* | *True* ]
if *True*, draw a frame with a round fancybox. If *None*, use rc
*shadow*: [ *None* | *False* | *True* ]
If *True*, draw a shadow behind legend. If *None*, use rc settings.
*ncol* : integer
number of columns. default is 1
*mode* : [ "expand" | *None* ]
if mode is "expand", the legend will be horizontally expanded
to fill the axes area (or *bbox_to_anchor*)
*title* : string
the legend title
Padding and spacing between various elements use following keywords
parameters. The dimensions of these values are given as a fraction
of the fontsize. Values from rcParams will be used if None.
================ ====================================================
Keyword Description
================ ====================================================
borderpad the fractional whitespace inside the legend border
labelspacing the vertical space between the legend entries
handlelength the length of the legend handles
handletextpad the pad between the legend handle and text
borderaxespad the pad between the axes and legend border
columnspacing the spacing between columns
================ ====================================================
.. Note:: Not all kinds of artist are supported by the legend.
See LINK (FIXME) for details.
**Example:**
.. plot:: mpl_examples/pylab_examples/figlegend_demo.py
"""
l = Legend(self, handles, labels, *args, **kwargs)
self.legends.append(l)
l._remove_method = lambda h: self.legends.remove(h)
return l
@docstring.dedent_interpd
def text(self, x, y, s, *args, **kwargs):
"""
Add text to figure.
Call signature::
text(x, y, s, fontdict=None, **kwargs)
Add text to figure at location *x*, *y* (relative 0-1
coords). See :func:`~matplotlib.pyplot.text` for the meaning
of the other arguments.
kwargs control the :class:`~matplotlib.text.Text` properties:
%(Text)s
"""
override = _process_text_args({}, *args, **kwargs)
t = Text(x=x, y=y, text=s)
t.update(override)
self._set_artist_props(t)
self.texts.append(t)
t._remove_method = lambda h: self.texts.remove(h)
return t
def _set_artist_props(self, a):
if a != self:
a.set_figure(self)
a.set_transform(self.transFigure)
@docstring.dedent_interpd
def gca(self, **kwargs):
"""
Get the current axes, creating one if necessary
The following kwargs are supported for ensuring the returned axes
adheres to the given projection etc., and for axes creation if
the active axes does not exist:
%(Axes)s
"""
ckey, cax = self._axstack.current_key_axes()
        # if there exists an axes on the stack see if it matches
# the desired axes configuration
if cax is not None:
# if no kwargs are given just return the current axes
# this is a convenience for gca() on axes such as polar etc.
if not kwargs:
return cax
# if the user has specified particular projection detail
# then build up a key which can represent this
else:
# we don't want to modify the original kwargs
# so take a copy so that we can do what we like to it
kwargs_copy = kwargs.copy()
projection_class, _, key = process_projection_requirements(
self, **kwargs_copy)
# let the returned axes have any gridspec by removing it from
# the key
ckey = ckey[1:]
key = key[1:]
# if the cax matches this key then return the axes, otherwise
# continue and a new axes will be created
if key == ckey and isinstance(cax, projection_class):
return cax
# no axes found, so create one which spans the figure
return self.add_subplot(1, 1, 1, **kwargs)
def sca(self, a):
'Set the current axes to be a and return a'
self._axstack.bubble(a)
for func in self._axobservers:
func(self)
return a
def _gci(self):
"""
helper for :func:`~matplotlib.pyplot.gci`;
do not use elsewhere.
"""
# Look first for an image in the current Axes:
cax = self._axstack.current_key_axes()[1]
if cax is None:
return None
im = cax._gci()
if im is not None:
return im
# If there is no image in the current Axes, search for
# one in a previously created Axes. Whether this makes
# sense is debatable, but it is the documented behavior.
for ax in reversed(self.axes):
im = ax._gci()
if im is not None:
return im
return None
def __getstate__(self):
state = self.__dict__.copy()
# the axobservers cannot currently be pickled.
# Additionally, the canvas cannot currently be pickled, but this has
# the benefit of meaning that a figure can be detached from one canvas,
# and re-attached to another.
for attr_to_pop in ('_axobservers', 'show',
'canvas', '_cachedRenderer'):
state.pop(attr_to_pop, None)
# add version information to the state
state['__mpl_version__'] = _mpl_version
# check to see if the figure has a manager and whether it is registered
# with pyplot
if getattr(self.canvas, 'manager', None) is not None:
manager = self.canvas.manager
import matplotlib._pylab_helpers
if manager in list(six.itervalues(
matplotlib._pylab_helpers.Gcf.figs)):
state['_restore_to_pylab'] = True
return state
def __setstate__(self, state):
version = state.pop('__mpl_version__')
restore_to_pylab = state.pop('_restore_to_pylab', False)
if version != _mpl_version:
import warnings
warnings.warn("This figure was saved with matplotlib version %s "
"and is unlikely to function correctly." %
(version, ))
self.__dict__ = state
# re-initialise some of the unstored state information
self._axobservers = []
self.canvas = None
if restore_to_pylab:
# lazy import to avoid circularity
import matplotlib.pyplot as plt
import matplotlib._pylab_helpers as pylab_helpers
allnums = plt.get_fignums()
num = max(allnums) + 1 if allnums else 1
mgr = plt._backend_mod.new_figure_manager_given_figure(num, self)
# XXX The following is a copy and paste from pyplot. Consider
# factoring to pylab_helpers
if self.get_label():
mgr.set_window_title(self.get_label())
# make this figure current on button press event
def make_active(event):
pylab_helpers.Gcf.set_active(mgr)
mgr._cidgcf = mgr.canvas.mpl_connect('button_press_event',
make_active)
pylab_helpers.Gcf.set_active(mgr)
self.number = num
plt.draw_if_interactive()
def add_axobserver(self, func):
'whenever the axes state change, ``func(self)`` will be called'
self._axobservers.append(func)
def savefig(self, *args, **kwargs):
"""
Save the current figure.
Call signature::
savefig(fname, dpi=None, facecolor='w', edgecolor='w',
orientation='portrait', papertype=None, format=None,
transparent=False, bbox_inches=None, pad_inches=0.1,
frameon=None)
The output formats available depend on the backend being used.
Arguments:
*fname*:
A string containing a path to a filename, or a Python
file-like object, or possibly some backend-dependent object
such as :class:`~matplotlib.backends.backend_pdf.PdfPages`.
If *format* is *None* and *fname* is a string, the output
format is deduced from the extension of the filename. If
the filename has no extension, the value of the rc parameter
``savefig.format`` is used.
If *fname* is not a string, remember to specify *format* to
ensure that the correct backend is used.
Keyword arguments:
*dpi*: [ *None* | ``scalar > 0`` ]
The resolution in dots per inch. If *None* it will default to
the value ``savefig.dpi`` in the matplotlibrc file.
*facecolor*, *edgecolor*:
the colors of the figure rectangle
*orientation*: [ 'landscape' | 'portrait' ]
not supported on all backends; currently only on postscript output
*papertype*:
One of 'letter', 'legal', 'executive', 'ledger', 'a0' through
'a10', 'b0' through 'b10'. Only supported for postscript
output.
*format*:
One of the file extensions supported by the active
backend. Most backends support png, pdf, ps, eps and svg.
*transparent*:
If *True*, the axes patches will all be transparent; the
figure patch will also be transparent unless facecolor
and/or edgecolor are specified via kwargs.
This is useful, for example, for displaying
a plot on top of a colored background on a web page. The
transparency of these patches will be restored to their
original values upon exit of this function.
*frameon*:
If *True*, the figure patch will be colored, if *False*, the
figure background will be transparent. If not provided, the
rcParam 'savefig.frameon' will be used.
*bbox_inches*:
Bbox in inches. Only the given portion of the figure is
saved. If 'tight', try to figure out the tight bbox of
the figure.
*pad_inches*:
Amount of padding around the figure when bbox_inches is
'tight'.
*bbox_extra_artists*:
A list of extra artists that will be considered when the
tight bbox is calculated.
"""
kwargs.setdefault('dpi', rcParams['savefig.dpi'])
frameon = kwargs.pop('frameon', rcParams['savefig.frameon'])
transparent = kwargs.pop('transparent',
rcParams['savefig.transparent'])
if transparent:
kwargs.setdefault('facecolor', 'none')
kwargs.setdefault('edgecolor', 'none')
original_axes_colors = []
for ax in self.axes:
patch = ax.patch
original_axes_colors.append((patch.get_facecolor(),
patch.get_edgecolor()))
patch.set_facecolor('none')
patch.set_edgecolor('none')
else:
kwargs.setdefault('facecolor', rcParams['savefig.facecolor'])
kwargs.setdefault('edgecolor', rcParams['savefig.edgecolor'])
if frameon:
original_frameon = self.get_frameon()
self.set_frameon(frameon)
self.canvas.print_figure(*args, **kwargs)
if frameon:
self.set_frameon(original_frameon)
if transparent:
for ax, cc in zip(self.axes, original_axes_colors):
ax.patch.set_facecolor(cc[0])
ax.patch.set_edgecolor(cc[1])
@docstring.dedent_interpd
def colorbar(self, mappable, cax=None, ax=None, use_gridspec=True, **kw):
"""
Create a colorbar for a ScalarMappable instance, *mappable*.
Documentation for the pylab thin wrapper:
%(colorbar_doc)s
"""
if ax is None:
ax = self.gca()
# Store the value of gca so that we can set it back later on.
current_ax = self.gca()
if cax is None:
if use_gridspec and isinstance(ax, SubplotBase):
cax, kw = cbar.make_axes_gridspec(ax, **kw)
else:
cax, kw = cbar.make_axes(ax, **kw)
cax.hold(True)
cb = cbar.colorbar_factory(cax, mappable, **kw)
self.sca(current_ax)
return cb
def subplots_adjust(self, *args, **kwargs):
"""
Call signature::
subplots_adjust(left=None, bottom=None, right=None, top=None,
wspace=None, hspace=None)
Update the :class:`SubplotParams` with *kwargs* (defaulting to rc when
*None*) and update the subplot locations
"""
self.subplotpars.update(*args, **kwargs)
for ax in self.axes:
if not isinstance(ax, SubplotBase):
# Check if sharing a subplots axis
if (ax._sharex is not None and
isinstance(ax._sharex, SubplotBase)):
ax._sharex.update_params()
ax.set_position(ax._sharex.figbox)
elif (ax._sharey is not None and
isinstance(ax._sharey, SubplotBase)):
ax._sharey.update_params()
ax.set_position(ax._sharey.figbox)
else:
ax.update_params()
ax.set_position(ax.figbox)
def ginput(self, n=1, timeout=30, show_clicks=True, mouse_add=1,
mouse_pop=3, mouse_stop=2):
"""
Call signature::
ginput(self, n=1, timeout=30, show_clicks=True,
mouse_add=1, mouse_pop=3, mouse_stop=2)
Blocking call to interact with the figure.
This will wait for *n* clicks from the user and return a list of the
coordinates of each click.
If *timeout* is zero or negative, does not timeout.
If *n* is zero or negative, accumulate clicks until a middle click
(or potentially both mouse buttons at once) terminates the input.
Right clicking cancels last input.
The buttons used for the various actions (adding points, removing
        points, terminating the inputs) can be overridden via the
arguments *mouse_add*, *mouse_pop* and *mouse_stop*, that give
the associated mouse button: 1 for left, 2 for middle, 3 for
right.
The keyboard can also be used to select points in case your mouse
does not have one or more of the buttons. The delete and backspace
keys act like right clicking (i.e., remove last point), the enter key
terminates input and any other key (not already used by the window
manager) selects a point.
"""
blocking_mouse_input = BlockingMouseInput(self,
mouse_add=mouse_add,
mouse_pop=mouse_pop,
mouse_stop=mouse_stop)
return blocking_mouse_input(n=n, timeout=timeout,
show_clicks=show_clicks)
def waitforbuttonpress(self, timeout=-1):
"""
Call signature::
waitforbuttonpress(self, timeout=-1)
Blocking call to interact with the figure.
        This will return True if a key was pressed, False if a mouse
button was pressed and None if *timeout* was reached without
either being pressed.
If *timeout* is negative, does not timeout.
"""
blocking_input = BlockingKeyMouseInput(self)
return blocking_input(timeout=timeout)
def get_default_bbox_extra_artists(self):
bbox_artists = [artist for artist in self.get_children()
if artist.get_visible()]
for ax in self.axes:
if ax.get_visible():
bbox_artists.extend(ax.get_default_bbox_extra_artists())
# we don't want the figure's patch to influence the bbox calculation
bbox_artists.remove(self.patch)
return bbox_artists
def get_tightbbox(self, renderer):
"""
Return a (tight) bounding box of the figure in inches.
        It only accounts for axes title, axis labels, and axis
ticklabels. Needs improvement.
"""
bb = []
for ax in self.axes:
if ax.get_visible():
bb.append(ax.get_tightbbox(renderer))
if len(bb) == 0:
return self.bbox_inches
_bbox = Bbox.union([b for b in bb if b.width != 0 or b.height != 0])
bbox_inches = TransformedBbox(_bbox,
Affine2D().scale(1. / self.dpi))
return bbox_inches
def tight_layout(self, renderer=None, pad=1.08, h_pad=None,
w_pad=None, rect=None):
"""
Adjust subplot parameters to give specified padding.
Parameters:
*pad* : float
padding between the figure edge and the edges of subplots,
as a fraction of the font-size.
*h_pad*, *w_pad* : float
padding (height/width) between edges of adjacent subplots.
Defaults to `pad_inches`.
*rect* : if rect is given, it is interpreted as a rectangle
(left, bottom, right, top) in the normalized figure
coordinate that the whole subplots area (including
labels) will fit into. Default is (0, 0, 1, 1).
"""
from .tight_layout import (get_renderer, get_tight_layout_figure,
get_subplotspec_list)
subplotspec_list = get_subplotspec_list(self.axes)
if None in subplotspec_list:
warnings.warn("This figure includes Axes that are not "
"compatible with tight_layout, so its "
"results might be incorrect.")
if renderer is None:
renderer = get_renderer(self)
kwargs = get_tight_layout_figure(self, self.axes, subplotspec_list,
renderer,
pad=pad, h_pad=h_pad, w_pad=w_pad,
rect=rect)
self.subplots_adjust(**kwargs)
def figaspect(arg):
"""
Create a figure with specified aspect ratio. If *arg* is a number,
use that aspect ratio. If *arg* is an array, figaspect will
    determine the width and height for a figure that would fit the array
    preserving aspect ratio. The figure width, height in inches are
    returned. Be sure to create an axes with equal width and height,
e.g.,
Example usage::
# make a figure twice as tall as it is wide
w, h = figaspect(2.)
fig = Figure(figsize=(w,h))
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8])
ax.imshow(A, **kwargs)
# make a figure with the proper aspect for an array
A = rand(5,3)
w, h = figaspect(A)
fig = Figure(figsize=(w,h))
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8])
ax.imshow(A, **kwargs)
Thanks to Fernando Perez for this function
"""
isarray = hasattr(arg, 'shape')
# min/max sizes to respect when autoscaling. If John likes the idea, they
# could become rc parameters, for now they're hardwired.
figsize_min = np.array((4.0, 2.0)) # min length for width/height
figsize_max = np.array((16.0, 16.0)) # max length for width/height
#figsize_min = rcParams['figure.figsize_min']
#figsize_max = rcParams['figure.figsize_max']
# Extract the aspect ratio of the array
if isarray:
nr, nc = arg.shape[:2]
arr_ratio = float(nr) / nc
else:
arr_ratio = float(arg)
# Height of user figure defaults
fig_height = rcParams['figure.figsize'][1]
# New size for the figure, keeping the aspect ratio of the caller
newsize = np.array((fig_height / arr_ratio, fig_height))
# Sanity checks, don't drop either dimension below figsize_min
newsize /= min(1.0, *(newsize / figsize_min))
# Avoid humongous windows as well
newsize /= max(1.0, *(newsize / figsize_max))
# Finally, if we have a really funky aspect ratio, break it but respect
# the min/max dimensions (we don't want figures 10 feet tall!)
newsize = np.clip(newsize, figsize_min, figsize_max)
return newsize
docstring.interpd.update(Figure=martist.kwdoc(Figure))
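# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the original
# module).  It shows the object-oriented route of driving a Figure with an
# explicit Agg canvas instead of pyplot; the output filename is arbitrary.
def _example_figure_usage():
    from matplotlib.backends.backend_agg import FigureCanvasAgg
    fig = Figure(figsize=(6, 4), dpi=100)
    FigureCanvasAgg(fig)                  # attach a canvas so savefig works
    ax = fig.add_subplot(1, 1, 1)
    ax.plot([0, 1, 2], [0, 1, 4], label='sampled y = x**2')
    ax.legend(loc='upper left')
    fig.suptitle('Figure built without pyplot')
    fig.savefig('figure_sketch.png')
    return fig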
| gpl-2.0 |
dylanGeng/BuildingMachineLearningSystemsWithPython | ch09/fft.py | 24 | 3673 | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
import sys
import os
import glob
import numpy as np
import scipy
import scipy.io.wavfile
from utils import GENRE_DIR, CHART_DIR
import matplotlib.pyplot as plt
from matplotlib.ticker import EngFormatter
def write_fft(fft_features, fn):
"""
Write the FFT features to separate files to speed up processing.
"""
base_fn, ext = os.path.splitext(fn)
data_fn = base_fn + ".fft"
np.save(data_fn, fft_features)
print("Written "%data_fn)
def create_fft(fn):
sample_rate, X = scipy.io.wavfile.read(fn)
fft_features = abs(scipy.fft(X)[:1000])
write_fft(fft_features, fn)
def read_fft(genre_list, base_dir=GENRE_DIR):
X = []
y = []
for label, genre in enumerate(genre_list):
genre_dir = os.path.join(base_dir, genre, "*.fft.npy")
file_list = glob.glob(genre_dir)
assert(file_list), genre_dir
for fn in file_list:
fft_features = np.load(fn)
X.append(fft_features[:2000])
y.append(label)
return np.array(X), np.array(y)
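def _example_fft_classifier():
    """Hedged sketch (not part of the original chapter code): train a simple
    scikit-learn classifier on the stored FFT features.  The genre subset and
    the choice of LogisticRegression are illustrative assumptions."""
    from sklearn.linear_model import LogisticRegression
    genre_list = ["classical", "jazz", "metal"]   # assumed subset of GENRE_DIR
    X, y = read_fft(genre_list)
    idx = np.random.permutation(len(y))           # shuffle before splitting
    X, y = X[idx], y[idx]
    n_train = int(0.7 * len(y))                   # simple 70/30 hold-out
    clf = LogisticRegression()
    clf.fit(X[:n_train], y[:n_train])
    return clf.score(X[n_train:], y[n_train:])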
def plot_wav_fft(wav_filename, desc=None):
plt.clf()
plt.figure(num=None, figsize=(6, 4))
sample_rate, X = scipy.io.wavfile.read(wav_filename)
spectrum = np.fft.fft(X)
freq = np.fft.fftfreq(len(X), 1.0 / sample_rate)
plt.subplot(211)
num_samples = 200.0
plt.xlim(0, num_samples / sample_rate)
plt.xlabel("time [s]")
plt.title(desc or wav_filename)
plt.plot(np.arange(num_samples) / sample_rate, X[:num_samples])
plt.grid(True)
plt.subplot(212)
plt.xlim(0, 5000)
plt.xlabel("frequency [Hz]")
plt.xticks(np.arange(5) * 1000)
if desc:
desc = desc.strip()
fft_desc = desc[0].lower() + desc[1:]
else:
fft_desc = wav_filename
plt.title("FFT of %s" % fft_desc)
plt.plot(freq, abs(spectrum), linewidth=5)
plt.grid(True)
plt.tight_layout()
rel_filename = os.path.split(wav_filename)[1]
plt.savefig("%s_wav_fft.png" % os.path.splitext(rel_filename)[0],
bbox_inches='tight')
plt.show()
def plot_wav_fft_demo():
plot_wav_fft("sine_a.wav", "400Hz sine wave")
plot_wav_fft("sine_b.wav", "3,000Hz sine wave")
plot_wav_fft("sine_mix.wav", "Mixed sine wave")
def plot_specgram(ax, fn):
sample_rate, X = scipy.io.wavfile.read(fn)
ax.specgram(X, Fs=sample_rate, xextent=(0, 30))
def plot_specgrams(base_dir=CHART_DIR):
"""
Plot a bunch of spectrograms of wav files in different genres
"""
plt.clf()
genres = ["classical", "jazz", "country", "pop", "rock", "metal"]
num_files = 3
f, axes = plt.subplots(len(genres), num_files)
for genre_idx, genre in enumerate(genres):
for idx, fn in enumerate(glob.glob(os.path.join(GENRE_DIR, genre, "*.wav"))):
if idx == num_files:
break
axis = axes[genre_idx, idx]
axis.yaxis.set_major_formatter(EngFormatter())
axis.set_title("%s song %i" % (genre, idx + 1))
plot_specgram(axis, fn)
specgram_file = os.path.join(base_dir, "Spectrogram_Genres.png")
plt.savefig(specgram_file, bbox_inches="tight")
plt.show()
if __name__ == "__main__":
# for fn in glob.glob(os.path.join(sys.argv[1], "*.wav")):
# create_fft(fn)
# plot_decomp()
if len(sys.argv) > 1:
plot_wav_fft(sys.argv[1], desc="some sample song")
else:
plot_wav_fft_demo()
plot_specgrams()
| mit |
rafafigueroa/compass-gait | hasimpy.py | 1 | 9216 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author: Rafael Figueroa
"""
dp = True
import numpy as np
DEBUG = False
class H:
"""Hybrid Automata Model"""
def __init__(self, Q, Init_X, Init_qID, state_names = None):
self.q = Q #list of q
self.Init_X = Init_X
self.Init_qID = Init_qID
self.states = state_names
self.Ts = None
def mode_tracker_guard_check(self, qID, X):
# Called by mode_tracker to set the mode
q = self.q[qID]
g=q.E.G #guard list
oe=q.E.OE #out edges list
[g_activated, oID_activated_g] = guard_check(g, X)
# return new qID when a guard is activated
if g_activated:
qID_activated_g = oe[oID_activated_g]
else:
qID_activated_g = qID
return qID_activated_g
def sim(self, qID, X, u, t0, tlim,
haws_flag=False,
debug_flag=False, Ts=1e-4):
self.Ts = Ts
#t0 refers to the initial time of
#each continuous dynamic time interval
sr = SimResult(self.states) #Initialize class
q = self.q[qID] #get a ref to current mode
global DEBUG
DEBUG = debug_flag #change global DEBUG variable
while t0<tlim:
#get values from current q object
f=q.f #continuous dynamics func
# when simulating is requested by haws
# with a forced input
if not haws_flag:
u=q.u
g=q.E.G #guard list
r=q.E.R #reset map list
oe=q.E.OE #out edges list
dom=q.Dom #discrete mode domain
avoid=q.Avoid #discrete mode avoid
if DEBUG:
print '\n*** New Discrete State *** \n'
print 'f=',f,'\ng=',g,'\nr=',r,'\noe=',oe,'\ndom=',dom
print 'Avoid=',avoid
print 'qID=',q.qID,'\nX=',X,'\nu=',u
print '\n*** domain check *** \n'
if not dom(X):
errorString = 'Outside domain!'
print errorString
#raise NameError(errorString)
if DEBUG:
print '\n*** continuous dynamics *** \n'
#simulate continuous dynamics
T, Y, oID_activated_g, \
avoid_activated, tlim_activated = \
odeeul(f, u, g, avoid, X, t0, tlim, Ts)
# store this time interval
# in the simulation results
sr.newTimeInterval(T, Y, q)
# when inside the avoid set, simulation stops
# and the information is stored in the simulation results
if avoid_activated:
sr.avoid_activated = True
sr.timeToAvoid = T[-1]
break #while loop
if tlim_activated:
break #while loop
# *** after guard is activated ***
# prepare data for the next loop
t0=T[-1] #reset initial time to the end of
#last time interval
last_state = np.array(Y[-1])
if DEBUG:
print '\n *** reset map *** \n'
print 'last state =',last_state
X=r[oID_activated_g](last_state) #reset map
qID_activated_g = oe[oID_activated_g]
#guard activated print out
if DEBUG:
print 'sim -- guard activated'
print 'sim -- from q =', q.qID, 'to q =', qID_activated_g
print 'sim -- State =', X
#get new q
q = self.q[qID_activated_g]
return sr
class Q:
def __init__(self,qID,f,u,E,
Dom = lambda X:True,
Avoid = lambda X:False ,
TC=True):
self.qID = qID
self.f = f
self.u = u
self.E = E
self.Dom = Dom
self.Avoid = Avoid
self.TC = TC
class E:
def __init__(self,OE,G,R):
self.OE = OE
self.G = G
self.R = R
def guard_check(g,X):
guard_list = []
#evaluate every guard in g
#g is the list of guards for this q
#store the results in guard_list
for guard in g:
guard_list.append(guard(X))
oID_activated_g = None
g_activated = False
#check if any result in guard_list is True
#if it is, store the index
for oID,guard in enumerate(guard_list):
if guard:
oID_activated_g = oID #outside q which tripped the guard
g_activated = True
break
return [g_activated, oID_activated_g]
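# Hedged example (illustration only, not original code): guards are plain
# callables evaluated on the continuous state X, one per outgoing edge, e.g.
#
#   g = [lambda X: X[0] <= 0.0,     # edge 0: position reaches the ground
#        lambda X: X[1] >= 10.0]    # edge 1: velocity limit reached
#   g_activated, oID = guard_check(g, np.array([0.5, 2.0]))
#   # -> g_activated is False and oID is None until some guard returns True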
def avoid_check(avoid,X):
'avoid returns True when inside the avoid set'
return avoid(X)
def odeeul(f, u, g, avoid, X0, t0, tlim, Ts):
X=np.array(X0)
Y=np.array(X0)
T=np.array([t0])
if DEBUG:
print 'State=',X
g_activated, oID_activated_g = guard_check(g,X)
avoid_activated = avoid_check(avoid,X)
tlim_activated = (t0>=tlim)
if g_activated:
print 'instant jump'
if DEBUG:
print 'First checks:'
print '\tg_activated:', g_activated
print '\tavoid_activated', avoid_activated
print '\ttlim_activated', tlim_activated
while not (g_activated or avoid_activated or tlim_activated):
#Evolve continuously until a
#termination condition is activated
X=Ts*f(X,u)+X
Y=np.vstack((Y,X))
tnew = np.array([T[-1]+Ts])
T=np.concatenate([T,tnew])
#termination checks
g_activated, oID_activated_g = guard_check(g,X)
avoid_activated = avoid_check(avoid,X)
tlim_activated = (tnew>=tlim)
if DEBUG:
print 'Running checks:'
print '\tg_activated:',g_activated
print '\tavoid_activated',avoid_activated
print '\ttlim_activated',tlim_activated
return [T, Y, oID_activated_g, avoid_activated,
tlim_activated]
class SimResult:
"""Output from one simulation run"""
def __init__(self, states = None):
self.I = []
self.j = 0
self.timesteps = 0
self.timeToAvoid = None
self.avoid_activated = False
self.path = None
self.time = None
self.mode = None
self.states = states
        if self.states is not None:
            for yi in range(0, len(self.states)):
                self.states[yi] = "$" + self.states[yi] + "$"
                self.states[yi] = self.states[yi].encode('string-escape')
                self.states[yi] = self.states[yi].replace("\\\\", "\\")
def newTimeInterval(self, T, Y, qID):
"""Simulation is broken into continuous chunks
Here the chunks are put together"""
if self.j == 0:
# First interval
self.path = Y
self.time = T
self.mode = np.array([qID])
else:
self.path = np.vstack((self.path, Y))
self.time = np.concatenate((self.time, T))
self.mode = np.concatenate((self.mode, np.array([qID])))
self.j = self.j + 1
self.timesteps = self.timesteps + np.size(T)
self.I.append(TimeInterval(T, Y, self.j))
def simPlot(self):
Y_plot = self.path
T_plot = self.time
import matplotlib.pyplot as plt
# TODO: Configurate at install?
# user might not want latex
from matplotlib import rc
rc('text', usetex=True)
nstates = np.size(Y_plot,1)
f, axarr = plt.subplots(nstates, sharex=True)
if nstates>1:
for yi in range(nstates):
axarr[yi].plot(T_plot, Y_plot[:,yi])
if self.states is not None:
axarr[nstates-1].set_xlabel(r'time(s)')
axarr[yi].set_ylabel(self.states[yi], fontsize = 20)
axarr[yi].yaxis.set_label_coords(-0.08, 0.5)
else:
axarr.plot(T_plot,Y_plot)
if self.states is not None:
axarr.set_xlabel('time(s)')
axarr.set_ylabel(self.states[0])
plt.ion()
plt.show()
def phasePlot(self, plotStates):
#TODO:check size of Y,plotStates
X1_plot = self.path[:,plotStates[0]]
X2_plot = self.path[:,plotStates[1]]
import matplotlib.pyplot as plt
# figx = plt.figure()
f, axarr = plt.subplots(1, sharex=True)
axarr.plot(X1_plot,X2_plot)
if self.states is not None:
axarr.set_xlabel(self.states[plotStates[0]], fontsize = 20)
axarr.set_ylabel(self.states[plotStates[1]], fontsize = 20)
axarr.yaxis.set_label_coords(-0.08, 0.5)
plt.ion()
plt.show()
class TimeInterval:
def __init__(self,T,Y,j):
self.T=T
self.Y=Y
self.j=j
def idem(X):
return X
def tolEqual(a,b,tol=1e-2):
return abs(a-b)<tol
def last_row(Y):
print 'shape', np.shape(Y)
rows = np.shape(Y)[0]
print 'rows',rows
if rows>1:
return Y[-1]
else:
return Y
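def _example_bouncing_ball():
    """Hedged sketch (not part of the original module): a one-mode bouncing
    ball built with H/Q/E.  Gravity, the restitution factor and the initial
    drop height are illustrative assumptions."""
    f = lambda X, u: np.array([X[1], -9.81])           # state: [height, velocity]
    guards = [lambda X: X[0] <= 0.0 and X[1] < 0.0]    # ball hits the floor
    resets = [lambda X: np.array([0.0, -0.9 * X[1]])]  # bounce, lose 10% speed
    e = E(OE=[0], G=guards, R=resets)                  # jump back to mode 0
    q0 = Q(qID=0, f=f, u=None, E=e)
    h = H([q0], Init_X=np.array([1.0, 0.0]), Init_qID=0,
          state_names=['h', 'v'])
    return h.sim(0, h.Init_X, u=None, t0=0.0, tlim=3.0, Ts=1e-3)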
| gpl-2.0 |
tequa/ammisoft | ammimain/WinPython-64bit-2.7.13.1Zero/python-2.7.13.amd64/Lib/site-packages/matplotlib/offsetbox.py | 10 | 54984 | """
The OffsetBox is a simple container artist. The child artists are meant
to be drawn at a relative position to their parent. The [VH]Packer,
DrawingArea and TextArea are derived from the OffsetBox.
The [VH]Packer automatically adjusts the relative positions of its
children, which should be instances of the OffsetBox. This is used to
align similar artists together, e.g., in legend.
The DrawingArea can contain any Artist as a child. The
DrawingArea has a fixed width and height. The position of children
relative to the parent is fixed. The TextArea contains a single
Text instance. The width and height of the TextArea instance is the
width and height of its child text.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import xrange, zip
import warnings
import matplotlib.transforms as mtransforms
import matplotlib.artist as martist
import matplotlib.text as mtext
import matplotlib.path as mpath
import numpy as np
from matplotlib.transforms import Bbox, BboxBase, TransformedBbox
from matplotlib.font_manager import FontProperties
from matplotlib.patches import FancyBboxPatch, FancyArrowPatch
from matplotlib import rcParams
from matplotlib import docstring
#from bboximage import BboxImage
from matplotlib.image import BboxImage
from matplotlib.patches import bbox_artist as mbbox_artist
from matplotlib.text import _AnnotationBase
DEBUG = False
# for debuging use
def bbox_artist(*args, **kwargs):
if DEBUG:
mbbox_artist(*args, **kwargs)
# _get_packed_offsets() and _get_aligned_offsets() are coded assuming
# that we are packing boxes horizontally. But same function will be
# used with vertical packing.
def _get_packed_offsets(wd_list, total, sep, mode="fixed"):
"""
    Given a list of (width, xdescent) of each box, calculate the
    total width and the x-offset position of each item according to
    *mode*. xdescent is analogous to the usual descent, but along the
    x-direction. xdescent values are currently ignored.
*wd_list* : list of (width, xdescent) of boxes to be packed.
*sep* : spacing between boxes
*total* : Intended total length. None if not used.
*mode* : packing mode. 'fixed', 'expand', or 'equal'.
"""
w_list, d_list = list(zip(*wd_list))
# d_list is currently not used.
if mode == "fixed":
offsets_ = np.add.accumulate([0] + [w + sep for w in w_list])
offsets = offsets_[:-1]
if total is None:
total = offsets_[-1] - sep
return total, offsets
elif mode == "expand":
if len(w_list) > 1:
sep = (total - sum(w_list)) / (len(w_list) - 1.)
else:
sep = 0.
offsets_ = np.add.accumulate([0] + [w + sep for w in w_list])
offsets = offsets_[:-1]
return total, offsets
elif mode == "equal":
maxh = max(w_list)
if total is None:
total = (maxh + sep) * len(w_list)
else:
sep = float(total) / (len(w_list)) - maxh
offsets = np.array([(maxh + sep) * i for i in range(len(w_list))])
return total, offsets
else:
raise ValueError("Unknown mode : %s" % (mode,))
def _get_aligned_offsets(hd_list, height, align="baseline"):
"""
    Given a list of (height, descent) of each box, align the boxes
    with *align* and calculate the y-offset of each box.
    *hd_list* : list of (height, descent) of boxes to be aligned.
    *height* : Intended total height. None if not used.
    *align* : align mode. 'baseline', 'top', 'bottom', or 'center'.
"""
if height is None:
height = max([h for h, d in hd_list])
if align == "baseline":
height_descent = max([h - d for h, d in hd_list])
descent = max([d for h, d in hd_list])
height = height_descent + descent
offsets = [0. for h, d in hd_list]
elif align in ["left", "top"]:
descent = 0.
offsets = [d for h, d in hd_list]
elif align in ["right", "bottom"]:
descent = 0.
offsets = [height - h + d for h, d in hd_list]
elif align == "center":
descent = 0.
offsets = [(height - h) * .5 + d for h, d in hd_list]
else:
raise ValueError("Unknown Align mode : %s" % (align,))
return height, descent, offsets
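# Hedged example (illustration only, not original code): center two boxes of
# heights 10 and 20 (zero descent) inside a 20-unit-high row.
#
#   height, descent, offsets = _get_aligned_offsets([(10, 0), (20, 0)],
#                                                   height=20, align="center")
#   # height -> 20, descent -> 0., offsets -> [5.0, 0.0]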
class OffsetBox(martist.Artist):
"""
    The OffsetBox is a simple container artist. The child artists are meant
    to be drawn at a relative position to their parent.
"""
def __init__(self, *args, **kwargs):
super(OffsetBox, self).__init__(*args, **kwargs)
        # Clipping has not been implemented in the OffsetBox family, so
# disable the clip flag for consistency. It can always be turned back
# on to zero effect.
self.set_clip_on(False)
self._children = []
self._offset = (0, 0)
def __getstate__(self):
state = martist.Artist.__getstate__(self)
# pickle cannot save instancemethods, so handle them here
from .cbook import _InstanceMethodPickler
import inspect
offset = state['_offset']
if inspect.ismethod(offset):
state['_offset'] = _InstanceMethodPickler(offset)
return state
def __setstate__(self, state):
self.__dict__ = state
from .cbook import _InstanceMethodPickler
if isinstance(self._offset, _InstanceMethodPickler):
self._offset = self._offset.get_instancemethod()
self.stale = True
def set_figure(self, fig):
"""
Set the figure
        accepts a :class:`~matplotlib.figure.Figure` instance
"""
martist.Artist.set_figure(self, fig)
for c in self.get_children():
c.set_figure(fig)
@martist.Artist.axes.setter
def axes(self, ax):
# TODO deal with this better
martist.Artist.axes.fset(self, ax)
for c in self.get_children():
if c is not None:
c.axes = ax
def contains(self, mouseevent):
for c in self.get_children():
a, b = c.contains(mouseevent)
if a:
return a, b
return False, {}
def set_offset(self, xy):
"""
Set the offset
        accepts an (x, y) tuple or a callable object
"""
self._offset = xy
self.stale = True
def get_offset(self, width, height, xdescent, ydescent, renderer):
"""
Get the offset
        accepts the extent of the box (width, height, xdescent, ydescent)
        and the renderer
"""
if six.callable(self._offset):
return self._offset(width, height, xdescent, ydescent, renderer)
else:
return self._offset
def set_width(self, width):
"""
Set the width
accepts float
"""
self.width = width
self.stale = True
def set_height(self, height):
"""
Set the height
accepts float
"""
self.height = height
self.stale = True
def get_visible_children(self):
"""
Return a list of visible artists it contains.
"""
return [c for c in self._children if c.get_visible()]
def get_children(self):
"""
Return a list of artists it contains.
"""
return self._children
def get_extent_offsets(self, renderer):
        raise NotImplementedError(
            "get_extent_offsets must be overridden in derived classes")
def get_extent(self, renderer):
"""
        Return width, height, xdescent, ydescent of the box
"""
w, h, xd, yd, offsets = self.get_extent_offsets(renderer)
return w, h, xd, yd
def get_window_extent(self, renderer):
'''
get the bounding box in display space.
'''
w, h, xd, yd, offsets = self.get_extent_offsets(renderer)
px, py = self.get_offset(w, h, xd, yd, renderer)
return mtransforms.Bbox.from_bounds(px - xd, py - yd, w, h)
def draw(self, renderer):
"""
Update the location of children if necessary and draw them
to the given *renderer*.
"""
width, height, xdescent, ydescent, offsets = self.get_extent_offsets(
renderer)
px, py = self.get_offset(width, height, xdescent, ydescent, renderer)
for c, (ox, oy) in zip(self.get_visible_children(), offsets):
c.set_offset((px + ox, py + oy))
c.draw(renderer)
bbox_artist(self, renderer, fill=False, props=dict(pad=0.))
self.stale = False
class PackerBase(OffsetBox):
def __init__(self, pad=None, sep=None, width=None, height=None,
align=None, mode=None,
children=None):
"""
Parameters
----------
pad : float, optional
Boundary pad.
sep : float, optional
Spacing between items.
width : float, optional
height : float, optional
Width and height of the container box, calculated if
`None`.
align : str, optional
Alignment of boxes. Can be one of ``top``, ``bottom``,
``left``, ``right``, ``center`` and ``baseline``
mode : str, optional
Packing mode.
Notes
-----
        *pad* and *sep* need to be given in points and will be scaled
        with the renderer dpi, while *width* and *height* need to be in
        pixels.
"""
super(PackerBase, self).__init__()
self.height = height
self.width = width
self.sep = sep
self.pad = pad
self.mode = mode
self.align = align
self._children = children
class VPacker(PackerBase):
"""
    The VPacker has its children packed vertically. It automatically
    adjusts the relative positions of children at draw time.
"""
def __init__(self, pad=None, sep=None, width=None, height=None,
align="baseline", mode="fixed",
children=None):
"""
Parameters
----------
pad : float, optional
Boundary pad.
sep : float, optional
Spacing between items.
width : float, optional
height : float, optional
width and height of the container box, calculated if
`None`.
align : str, optional
Alignment of boxes.
mode : str, optional
Packing mode.
Notes
-----
        *pad* and *sep* need to be given in points and will be scaled
        with the renderer dpi, while *width* and *height* need to be in
        pixels.
"""
super(VPacker, self).__init__(pad, sep, width, height,
align, mode,
children)
def get_extent_offsets(self, renderer):
"""
        update the offsets of the children and return the extents of the box
"""
dpicor = renderer.points_to_pixels(1.)
pad = self.pad * dpicor
sep = self.sep * dpicor
if self.width is not None:
for c in self.get_visible_children():
if isinstance(c, PackerBase) and c.mode == "expand":
c.set_width(self.width)
whd_list = [c.get_extent(renderer)
for c in self.get_visible_children()]
whd_list = [(w, h, xd, (h - yd)) for w, h, xd, yd in whd_list]
wd_list = [(w, xd) for w, h, xd, yd in whd_list]
width, xdescent, xoffsets = _get_aligned_offsets(wd_list,
self.width,
self.align)
pack_list = [(h, yd) for w, h, xd, yd in whd_list]
height, yoffsets_ = _get_packed_offsets(pack_list, self.height,
sep, self.mode)
yoffsets = yoffsets_ + [yd for w, h, xd, yd in whd_list]
ydescent = height - yoffsets[0]
yoffsets = height - yoffsets
#w, h, xd, h_yd = whd_list[-1]
yoffsets = yoffsets - ydescent
return width + 2 * pad, height + 2 * pad, \
xdescent + pad, ydescent + pad, \
list(zip(xoffsets, yoffsets))
class HPacker(PackerBase):
"""
The HPacker has its children packed horizontally. It automatically
adjusts the relative positions of children at draw time.
"""
def __init__(self, pad=None, sep=None, width=None, height=None,
align="baseline", mode="fixed",
children=None):
"""
Parameters
----------
pad : float, optional
Boundary pad.
sep : float, optional
Spacing between items.
width : float, optional
height : float, optional
Width and height of the container box, calculated if
`None`.
align : str
Alignment of boxes.
mode : str
Packing mode.
Notes
-----
        *pad* and *sep* need to be given in points and will be scaled
        with the renderer dpi, while *width* and *height* need to be in
        pixels.
"""
super(HPacker, self).__init__(pad, sep, width, height,
align, mode, children)
def get_extent_offsets(self, renderer):
"""
        update the offsets of the children and return the extents of the box
"""
dpicor = renderer.points_to_pixels(1.)
pad = self.pad * dpicor
sep = self.sep * dpicor
whd_list = [c.get_extent(renderer)
for c in self.get_visible_children()]
if not whd_list:
return 2 * pad, 2 * pad, pad, pad, []
if self.height is None:
height_descent = max([h - yd for w, h, xd, yd in whd_list])
ydescent = max([yd for w, h, xd, yd in whd_list])
height = height_descent + ydescent
else:
            height = self.height - 2 * pad  # height w/o pad
hd_list = [(h, yd) for w, h, xd, yd in whd_list]
height, ydescent, yoffsets = _get_aligned_offsets(hd_list,
self.height,
self.align)
pack_list = [(w, xd) for w, h, xd, yd in whd_list]
width, xoffsets_ = _get_packed_offsets(pack_list, self.width,
sep, self.mode)
xoffsets = xoffsets_ + [xd for w, h, xd, yd in whd_list]
xdescent = whd_list[0][2]
xoffsets = xoffsets - xdescent
return width + 2 * pad, height + 2 * pad, \
xdescent + pad, ydescent + pad, \
list(zip(xoffsets, yoffsets))
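# Illustrative usage sketch, not part of the library (assumes an axes ``ax``
# created elsewhere, e.g. with pyplot):
#
#     from matplotlib.offsetbox import TextArea, VPacker, AnchoredOffsetbox
#     box = VPacker(children=[TextArea("first line"),
#                             TextArea("second line")],
#                   align="left", pad=2, sep=4)
#     anchored = AnchoredOffsetbox(loc=2, child=box, frameon=True)
#     ax.add_artist(anchored)
#
# HPacker is used the same way but lays its children out horizontally.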
class PaddedBox(OffsetBox):
def __init__(self, child, pad=None, draw_frame=False, patch_attrs=None):
"""
*pad* : boundary pad
.. note::
            *pad* needs to be given in points and will be scaled with
            the renderer dpi, while *width* and *height* need to be in
            pixels.
"""
super(PaddedBox, self).__init__()
self.pad = pad
self._children = [child]
self.patch = FancyBboxPatch(
xy=(0.0, 0.0), width=1., height=1.,
facecolor='w', edgecolor='k',
mutation_scale=1, # self.prop.get_size_in_points(),
snap=True
)
self.patch.set_boxstyle("square", pad=0)
if patch_attrs is not None:
self.patch.update(patch_attrs)
self._drawFrame = draw_frame
def get_extent_offsets(self, renderer):
"""
        update the offsets of the children and return the extents of the box
"""
dpicor = renderer.points_to_pixels(1.)
pad = self.pad * dpicor
w, h, xd, yd = self._children[0].get_extent(renderer)
return w + 2 * pad, h + 2 * pad, \
xd + pad, yd + pad, \
[(0, 0)]
def draw(self, renderer):
"""
Update the location of children if necessary and draw them
to the given *renderer*.
"""
width, height, xdescent, ydescent, offsets = self.get_extent_offsets(
renderer)
px, py = self.get_offset(width, height, xdescent, ydescent, renderer)
for c, (ox, oy) in zip(self.get_visible_children(), offsets):
c.set_offset((px + ox, py + oy))
self.draw_frame(renderer)
for c in self.get_visible_children():
c.draw(renderer)
#bbox_artist(self, renderer, fill=False, props=dict(pad=0.))
self.stale = False
def update_frame(self, bbox, fontsize=None):
self.patch.set_bounds(bbox.x0, bbox.y0,
bbox.width, bbox.height)
if fontsize:
self.patch.set_mutation_scale(fontsize)
self.stale = True
def draw_frame(self, renderer):
# update the location and size of the legend
bbox = self.get_window_extent(renderer)
self.update_frame(bbox)
if self._drawFrame:
self.patch.draw(renderer)
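# Illustrative usage sketch, not part of the library: PaddedBox wraps any
# OffsetBox and draws an optional frame (the FancyBboxPatch in ``.patch``)
# with *pad* points of padding around the child:
#
#     from matplotlib.offsetbox import TextArea, PaddedBox
#     padded = PaddedBox(TextArea("framed text"), pad=5, draw_frame=True)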
class DrawingArea(OffsetBox):
"""
The DrawingArea can contain any Artist as a child. The DrawingArea
has a fixed width and height. The position of children relative to
the parent is fixed. The children can be clipped at the
boundaries of the parent.
"""
def __init__(self, width, height, xdescent=0.,
ydescent=0., clip=False):
"""
*width*, *height* : width and height of the container box.
*xdescent*, *ydescent* : descent of the box in x- and y-direction.
*clip* : Whether to clip the children
"""
super(DrawingArea, self).__init__()
self.width = width
self.height = height
self.xdescent = xdescent
self.ydescent = ydescent
self._clip_children = clip
self.offset_transform = mtransforms.Affine2D()
self.offset_transform.clear()
self.offset_transform.translate(0, 0)
self.dpi_transform = mtransforms.Affine2D()
@property
def clip_children(self):
"""
If the children of this DrawingArea should be clipped
by DrawingArea bounding box.
"""
return self._clip_children
@clip_children.setter
def clip_children(self, val):
self._clip_children = bool(val)
self.stale = True
def get_transform(self):
"""
Return the :class:`~matplotlib.transforms.Transform` applied
to the children
"""
return self.dpi_transform + self.offset_transform
def set_transform(self, t):
"""
set_transform is ignored.
"""
pass
def set_offset(self, xy):
"""
set offset of the container.
        Accept : tuple of (x, y) coordinates in display units.
"""
self._offset = xy
self.offset_transform.clear()
self.offset_transform.translate(xy[0], xy[1])
self.stale = True
def get_offset(self):
"""
return offset of the container.
"""
return self._offset
def get_window_extent(self, renderer):
'''
get the bounding box in display space.
'''
w, h, xd, yd = self.get_extent(renderer)
ox, oy = self.get_offset() # w, h, xd, yd)
return mtransforms.Bbox.from_bounds(ox - xd, oy - yd, w, h)
def get_extent(self, renderer):
"""
        Return width, height, xdescent, ydescent of the box
"""
dpi_cor = renderer.points_to_pixels(1.)
return self.width * dpi_cor, self.height * dpi_cor, \
self.xdescent * dpi_cor, self.ydescent * dpi_cor
def add_artist(self, a):
'Add any :class:`~matplotlib.artist.Artist` to the container box'
self._children.append(a)
if not a.is_transform_set():
a.set_transform(self.get_transform())
if self.axes is not None:
a.axes = self.axes
fig = self.figure
if fig is not None:
a.set_figure(fig)
def draw(self, renderer):
"""
Draw the children
"""
dpi_cor = renderer.points_to_pixels(1.)
self.dpi_transform.clear()
self.dpi_transform.scale(dpi_cor, dpi_cor)
# At this point the DrawingArea has a transform
# to the display space so the path created is
# good for clipping children
tpath = mtransforms.TransformedPath(
mpath.Path([[0, 0], [0, self.height],
[self.width, self.height],
[self.width, 0]]),
self.get_transform())
for c in self._children:
if self._clip_children and not (c.clipbox or c._clippath):
c.set_clip_path(tpath)
c.draw(renderer)
bbox_artist(self, renderer, fill=False, props=dict(pad=0.))
self.stale = False
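# Illustrative usage sketch, not part of the library (assumes an axes ``ax``):
#
#     from matplotlib.offsetbox import DrawingArea, AnnotationBbox
#     from matplotlib.patches import Circle
#     da = DrawingArea(20, 20, 0, 0)
#     da.add_artist(Circle((10, 10), 10, fc="r"))
#     ab = AnnotationBbox(da, (0.5, 0.5), frameon=False)
#     ax.add_artist(ab)
#
# The circle is drawn in the DrawingArea's own 20x20 pixel coordinate system
# and the whole area is then offset to the annotated point.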
class TextArea(OffsetBox):
"""
    The TextArea contains a single Text instance. The text is
    placed at (0, 0) with baseline+left alignment. The width and height
    of the TextArea instance are the width and height of its child
    text.
"""
def __init__(self, s,
textprops=None,
multilinebaseline=None,
minimumdescent=True,
):
"""
Parameters
----------
s : str
a string to be displayed.
textprops : `~matplotlib.font_manager.FontProperties`, optional
multilinebaseline : bool, optional
If `True`, baseline for multiline text is adjusted so that
            it is (approximately) center-aligned with single-line
text.
minimumdescent : bool, optional
If `True`, the box has a minimum descent of "p".
"""
if textprops is None:
textprops = {}
if "va" not in textprops:
textprops["va"] = "baseline"
self._text = mtext.Text(0, 0, s, **textprops)
OffsetBox.__init__(self)
self._children = [self._text]
self.offset_transform = mtransforms.Affine2D()
self.offset_transform.clear()
self.offset_transform.translate(0, 0)
self._baseline_transform = mtransforms.Affine2D()
self._text.set_transform(self.offset_transform +
self._baseline_transform)
self._multilinebaseline = multilinebaseline
self._minimumdescent = minimumdescent
def set_text(self, s):
"Set the text of this area as a string."
self._text.set_text(s)
self.stale = True
def get_text(self):
"Returns the string representation of this area's text"
return self._text.get_text()
def set_multilinebaseline(self, t):
"""
        Set multilinebaseline.
        If True, the baseline for multiline text is
        adjusted so that it is (approximately) center-aligned with
        single-line text.
"""
self._multilinebaseline = t
self.stale = True
def get_multilinebaseline(self):
"""
        get multilinebaseline.
"""
return self._multilinebaseline
def set_minimumdescent(self, t):
"""
        Set minimumdescent.
        If True, the extent of single-line text is adjusted so that
        it has the minimum descent of "p".
"""
self._minimumdescent = t
self.stale = True
def get_minimumdescent(self):
"""
get minimumdescent.
"""
return self._minimumdescent
def set_transform(self, t):
"""
set_transform is ignored.
"""
pass
def set_offset(self, xy):
"""
set offset of the container.
Accept : tuple of x,y coordinates in display units.
"""
self._offset = xy
self.offset_transform.clear()
self.offset_transform.translate(xy[0], xy[1])
self.stale = True
def get_offset(self):
"""
return offset of the container.
"""
return self._offset
def get_window_extent(self, renderer):
'''
get the bounding box in display space.
'''
w, h, xd, yd = self.get_extent(renderer)
ox, oy = self.get_offset() # w, h, xd, yd)
return mtransforms.Bbox.from_bounds(ox - xd, oy - yd, w, h)
def get_extent(self, renderer):
clean_line, ismath = self._text.is_math_text(self._text._text)
_, h_, d_ = renderer.get_text_width_height_descent(
"lp", self._text._fontproperties, ismath=False)
bbox, info, d = self._text._get_layout(renderer)
w, h = bbox.width, bbox.height
line = info[-1][0] # last line
self._baseline_transform.clear()
if len(info) > 1 and self._multilinebaseline:
d_new = 0.5 * h - 0.5 * (h_ - d_)
self._baseline_transform.translate(0, d - d_new)
d = d_new
else: # single line
h_d = max(h_ - d_, h - d)
if self.get_minimumdescent():
                # to have a minimum descent, i.e., "l" and "p" have the
                # same descents.
d = max(d, d_)
#else:
# d = d
h = h_d + d
return w, h, 0., d
def draw(self, renderer):
"""
Draw the children
"""
self._text.draw(renderer)
bbox_artist(self, renderer, fill=False, props=dict(pad=0.))
self.stale = False
class AuxTransformBox(OffsetBox):
"""
    Offset Box with the aux_transform. Its children will be
    transformed with the aux_transform first and then
    offset. The absolute coordinate of the aux_transform is meaningless
    as it will be automatically adjusted so that the lower-left corner
    of the bounding box of the children is set to (0, 0) before the
    offset transform.
    It is similar to DrawingArea, except that the extent of the box
    is not predetermined but calculated from the window extent of its
    children. Furthermore, the extent of the children will be
    calculated in the transformed coordinates.
"""
def __init__(self, aux_transform):
self.aux_transform = aux_transform
OffsetBox.__init__(self)
self.offset_transform = mtransforms.Affine2D()
self.offset_transform.clear()
self.offset_transform.translate(0, 0)
        # ref_offset_transform is used to make the offset_transform
        # always reference the lower-left corner of the bbox of its
        # children.
self.ref_offset_transform = mtransforms.Affine2D()
self.ref_offset_transform.clear()
def add_artist(self, a):
'Add any :class:`~matplotlib.artist.Artist` to the container box'
self._children.append(a)
a.set_transform(self.get_transform())
self.stale = True
def get_transform(self):
"""
Return the :class:`~matplotlib.transforms.Transform` applied
to the children
"""
return self.aux_transform + \
self.ref_offset_transform + \
self.offset_transform
def set_transform(self, t):
"""
set_transform is ignored.
"""
pass
def set_offset(self, xy):
"""
set offset of the container.
        Accept : tuple of (x, y) coordinates in display units.
"""
self._offset = xy
self.offset_transform.clear()
self.offset_transform.translate(xy[0], xy[1])
self.stale = True
def get_offset(self):
"""
return offset of the container.
"""
return self._offset
def get_window_extent(self, renderer):
'''
get the bounding box in display space.
'''
w, h, xd, yd = self.get_extent(renderer)
ox, oy = self.get_offset() # w, h, xd, yd)
return mtransforms.Bbox.from_bounds(ox - xd, oy - yd, w, h)
def get_extent(self, renderer):
# clear the offset transforms
_off = self.offset_transform.to_values() # to be restored later
self.ref_offset_transform.clear()
self.offset_transform.clear()
# calculate the extent
bboxes = [c.get_window_extent(renderer) for c in self._children]
ub = mtransforms.Bbox.union(bboxes)
        # adjust the ref_offset_transform
self.ref_offset_transform.translate(-ub.x0, -ub.y0)
        # restore the offset transform
mtx = self.offset_transform.matrix_from_values(*_off)
self.offset_transform.set_matrix(mtx)
return ub.width, ub.height, 0., 0.
def draw(self, renderer):
"""
Draw the children
"""
for c in self._children:
c.draw(renderer)
bbox_artist(self, renderer, fill=False, props=dict(pad=0.))
self.stale = False
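# Illustrative usage sketch, not part of the library: children added to an
# AuxTransformBox are passed through *aux_transform* first, so e.g. an
# ellipse can be kept rotated by 30 degrees inside an anchored box:
#
#     from matplotlib.transforms import Affine2D
#     from matplotlib.patches import Ellipse
#     box = AuxTransformBox(Affine2D().rotate_deg(30))
#     box.add_artist(Ellipse((0, 0), width=20, height=10))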
class AnchoredOffsetbox(OffsetBox):
"""
An offset box placed according to the legend location
    loc. AnchoredOffsetbox has a single child. When multiple children
    are needed, use an extra OffsetBox to enclose them. By default,
the offset box is anchored against its parent axes. You may
explicitly specify the bbox_to_anchor.
"""
zorder = 5 # zorder of the legend
def __init__(self, loc,
pad=0.4, borderpad=0.5,
child=None, prop=None, frameon=True,
bbox_to_anchor=None,
bbox_transform=None,
**kwargs):
"""
loc is a string or an integer specifying the legend location.
The valid location codes are::
'upper right' : 1,
'upper left' : 2,
'lower left' : 3,
'lower right' : 4,
'right' : 5,
'center left' : 6,
'center right' : 7,
'lower center' : 8,
'upper center' : 9,
'center' : 10,
        pad : pad around the child for drawing a frame. Given in
            fraction of fontsize.
        borderpad : pad between offsetbox frame and the bbox_to_anchor.
child : OffsetBox instance that will be anchored.
prop : font property. This is only used as a reference for paddings.
frameon : draw a frame box if True.
bbox_to_anchor : bbox to anchor. Use self.axes.bbox if None.
bbox_transform : with which the bbox_to_anchor will be transformed.
"""
super(AnchoredOffsetbox, self).__init__(**kwargs)
self.set_bbox_to_anchor(bbox_to_anchor, bbox_transform)
self.set_child(child)
self.loc = loc
self.borderpad = borderpad
self.pad = pad
if prop is None:
self.prop = FontProperties(size=rcParams["legend.fontsize"])
elif isinstance(prop, dict):
self.prop = FontProperties(**prop)
if "size" not in prop:
self.prop.set_size(rcParams["legend.fontsize"])
else:
self.prop = prop
self.patch = FancyBboxPatch(
xy=(0.0, 0.0), width=1., height=1.,
facecolor='w', edgecolor='k',
mutation_scale=self.prop.get_size_in_points(),
snap=True
)
self.patch.set_boxstyle("square", pad=0)
self._drawFrame = frameon
def set_child(self, child):
"set the child to be anchored"
self._child = child
if child is not None:
child.axes = self.axes
self.stale = True
def get_child(self):
"return the child"
return self._child
def get_children(self):
"return the list of children"
return [self._child]
def get_extent(self, renderer):
"""
return the extent of the artist. The extent of the child
added with the pad is returned
"""
w, h, xd, yd = self.get_child().get_extent(renderer)
fontsize = renderer.points_to_pixels(self.prop.get_size_in_points())
pad = self.pad * fontsize
return w + 2 * pad, h + 2 * pad, xd + pad, yd + pad
def get_bbox_to_anchor(self):
"""
        return the bbox that the legend will be anchored to
"""
if self._bbox_to_anchor is None:
return self.axes.bbox
else:
transform = self._bbox_to_anchor_transform
if transform is None:
return self._bbox_to_anchor
else:
return TransformedBbox(self._bbox_to_anchor,
transform)
def set_bbox_to_anchor(self, bbox, transform=None):
"""
        set the bbox that the child will be anchored to.
        *bbox* can be a Bbox instance, a list of [left, bottom, width,
        height], or a list of [left, bottom] where the width and
        height will be assumed to be zero. The bbox will be
        transformed to display coordinates by the given transform.
"""
if bbox is None or isinstance(bbox, BboxBase):
self._bbox_to_anchor = bbox
else:
try:
l = len(bbox)
except TypeError:
raise ValueError("Invalid argument for bbox : %s" % str(bbox))
if l == 2:
bbox = [bbox[0], bbox[1], 0, 0]
self._bbox_to_anchor = Bbox.from_bounds(*bbox)
self._bbox_to_anchor_transform = transform
self.stale = True
def get_window_extent(self, renderer):
'''
get the bounding box in display space.
'''
self._update_offset_func(renderer)
w, h, xd, yd = self.get_extent(renderer)
ox, oy = self.get_offset(w, h, xd, yd, renderer)
return Bbox.from_bounds(ox - xd, oy - yd, w, h)
def _update_offset_func(self, renderer, fontsize=None):
"""
Update the offset func which depends on the dpi of the
renderer (because of the padding).
"""
if fontsize is None:
fontsize = renderer.points_to_pixels(
self.prop.get_size_in_points())
def _offset(w, h, xd, yd, renderer, fontsize=fontsize, self=self):
bbox = Bbox.from_bounds(0, 0, w, h)
borderpad = self.borderpad * fontsize
bbox_to_anchor = self.get_bbox_to_anchor()
x0, y0 = self._get_anchored_bbox(self.loc,
bbox,
bbox_to_anchor,
borderpad)
return x0 + xd, y0 + yd
self.set_offset(_offset)
def update_frame(self, bbox, fontsize=None):
self.patch.set_bounds(bbox.x0, bbox.y0,
bbox.width, bbox.height)
if fontsize:
self.patch.set_mutation_scale(fontsize)
def draw(self, renderer):
"draw the artist"
if not self.get_visible():
return
fontsize = renderer.points_to_pixels(self.prop.get_size_in_points())
self._update_offset_func(renderer, fontsize)
if self._drawFrame:
# update the location and size of the legend
bbox = self.get_window_extent(renderer)
self.update_frame(bbox, fontsize)
self.patch.draw(renderer)
width, height, xdescent, ydescent = self.get_extent(renderer)
px, py = self.get_offset(width, height, xdescent, ydescent, renderer)
self.get_child().set_offset((px, py))
self.get_child().draw(renderer)
self.stale = False
def _get_anchored_bbox(self, loc, bbox, parentbbox, borderpad):
"""
return the position of the bbox anchored at the parentbbox
with the loc code, with the borderpad.
"""
assert loc in range(1, 11) # called only internally
BEST, UR, UL, LL, LR, R, CL, CR, LC, UC, C = list(xrange(11))
anchor_coefs = {UR: "NE",
UL: "NW",
LL: "SW",
LR: "SE",
R: "E",
CL: "W",
CR: "E",
LC: "S",
UC: "N",
C: "C"}
c = anchor_coefs[loc]
container = parentbbox.padded(-borderpad)
anchored_box = bbox.anchored(c, container=container)
return anchored_box.x0, anchored_box.y0
class AnchoredText(AnchoredOffsetbox):
"""
AnchoredOffsetbox with Text.
"""
def __init__(self, s, loc, pad=0.4, borderpad=0.5, prop=None, **kwargs):
"""
Parameters
----------
s : string
Text.
loc : str
Location code.
pad : float, optional
Pad between the text and the frame as fraction of the font
size.
borderpad : float, optional
Pad between the frame and the axes (or *bbox_to_anchor*).
prop : `matplotlib.font_manager.FontProperties`
Font properties.
Notes
-----
Other keyword parameters of `AnchoredOffsetbox` are also
allowed.
"""
if prop is None:
prop = {}
propkeys = list(six.iterkeys(prop))
badkwargs = ('ha', 'horizontalalignment', 'va', 'verticalalignment')
if set(badkwargs) & set(propkeys):
warnings.warn("Mixing horizontalalignment or verticalalignment "
"with AnchoredText is not supported.")
self.txt = TextArea(s, textprops=prop,
minimumdescent=False)
fp = self.txt._text.get_fontproperties()
super(AnchoredText, self).__init__(loc, pad=pad, borderpad=borderpad,
child=self.txt,
prop=fp,
**kwargs)
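# Illustrative usage sketch, not part of the library (assumes an axes ``ax``):
#
#     from matplotlib.offsetbox import AnchoredText
#     at = AnchoredText("panel (a)", loc=2, prop=dict(size=10), frameon=True)
#     at.patch.set_boxstyle("round,pad=0.3")
#     ax.add_artist(at)
#
# ``loc=2`` anchors the text box in the upper-left corner, using the same
# location codes as the legend.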
class OffsetImage(OffsetBox):
def __init__(self, arr,
zoom=1,
cmap=None,
norm=None,
interpolation=None,
origin=None,
filternorm=1,
filterrad=4.0,
resample=False,
dpi_cor=True,
**kwargs
):
OffsetBox.__init__(self)
self._dpi_cor = dpi_cor
self.image = BboxImage(bbox=self.get_window_extent,
cmap=cmap,
norm=norm,
interpolation=interpolation,
origin=origin,
filternorm=filternorm,
filterrad=filterrad,
resample=resample,
**kwargs
)
self._children = [self.image]
self.set_zoom(zoom)
self.set_data(arr)
def set_data(self, arr):
self._data = np.asarray(arr)
self.image.set_data(self._data)
self.stale = True
def get_data(self):
return self._data
def set_zoom(self, zoom):
self._zoom = zoom
self.stale = True
def get_zoom(self):
return self._zoom
# def set_axes(self, axes):
# self.image.set_axes(axes)
# martist.Artist.set_axes(self, axes)
# def set_offset(self, xy):
# """
# set offset of the container.
# Accept : tuple of x,y coordinate in disokay units.
# """
# self._offset = xy
# self.offset_transform.clear()
# self.offset_transform.translate(xy[0], xy[1])
def get_offset(self):
"""
return offset of the container.
"""
return self._offset
def get_children(self):
return [self.image]
def get_window_extent(self, renderer):
'''
get the bounding box in display space.
'''
w, h, xd, yd = self.get_extent(renderer)
ox, oy = self.get_offset()
return mtransforms.Bbox.from_bounds(ox - xd, oy - yd, w, h)
def get_extent(self, renderer):
if self._dpi_cor: # True, do correction
dpi_cor = renderer.points_to_pixels(1.)
else:
dpi_cor = 1.
zoom = self.get_zoom()
data = self.get_data()
ny, nx = data.shape[:2]
w, h = dpi_cor * nx * zoom, dpi_cor * ny * zoom
return w, h, 0, 0
def draw(self, renderer):
"""
Draw the children
"""
self.image.draw(renderer)
# bbox_artist(self, renderer, fill=False, props=dict(pad=0.))
self.stale = False
class AnnotationBbox(martist.Artist, _AnnotationBase):
"""
Annotation-like class, but with offsetbox instead of Text.
"""
zorder = 3
def __str__(self):
return "AnnotationBbox(%g,%g)" % (self.xy[0], self.xy[1])
@docstring.dedent_interpd
def __init__(self, offsetbox, xy,
xybox=None,
xycoords='data',
boxcoords=None,
frameon=True, pad=0.4, # BboxPatch
annotation_clip=None,
box_alignment=(0.5, 0.5),
bboxprops=None,
arrowprops=None,
fontsize=None,
**kwargs):
"""
*offsetbox* : OffsetBox instance
*xycoords* : same as Annotation but can be a tuple of two
strings which are interpreted as x and y coordinates.
*boxcoords* : similar to textcoords as Annotation but can be a
tuple of two strings which are interpreted as x and y
coordinates.
        *box_alignment* : a tuple of two floats for the horizontal and
        vertical alignment of the offset box w.r.t. the *boxcoords*.
        The lower-left corner is (0, 0) and the upper-right corner is (1, 1).
        Other parameters are identical to those of Annotation.
"""
martist.Artist.__init__(self, **kwargs)
_AnnotationBase.__init__(self,
xy,
xycoords=xycoords,
annotation_clip=annotation_clip)
self.offsetbox = offsetbox
self.arrowprops = arrowprops
self.set_fontsize(fontsize)
if xybox is None:
self.xybox = xy
else:
self.xybox = xybox
if boxcoords is None:
self.boxcoords = xycoords
else:
self.boxcoords = boxcoords
if arrowprops is not None:
self._arrow_relpos = self.arrowprops.pop("relpos", (0.5, 0.5))
self.arrow_patch = FancyArrowPatch((0, 0), (1, 1),
**self.arrowprops)
else:
self._arrow_relpos = None
self.arrow_patch = None
#self._fw, self._fh = 0., 0. # for alignment
self._box_alignment = box_alignment
# frame
self.patch = FancyBboxPatch(
xy=(0.0, 0.0), width=1., height=1.,
facecolor='w', edgecolor='k',
mutation_scale=self.prop.get_size_in_points(),
snap=True
)
self.patch.set_boxstyle("square", pad=pad)
if bboxprops:
self.patch.set(**bboxprops)
self._drawFrame = frameon
@property
def xyann(self):
return self.xybox
@xyann.setter
def xyann(self, xyann):
self.xybox = xyann
self.stale = True
@property
def anncoords(self):
return self.boxcoords
@anncoords.setter
def anncoords(self, coords):
self.boxcoords = coords
self.stale = True
def contains(self, event):
t, tinfo = self.offsetbox.contains(event)
#if self.arrow_patch is not None:
# a,ainfo=self.arrow_patch.contains(event)
# t = t or a
# self.arrow_patch is currently not checked as this can be a line - JJ
return t, tinfo
def get_children(self):
children = [self.offsetbox, self.patch]
if self.arrow_patch:
children.append(self.arrow_patch)
return children
def set_figure(self, fig):
if self.arrow_patch is not None:
self.arrow_patch.set_figure(fig)
self.offsetbox.set_figure(fig)
martist.Artist.set_figure(self, fig)
def set_fontsize(self, s=None):
"""
set fontsize in points
"""
if s is None:
s = rcParams["legend.fontsize"]
self.prop = FontProperties(size=s)
self.stale = True
def get_fontsize(self, s=None):
"""
return fontsize in points
"""
return self.prop.get_size_in_points()
def update_positions(self, renderer):
"""
Update the pixel positions of the annotated point and the text.
"""
xy_pixel = self._get_position_xy(renderer)
self._update_position_xybox(renderer, xy_pixel)
mutation_scale = renderer.points_to_pixels(self.get_fontsize())
self.patch.set_mutation_scale(mutation_scale)
if self.arrow_patch:
self.arrow_patch.set_mutation_scale(mutation_scale)
def _update_position_xybox(self, renderer, xy_pixel):
"""
Update the pixel positions of the annotation text and the arrow
patch.
"""
x, y = self.xybox
if isinstance(self.boxcoords, tuple):
xcoord, ycoord = self.boxcoords
x1, y1 = self._get_xy(renderer, x, y, xcoord)
x2, y2 = self._get_xy(renderer, x, y, ycoord)
ox0, oy0 = x1, y2
else:
ox0, oy0 = self._get_xy(renderer, x, y, self.boxcoords)
w, h, xd, yd = self.offsetbox.get_extent(renderer)
_fw, _fh = self._box_alignment
self.offsetbox.set_offset((ox0 - _fw * w + xd, oy0 - _fh * h + yd))
# update patch position
bbox = self.offsetbox.get_window_extent(renderer)
#self.offsetbox.set_offset((ox0-_fw*w, oy0-_fh*h))
self.patch.set_bounds(bbox.x0, bbox.y0,
bbox.width, bbox.height)
x, y = xy_pixel
ox1, oy1 = x, y
if self.arrowprops:
x0, y0 = x, y
d = self.arrowprops.copy()
# Use FancyArrowPatch if self.arrowprops has "arrowstyle" key.
# adjust the starting point of the arrow relative to
# the textbox.
# TODO : Rotation needs to be accounted.
relpos = self._arrow_relpos
ox0 = bbox.x0 + bbox.width * relpos[0]
oy0 = bbox.y0 + bbox.height * relpos[1]
# The arrow will be drawn from (ox0, oy0) to (ox1,
# oy1). It will be first clipped by patchA and patchB.
            # Then it will be shrunk by shrinkA and shrinkB
# (in points). If patch A is not set, self.bbox_patch
# is used.
self.arrow_patch.set_positions((ox0, oy0), (ox1, oy1))
fs = self.prop.get_size_in_points()
mutation_scale = d.pop("mutation_scale", fs)
mutation_scale = renderer.points_to_pixels(mutation_scale)
self.arrow_patch.set_mutation_scale(mutation_scale)
patchA = d.pop("patchA", self.patch)
self.arrow_patch.set_patchA(patchA)
def draw(self, renderer):
"""
Draw the :class:`Annotation` object to the given *renderer*.
"""
if renderer is not None:
self._renderer = renderer
if not self.get_visible():
return
xy_pixel = self._get_position_xy(renderer)
if not self._check_xy(renderer, xy_pixel):
return
self.update_positions(renderer)
if self.arrow_patch is not None:
if self.arrow_patch.figure is None and self.figure is not None:
self.arrow_patch.figure = self.figure
self.arrow_patch.draw(renderer)
if self._drawFrame:
self.patch.draw(renderer)
self.offsetbox.draw(renderer)
self.stale = False
class DraggableBase(object):
"""
    helper code for a draggable artist (legend, offsetbox)
    The derived class must override the following two methods.
    def save_offset(self):
        pass
    def update_offset(self, dx, dy):
        pass
    *save_offset* is called when the object is picked for dragging and it is
    meant to save the reference position of the artist.
    *update_offset* is called during the dragging. dx and dy are the pixel
    offsets from the point where the mouse drag started.
    Optionally you may override the following two methods.
    def artist_picker(self, artist, evt):
        return self.ref_artist.contains(evt)
    def finalize_offset(self):
        pass
    *artist_picker* is a picker method that will be
    used. *finalize_offset* is called when the mouse is released. In the
    current implementation of DraggableLegend and DraggableAnnotation,
    *update_offset* places the artists simply in display
    coordinates. And *finalize_offset* recalculates their position in
    normalized axes coordinates and sets a relevant attribute.
"""
def __init__(self, ref_artist, use_blit=False):
self.ref_artist = ref_artist
self.got_artist = False
self.canvas = self.ref_artist.figure.canvas
self._use_blit = use_blit and self.canvas.supports_blit
c2 = self.canvas.mpl_connect('pick_event', self.on_pick)
c3 = self.canvas.mpl_connect('button_release_event', self.on_release)
ref_artist.set_picker(self.artist_picker)
self.cids = [c2, c3]
def on_motion(self, evt):
if self.got_artist:
dx = evt.x - self.mouse_x
dy = evt.y - self.mouse_y
self.update_offset(dx, dy)
self.canvas.draw()
def on_motion_blit(self, evt):
if self.got_artist:
dx = evt.x - self.mouse_x
dy = evt.y - self.mouse_y
self.update_offset(dx, dy)
self.canvas.restore_region(self.background)
self.ref_artist.draw(self.ref_artist.figure._cachedRenderer)
self.canvas.blit(self.ref_artist.figure.bbox)
def on_pick(self, evt):
if evt.artist == self.ref_artist:
self.mouse_x = evt.mouseevent.x
self.mouse_y = evt.mouseevent.y
self.got_artist = True
if self._use_blit:
self.ref_artist.set_animated(True)
self.canvas.draw()
self.background = self.canvas.copy_from_bbox(
self.ref_artist.figure.bbox)
self.ref_artist.draw(self.ref_artist.figure._cachedRenderer)
self.canvas.blit(self.ref_artist.figure.bbox)
self._c1 = self.canvas.mpl_connect('motion_notify_event',
self.on_motion_blit)
else:
self._c1 = self.canvas.mpl_connect('motion_notify_event',
self.on_motion)
self.save_offset()
def on_release(self, event):
if self.got_artist:
self.finalize_offset()
self.got_artist = False
self.canvas.mpl_disconnect(self._c1)
if self._use_blit:
self.ref_artist.set_animated(False)
def disconnect(self):
"""disconnect the callbacks"""
for cid in self.cids:
self.canvas.mpl_disconnect(cid)
try:
c1 = self._c1
except AttributeError:
pass
else:
self.canvas.mpl_disconnect(c1)
def artist_picker(self, artist, evt):
return self.ref_artist.contains(evt)
def save_offset(self):
pass
def update_offset(self, dx, dy):
pass
def finalize_offset(self):
pass
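# Illustrative sketch of a minimal DraggableBase subclass, not part of the
# library: *save_offset* records the reference position when the drag starts
# and *update_offset* applies the pixel deltas (a real implementation would
# convert dx/dy from pixels to the artist's own coordinates):
#
#     class DraggableRectangle(DraggableBase):
#         def __init__(self, rect):
#             DraggableBase.__init__(self, rect)
#             self.rect = rect
#
#         def save_offset(self):
#             self.x0, self.y0 = self.rect.get_x(), self.rect.get_y()
#
#         def update_offset(self, dx, dy):
#             self.rect.set_x(self.x0 + dx)
#             self.rect.set_y(self.y0 + dy)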
class DraggableOffsetBox(DraggableBase):
def __init__(self, ref_artist, offsetbox, use_blit=False):
DraggableBase.__init__(self, ref_artist, use_blit=use_blit)
self.offsetbox = offsetbox
def save_offset(self):
offsetbox = self.offsetbox
renderer = offsetbox.figure._cachedRenderer
w, h, xd, yd = offsetbox.get_extent(renderer)
offset = offsetbox.get_offset(w, h, xd, yd, renderer)
self.offsetbox_x, self.offsetbox_y = offset
self.offsetbox.set_offset(offset)
def update_offset(self, dx, dy):
loc_in_canvas = self.offsetbox_x + dx, self.offsetbox_y + dy
self.offsetbox.set_offset(loc_in_canvas)
def get_loc_in_canvas(self):
offsetbox = self.offsetbox
renderer = offsetbox.figure._cachedRenderer
w, h, xd, yd = offsetbox.get_extent(renderer)
ox, oy = offsetbox._offset
loc_in_canvas = (ox - xd, oy - yd)
return loc_in_canvas
class DraggableAnnotation(DraggableBase):
def __init__(self, annotation, use_blit=False):
DraggableBase.__init__(self, annotation, use_blit=use_blit)
self.annotation = annotation
def save_offset(self):
ann = self.annotation
self.ox, self.oy = ann.get_transform().transform(ann.xyann)
def update_offset(self, dx, dy):
ann = self.annotation
ann.xyann = ann.get_transform().inverted().transform(
(self.ox + dx, self.oy + dy))
if __name__ == "__main__":
import matplotlib.pyplot as plt
fig = plt.figure(1)
fig.clf()
ax = plt.subplot(121)
#txt = ax.text(0.5, 0.5, "Test", size=30, ha="center", color="w")
kwargs = dict()
a = np.arange(256).reshape(16, 16) / 256.
myimage = OffsetImage(a,
zoom=2,
norm=None,
origin=None,
**kwargs
)
ax.add_artist(myimage)
myimage.set_offset((100, 100))
myimage2 = OffsetImage(a,
zoom=2,
norm=None,
origin=None,
**kwargs
)
ann = AnnotationBbox(myimage2, (0.5, 0.5),
xybox=(30, 30),
xycoords='data',
boxcoords="offset points",
frameon=True, pad=0.4, # BboxPatch
bboxprops=dict(boxstyle="round", fc="y"),
fontsize=None,
arrowprops=dict(arrowstyle="->"),
)
ax.add_artist(ann)
plt.draw()
plt.show()
| bsd-3-clause |
lucidfrontier45/scikit-learn | examples/manifold/plot_manifold_sphere.py | 1 | 4572 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=============================================
Manifold Learning methods on a severed sphere
=============================================
An application of the different :ref:`manifold` techniques
on a spherical data-set. Here one can see the use of
dimensionality reduction in order to gain some intuition
regarding the Manifold learning methods. Regarding the dataset,
the poles are cut from the sphere, as well as a thin slice down its
side. This enables the manifold learning techniques to
'spread it open' whilst projecting it onto two dimensions.
For a similar example, where the methods are applied to the
S-curve dataset, see :ref:`example_manifold_plot_compare_methods.py`
Note that the purpose of the :ref:`MDS <multidimensional_scaling>` is
to find a low-dimensional representation of the data (here 2D) in
which the distances respect well the distances in the original
high-dimensional space. Unlike other manifold-learning algorithms,
it does not seek an isotropic representation of the data in
the low-dimensional space. Here the manifold problem matches fairly
well that of representing a flat map of the Earth, as with a
`map projection <http://en.wikipedia.org/wiki/Map_projection>`_
"""
# Author: Jaques Grobler <[email protected]>
# License: BSD
print __doc__
from time import time
import numpy as np
import pylab as pl
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.ticker import NullFormatter
from sklearn import manifold
from sklearn.utils import check_random_state
# Next line to silence pyflakes.
Axes3D
# Variables for manifold learning.
n_neighbors = 10
n_samples = 1000
# Create our sphere.
random_state = check_random_state(0)
p = random_state.rand(n_samples) * (2 * np.pi - 0.55)
t = random_state.rand(n_samples) * np.pi
# Sever the poles from the sphere.
indices = ((t < (np.pi - (np.pi / 8))) & (t > ((np.pi / 8))))
colors = p[indices]
x, y, z = np.sin(t[indices]) * np.cos(p[indices]), \
np.sin(t[indices]) * np.sin(p[indices]), \
np.cos(t[indices])
# Plot our dataset.
fig = pl.figure(figsize=(15, 8))
pl.suptitle("Manifold Learning with %i points, %i neighbors"
% (1000, n_neighbors), fontsize=14)
ax = fig.add_subplot(241, projection='3d')
ax.scatter(x, y, z, c=p[indices], cmap=pl.cm.rainbow)
try:
# compatibility matplotlib < 1.0
ax.view_init(40, -10)
except:
pass
sphere_data = np.array([x, y, z]).T
# Perform Locally Linear Embedding Manifold learning
methods = ['standard', 'ltsa', 'hessian', 'modified']
labels = ['LLE', 'LTSA', 'Hessian LLE', 'Modified LLE']
for i, method in enumerate(methods):
t0 = time()
trans_data = manifold\
.LocallyLinearEmbedding(n_neighbors, 2,
method=method).fit_transform(sphere_data).T
t1 = time()
print "%s: %.2g sec" % (methods[i], t1 - t0)
ax = fig.add_subplot(242 + i)
pl.scatter(trans_data[0], trans_data[1], c=colors, cmap=pl.cm.rainbow)
pl.title("%s (%.2g sec)" % (labels[i], t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
pl.axis('tight')
# Perform Isomap Manifold learning.
t0 = time()
trans_data = manifold.Isomap(n_neighbors, n_components=2)\
.fit_transform(sphere_data).T
t1 = time()
print "%s: %.2g sec" % ('ISO', t1 - t0)
ax = fig.add_subplot(246)
pl.scatter(trans_data[0], trans_data[1], c=colors, cmap=pl.cm.rainbow)
pl.title("%s (%.2g sec)" % ('Isomap', t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
pl.axis('tight')
# Perform Multi-dimensional scaling.
t0 = time()
mds = manifold.MDS(2, max_iter=100, n_init=1)
trans_data = mds.fit_transform(sphere_data).T
t1 = time()
print "MDS: %.2g sec" % (t1 - t0)
ax = fig.add_subplot(247)
pl.scatter(trans_data[0], trans_data[1], c=colors, cmap=pl.cm.rainbow)
pl.title("MDS (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
pl.axis('tight')
# Perform Spectral Embedding.
t0 = time()
se = manifold.SpectralEmbedding(n_components=2,
n_neighbors=n_neighbors)
trans_data = se.fit_transform(sphere_data).T
t1 = time()
print "Spectral Embedding: %.2g sec" % (t1 - t0)
ax = fig.add_subplot(248)
pl.scatter(trans_data[0], trans_data[1], c=colors, cmap=pl.cm.rainbow)
pl.title("Spectral Embedding (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
pl.axis('tight')
pl.show()
| bsd-3-clause |
TomAugspurger/pandas | pandas/tests/series/methods/test_rename_axis.py | 4 | 1503 | import pytest
from pandas import Index, MultiIndex, Series
import pandas._testing as tm
class TestSeriesRenameAxis:
def test_rename_axis_mapper(self):
# GH 19978
mi = MultiIndex.from_product([["a", "b", "c"], [1, 2]], names=["ll", "nn"])
ser = Series(list(range(len(mi))), index=mi)
result = ser.rename_axis(index={"ll": "foo"})
assert result.index.names == ["foo", "nn"]
result = ser.rename_axis(index=str.upper, axis=0)
assert result.index.names == ["LL", "NN"]
result = ser.rename_axis(index=["foo", "goo"])
assert result.index.names == ["foo", "goo"]
with pytest.raises(TypeError, match="unexpected"):
ser.rename_axis(columns="wrong")
def test_rename_axis_inplace(self, datetime_series):
# GH 15704
expected = datetime_series.rename_axis("foo")
result = datetime_series
no_return = result.rename_axis("foo", inplace=True)
assert no_return is None
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("kwargs", [{"mapper": None}, {"index": None}, {}])
def test_rename_axis_none(self, kwargs):
# GH 25034
index = Index(list("abc"), name="foo")
ser = Series([1, 2, 3], index=index)
result = ser.rename_axis(**kwargs)
expected_index = index.rename(None) if kwargs else index
expected = Series([1, 2, 3], index=expected_index)
tm.assert_series_equal(result, expected)
| bsd-3-clause |
chugunovyar/factoryForBuild | env/lib/python2.7/site-packages/matplotlib/tests/test_mlab.py | 5 | 122196 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import tempfile
from numpy.testing import assert_allclose, assert_array_equal
import numpy.ma.testutils as matest
import numpy as np
import datetime as datetime
from nose.tools import (assert_equal, assert_almost_equal, assert_not_equal,
assert_true, assert_raises)
import matplotlib.mlab as mlab
import matplotlib.cbook as cbook
from matplotlib.testing.decorators import knownfailureif, CleanupTestCase
try:
from mpl_toolkits.natgrid import _natgrid
HAS_NATGRID = True
except ImportError:
HAS_NATGRID = False
class general_testcase(CleanupTestCase):
def test_colinear_pca(self):
a = mlab.PCA._get_colinear()
pca = mlab.PCA(a)
assert_allclose(pca.fracs[2:], 0., atol=1e-8)
assert_allclose(pca.Y[:, 2:], 0., atol=1e-8)
def test_prctile(self):
# test odd lengths
x = [1, 2, 3]
assert_equal(mlab.prctile(x, 50), np.median(x))
# test even lengths
x = [1, 2, 3, 4]
assert_equal(mlab.prctile(x, 50), np.median(x))
# derived from email sent by jason-sage to MPL-user on 20090914
ob1 = [1, 1, 2, 2, 1, 2, 4, 3, 2, 2, 2, 3,
4, 5, 6, 7, 8, 9, 7, 6, 4, 5, 5]
p = [0, 75, 100]
expected = [1, 5.5, 9]
# test vectorized
actual = mlab.prctile(ob1, p)
assert_allclose(expected, actual)
# test scalar
for pi, expectedi in zip(p, expected):
actuali = mlab.prctile(ob1, pi)
assert_allclose(expectedi, actuali)
def test_norm(self):
np.random.seed(0)
N = 1000
x = np.random.standard_normal(N)
targ = np.linalg.norm(x)
res = mlab._norm(x)
assert_almost_equal(targ, res)
class spacing_testcase(CleanupTestCase):
def test_logspace_tens(self):
xmin = .01
xmax = 1000.
N = 6
res = mlab.logspace(xmin, xmax, N)
targ = np.logspace(np.log10(xmin), np.log10(xmax), N)
assert_allclose(targ, res)
def test_logspace_primes(self):
xmin = .03
xmax = 1313.
N = 7
res = mlab.logspace(xmin, xmax, N)
targ = np.logspace(np.log10(xmin), np.log10(xmax), N)
assert_allclose(targ, res)
def test_logspace_none(self):
xmin = .03
xmax = 1313.
N = 0
res = mlab.logspace(xmin, xmax, N)
targ = np.logspace(np.log10(xmin), np.log10(xmax), N)
assert_array_equal(targ, res)
assert_equal(res.size, 0)
def test_logspace_single(self):
xmin = .03
xmax = 1313.
N = 1
res = mlab.logspace(xmin, xmax, N)
targ = np.logspace(np.log10(xmin), np.log10(xmax), N)
assert_array_equal(targ, res)
assert_equal(res.size, 1)
class stride_testcase(CleanupTestCase):
def get_base(self, x):
y = x
while y.base is not None:
y = y.base
return y
def calc_window_target(self, x, NFFT, noverlap=0):
'''This is an adaptation of the original window extraction
        algorithm. It is here to check that the new implementation
        has the same result.'''
step = NFFT - noverlap
ind = np.arange(0, len(x) - NFFT + 1, step)
n = len(ind)
result = np.zeros((NFFT, n))
# do the ffts of the slices
for i in range(n):
result[:, i] = x[ind[i]:ind[i]+NFFT]
return result
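    # Illustrative sketch, not part of the test suite: for x = np.arange(6),
    # mlab.stride_windows(x, n=3, noverlap=1) steps by n - noverlap = 2 and
    # returns the overlapping slices as columns of a (3, 2) array:
    #
    #     [[0, 2],
    #      [1, 3],
    #      [2, 4]]     # columns are x[0:3] and x[2:5]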
def test_stride_windows_2D_ValueError(self):
x = np.arange(10)[np.newaxis]
assert_raises(ValueError, mlab.stride_windows, x, 5)
def test_stride_windows_0D_ValueError(self):
x = np.array(0)
assert_raises(ValueError, mlab.stride_windows, x, 5)
def test_stride_windows_noverlap_gt_n_ValueError(self):
x = np.arange(10)
assert_raises(ValueError, mlab.stride_windows, x, 2, 3)
def test_stride_windows_noverlap_eq_n_ValueError(self):
x = np.arange(10)
assert_raises(ValueError, mlab.stride_windows, x, 2, 2)
def test_stride_windows_n_gt_lenx_ValueError(self):
x = np.arange(10)
assert_raises(ValueError, mlab.stride_windows, x, 11)
def test_stride_windows_n_lt_1_ValueError(self):
x = np.arange(10)
assert_raises(ValueError, mlab.stride_windows, x, 0)
def test_stride_repeat_2D_ValueError(self):
x = np.arange(10)[np.newaxis]
assert_raises(ValueError, mlab.stride_repeat, x, 5)
def test_stride_repeat_axis_lt_0_ValueError(self):
x = np.array(0)
assert_raises(ValueError, mlab.stride_repeat, x, 5, axis=-1)
def test_stride_repeat_axis_gt_1_ValueError(self):
x = np.array(0)
assert_raises(ValueError, mlab.stride_repeat, x, 5, axis=2)
def test_stride_repeat_n_lt_1_ValueError(self):
x = np.arange(10)
assert_raises(ValueError, mlab.stride_repeat, x, 0)
def test_stride_repeat_n1_axis0(self):
x = np.arange(10)
y = mlab.stride_repeat(x, 1)
assert_equal((1, ) + x.shape, y.shape)
assert_array_equal(x, y.flat)
assert_true(self.get_base(y) is x)
def test_stride_repeat_n1_axis1(self):
x = np.arange(10)
y = mlab.stride_repeat(x, 1, axis=1)
assert_equal(x.shape + (1, ), y.shape)
assert_array_equal(x, y.flat)
assert_true(self.get_base(y) is x)
def test_stride_repeat_n5_axis0(self):
x = np.arange(10)
y = mlab.stride_repeat(x, 5)
yr = np.repeat(x[np.newaxis], 5, axis=0)
assert_equal(yr.shape, y.shape)
assert_array_equal(yr, y)
assert_equal((5, ) + x.shape, y.shape)
assert_true(self.get_base(y) is x)
def test_stride_repeat_n5_axis1(self):
x = np.arange(10)
y = mlab.stride_repeat(x, 5, axis=1)
yr = np.repeat(x[np.newaxis], 5, axis=0).T
assert_equal(yr.shape, y.shape)
assert_array_equal(yr, y)
assert_equal(x.shape + (5, ), y.shape)
assert_true(self.get_base(y) is x)
def test_stride_windows_n1_noverlap0_axis0(self):
x = np.arange(10)
y = mlab.stride_windows(x, 1)
yt = self.calc_window_target(x, 1)
assert_equal(yt.shape, y.shape)
assert_array_equal(yt, y)
assert_equal((1, ) + x.shape, y.shape)
assert_true(self.get_base(y) is x)
def test_stride_windows_n1_noverlap0_axis1(self):
x = np.arange(10)
y = mlab.stride_windows(x, 1, axis=1)
yt = self.calc_window_target(x, 1).T
assert_equal(yt.shape, y.shape)
assert_array_equal(yt, y)
assert_equal(x.shape + (1, ), y.shape)
assert_true(self.get_base(y) is x)
def test_stride_windows_n5_noverlap0_axis0(self):
x = np.arange(100)
y = mlab.stride_windows(x, 5)
yt = self.calc_window_target(x, 5)
assert_equal(yt.shape, y.shape)
assert_array_equal(yt, y)
assert_equal((5, 20), y.shape)
assert_true(self.get_base(y) is x)
def test_stride_windows_n5_noverlap0_axis1(self):
x = np.arange(100)
y = mlab.stride_windows(x, 5, axis=1)
yt = self.calc_window_target(x, 5).T
assert_equal(yt.shape, y.shape)
assert_array_equal(yt, y)
assert_equal((20, 5), y.shape)
assert_true(self.get_base(y) is x)
def test_stride_windows_n15_noverlap2_axis0(self):
x = np.arange(100)
y = mlab.stride_windows(x, 15, 2)
yt = self.calc_window_target(x, 15, 2)
assert_equal(yt.shape, y.shape)
assert_array_equal(yt, y)
assert_equal((15, 7), y.shape)
assert_true(self.get_base(y) is x)
def test_stride_windows_n15_noverlap2_axis1(self):
x = np.arange(100)
y = mlab.stride_windows(x, 15, 2, axis=1)
yt = self.calc_window_target(x, 15, 2).T
assert_equal(yt.shape, y.shape)
assert_array_equal(yt, y)
assert_equal((7, 15), y.shape)
assert_true(self.get_base(y) is x)
def test_stride_windows_n13_noverlapn3_axis0(self):
x = np.arange(100)
y = mlab.stride_windows(x, 13, -3)
yt = self.calc_window_target(x, 13, -3)
assert_equal(yt.shape, y.shape)
assert_array_equal(yt, y)
assert_equal((13, 6), y.shape)
assert_true(self.get_base(y) is x)
def test_stride_windows_n13_noverlapn3_axis1(self):
x = np.arange(100)
y = mlab.stride_windows(x, 13, -3, axis=1)
yt = self.calc_window_target(x, 13, -3).T
assert_equal(yt.shape, y.shape)
assert_array_equal(yt, y)
assert_equal((6, 13), y.shape)
assert_true(self.get_base(y) is x)
def test_stride_windows_n32_noverlap0_axis0_unflatten(self):
n = 32
x = np.arange(n)[np.newaxis]
x1 = np.tile(x, (21, 1))
x2 = x1.flatten()
y = mlab.stride_windows(x2, n)
assert_equal(y.shape, x1.T.shape)
assert_array_equal(y, x1.T)
def test_stride_windows_n32_noverlap0_axis1_unflatten(self):
n = 32
x = np.arange(n)[np.newaxis]
x1 = np.tile(x, (21, 1))
x2 = x1.flatten()
y = mlab.stride_windows(x2, n, axis=1)
assert_equal(y.shape, x1.shape)
assert_array_equal(y, x1)
def test_stride_ensure_integer_type(self):
N = 100
x = np.empty(N + 20, dtype='>f4')
x.fill(np.NaN)
y = x[10:-10]
y.fill(0.3)
# previous to #3845 lead to corrupt access
y_strided = mlab.stride_windows(y, n=33, noverlap=0.6)
assert_array_equal(y_strided, 0.3)
# previous to #3845 lead to corrupt access
y_strided = mlab.stride_windows(y, n=33.3, noverlap=0)
assert_array_equal(y_strided, 0.3)
# even previous to #3845 could not find any problematic
# configuration however, let's be sure it's not accidentally
# introduced
y_strided = mlab.stride_repeat(y, n=33.815)
assert_array_equal(y_strided, 0.3)
class csv_testcase(CleanupTestCase):
def setUp(self):
if six.PY3:
self.fd = tempfile.TemporaryFile(suffix='csv', mode="w+",
newline='')
else:
self.fd = tempfile.TemporaryFile(suffix='csv', mode="wb+")
def tearDown(self):
self.fd.close()
def test_recarray_csv_roundtrip(self):
expected = np.recarray((99,),
[(str('x'), np.float),
(str('y'), np.float),
(str('t'), np.float)])
# initialising all values: uninitialised memory sometimes produces
# floats that do not round-trip to string and back.
expected['x'][:] = np.linspace(-1e9, -1, 99)
expected['y'][:] = np.linspace(1, 1e9, 99)
expected['t'][:] = np.linspace(0, 0.01, 99)
mlab.rec2csv(expected, self.fd)
self.fd.seek(0)
actual = mlab.csv2rec(self.fd)
assert_allclose(expected['x'], actual['x'])
assert_allclose(expected['y'], actual['y'])
assert_allclose(expected['t'], actual['t'])
def test_rec2csv_bad_shape_ValueError(self):
bad = np.recarray((99, 4), [(str('x'), np.float),
(str('y'), np.float)])
# the bad recarray should trigger a ValueError for having ndim > 1.
assert_raises(ValueError, mlab.rec2csv, bad, self.fd)
def test_csv2rec_names_with_comments(self):
self.fd.write('# comment\n1,2,3\n4,5,6\n')
self.fd.seek(0)
array = mlab.csv2rec(self.fd, names='a,b,c')
assert len(array) == 2
assert len(array.dtype) == 3
def test_csv2rec_usdate(self):
self.fd.write('01/11/14\n' +
'03/05/76 12:00:01 AM\n' +
'07/09/83 5:17:34 PM\n' +
'06/20/2054 2:31:45 PM\n' +
'10/31/00 11:50:23 AM\n')
expected = [datetime.datetime(2014, 1, 11, 0, 0),
datetime.datetime(1976, 3, 5, 0, 0, 1),
datetime.datetime(1983, 7, 9, 17, 17, 34),
datetime.datetime(2054, 6, 20, 14, 31, 45),
datetime.datetime(2000, 10, 31, 11, 50, 23)]
self.fd.seek(0)
array = mlab.csv2rec(self.fd, names='a')
assert_array_equal(array['a'].tolist(), expected)
def test_csv2rec_dayfirst(self):
self.fd.write('11/01/14\n' +
'05/03/76 12:00:01 AM\n' +
'09/07/83 5:17:34 PM\n' +
'20/06/2054 2:31:45 PM\n' +
'31/10/00 11:50:23 AM\n')
expected = [datetime.datetime(2014, 1, 11, 0, 0),
datetime.datetime(1976, 3, 5, 0, 0, 1),
datetime.datetime(1983, 7, 9, 17, 17, 34),
datetime.datetime(2054, 6, 20, 14, 31, 45),
datetime.datetime(2000, 10, 31, 11, 50, 23)]
self.fd.seek(0)
array = mlab.csv2rec(self.fd, names='a', dayfirst=True)
assert_array_equal(array['a'].tolist(), expected)
def test_csv2rec_yearfirst(self):
self.fd.write('14/01/11\n' +
'76/03/05 12:00:01 AM\n' +
'83/07/09 5:17:34 PM\n' +
'2054/06/20 2:31:45 PM\n' +
'00/10/31 11:50:23 AM\n')
expected = [datetime.datetime(2014, 1, 11, 0, 0),
datetime.datetime(1976, 3, 5, 0, 0, 1),
datetime.datetime(1983, 7, 9, 17, 17, 34),
datetime.datetime(2054, 6, 20, 14, 31, 45),
datetime.datetime(2000, 10, 31, 11, 50, 23)]
self.fd.seek(0)
array = mlab.csv2rec(self.fd, names='a', yearfirst=True)
assert_array_equal(array['a'].tolist(), expected)
class rec2txt_testcase(CleanupTestCase):
def test_csv2txt_basic(self):
# str() calls around field names necessary b/c as of numpy 1.11
# dtype doesn't like unicode names (caused by unicode_literals import)
a = np.array([(1.0, 2, 'foo', 'bing'),
(2.0, 3, 'bar', 'blah')],
dtype=np.dtype([(str('x'), np.float32),
(str('y'), np.int8),
(str('s'), str, 3),
(str('s2'), str, 4)]))
truth = (' x y s s2\n'
' 1.000 2 foo bing \n'
' 2.000 3 bar blah ').splitlines()
assert_equal(mlab.rec2txt(a).splitlines(), truth)
class window_testcase(CleanupTestCase):
def setUp(self):
np.random.seed(0)
self.n = 1000
self.x = np.arange(0., self.n)
self.sig_rand = np.random.standard_normal(self.n) + 100.
self.sig_ones = np.ones_like(self.x)
self.sig_slope = np.linspace(-10., 90., self.n)
def check_window_apply_repeat(self, x, window, NFFT, noverlap):
'''This is an adaptation of the original window application
        algorithm. It is here to check that the new implementation
        has the same result.'''
step = NFFT - noverlap
ind = np.arange(0, len(x) - NFFT + 1, step)
n = len(ind)
result = np.zeros((NFFT, n))
if cbook.iterable(window):
windowVals = window
else:
windowVals = window(np.ones((NFFT,), x.dtype))
# do the ffts of the slices
for i in range(n):
result[:, i] = windowVals * x[ind[i]:ind[i]+NFFT]
return result
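    # Illustrative sketch, not part of the test suite: mlab.apply_window
    # multiplies the signal by the window along the chosen axis, so for
    #
    #     x = np.random.standard_normal((1000, 10))
    #     y = mlab.apply_window(x, mlab.window_hanning, axis=0)
    #
    # column i of y equals mlab.window_hanning(x[:, i]).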
def test_window_none_rand(self):
res = mlab.window_none(self.sig_ones)
assert_array_equal(res, self.sig_ones)
def test_window_none_ones(self):
res = mlab.window_none(self.sig_rand)
assert_array_equal(res, self.sig_rand)
def test_window_hanning_rand(self):
targ = np.hanning(len(self.sig_rand)) * self.sig_rand
res = mlab.window_hanning(self.sig_rand)
assert_allclose(targ, res, atol=1e-06)
def test_window_hanning_ones(self):
targ = np.hanning(len(self.sig_ones))
res = mlab.window_hanning(self.sig_ones)
assert_allclose(targ, res, atol=1e-06)
def test_apply_window_1D_axis1_ValueError(self):
x = self.sig_rand
window = mlab.window_hanning
assert_raises(ValueError, mlab.apply_window, x, window, axis=1,
return_window=False)
def test_apply_window_1D_els_wrongsize_ValueError(self):
x = self.sig_rand
window = mlab.window_hanning(np.ones(x.shape[0]-1))
assert_raises(ValueError, mlab.apply_window, x, window)
def test_apply_window_0D_ValueError(self):
x = np.array(0)
window = mlab.window_hanning
assert_raises(ValueError, mlab.apply_window, x, window, axis=1,
return_window=False)
def test_apply_window_3D_ValueError(self):
x = self.sig_rand[np.newaxis][np.newaxis]
window = mlab.window_hanning
assert_raises(ValueError, mlab.apply_window, x, window, axis=1,
return_window=False)
def test_apply_window_hanning_1D(self):
x = self.sig_rand
window = mlab.window_hanning
window1 = mlab.window_hanning(np.ones(x.shape[0]))
y, window2 = mlab.apply_window(x, window, return_window=True)
yt = window(x)
assert_equal(yt.shape, y.shape)
assert_equal(x.shape, y.shape)
assert_allclose(yt, y, atol=1e-06)
assert_array_equal(window1, window2)
def test_apply_window_hanning_1D_axis0(self):
x = self.sig_rand
window = mlab.window_hanning
y = mlab.apply_window(x, window, axis=0, return_window=False)
yt = window(x)
assert_equal(yt.shape, y.shape)
assert_equal(x.shape, y.shape)
assert_allclose(yt, y, atol=1e-06)
def test_apply_window_hanning_els_1D_axis0(self):
x = self.sig_rand
window = mlab.window_hanning(np.ones(x.shape[0]))
window1 = mlab.window_hanning
y = mlab.apply_window(x, window, axis=0, return_window=False)
yt = window1(x)
assert_equal(yt.shape, y.shape)
assert_equal(x.shape, y.shape)
assert_allclose(yt, y, atol=1e-06)
def test_apply_window_hanning_2D_axis0(self):
x = np.random.standard_normal([1000, 10]) + 100.
window = mlab.window_hanning
y = mlab.apply_window(x, window, axis=0, return_window=False)
yt = np.zeros_like(x)
for i in range(x.shape[1]):
yt[:, i] = window(x[:, i])
assert_equal(yt.shape, y.shape)
assert_equal(x.shape, y.shape)
assert_allclose(yt, y, atol=1e-06)
def test_apply_window_hanning_els1_2D_axis0(self):
x = np.random.standard_normal([1000, 10]) + 100.
window = mlab.window_hanning(np.ones(x.shape[0]))
window1 = mlab.window_hanning
y = mlab.apply_window(x, window, axis=0, return_window=False)
yt = np.zeros_like(x)
for i in range(x.shape[1]):
yt[:, i] = window1(x[:, i])
assert_equal(yt.shape, y.shape)
assert_equal(x.shape, y.shape)
assert_allclose(yt, y, atol=1e-06)
def test_apply_window_hanning_els2_2D_axis0(self):
x = np.random.standard_normal([1000, 10]) + 100.
window = mlab.window_hanning
window1 = mlab.window_hanning(np.ones(x.shape[0]))
y, window2 = mlab.apply_window(x, window, axis=0, return_window=True)
yt = np.zeros_like(x)
for i in range(x.shape[1]):
yt[:, i] = window1*x[:, i]
assert_equal(yt.shape, y.shape)
assert_equal(x.shape, y.shape)
assert_allclose(yt, y, atol=1e-06)
assert_array_equal(window1, window2)
def test_apply_window_hanning_els3_2D_axis0(self):
x = np.random.standard_normal([1000, 10]) + 100.
window = mlab.window_hanning
window1 = mlab.window_hanning(np.ones(x.shape[0]))
y, window2 = mlab.apply_window(x, window, axis=0, return_window=True)
yt = mlab.apply_window(x, window1, axis=0, return_window=False)
assert_equal(yt.shape, y.shape)
assert_equal(x.shape, y.shape)
assert_allclose(yt, y, atol=1e-06)
assert_array_equal(window1, window2)
def test_apply_window_hanning_2D_axis1(self):
x = np.random.standard_normal([10, 1000]) + 100.
window = mlab.window_hanning
y = mlab.apply_window(x, window, axis=1, return_window=False)
yt = np.zeros_like(x)
for i in range(x.shape[0]):
yt[i, :] = window(x[i, :])
assert_equal(yt.shape, y.shape)
assert_equal(x.shape, y.shape)
assert_allclose(yt, y, atol=1e-06)
    def test_apply_window_hanning_2D_els1_axis1(self):
x = np.random.standard_normal([10, 1000]) + 100.
window = mlab.window_hanning(np.ones(x.shape[1]))
window1 = mlab.window_hanning
y = mlab.apply_window(x, window, axis=1, return_window=False)
yt = np.zeros_like(x)
for i in range(x.shape[0]):
yt[i, :] = window1(x[i, :])
assert_equal(yt.shape, y.shape)
assert_equal(x.shape, y.shape)
assert_allclose(yt, y, atol=1e-06)
def test_apply_window_hanning_2D_els2_axis1(self):
x = np.random.standard_normal([10, 1000]) + 100.
window = mlab.window_hanning
window1 = mlab.window_hanning(np.ones(x.shape[1]))
y, window2 = mlab.apply_window(x, window, axis=1, return_window=True)
yt = np.zeros_like(x)
for i in range(x.shape[0]):
yt[i, :] = window1 * x[i, :]
assert_equal(yt.shape, y.shape)
assert_equal(x.shape, y.shape)
assert_allclose(yt, y, atol=1e-06)
assert_array_equal(window1, window2)
def test_apply_window_hanning_2D_els3_axis1(self):
x = np.random.standard_normal([10, 1000]) + 100.
window = mlab.window_hanning
window1 = mlab.window_hanning(np.ones(x.shape[1]))
y = mlab.apply_window(x, window, axis=1, return_window=False)
yt = mlab.apply_window(x, window1, axis=1, return_window=False)
assert_equal(yt.shape, y.shape)
assert_equal(x.shape, y.shape)
assert_allclose(yt, y, atol=1e-06)
def test_apply_window_stride_windows_hanning_2D_n13_noverlapn3_axis0(self):
x = self.sig_rand
window = mlab.window_hanning
yi = mlab.stride_windows(x, n=13, noverlap=2, axis=0)
y = mlab.apply_window(yi, window, axis=0, return_window=False)
yt = self.check_window_apply_repeat(x, window, 13, 2)
assert_equal(yt.shape, y.shape)
assert_not_equal(x.shape, y.shape)
assert_allclose(yt, y, atol=1e-06)
def test_apply_window_hanning_2D_stack_axis1(self):
ydata = np.arange(32)
ydata1 = ydata+5
ydata2 = ydata+3.3
ycontrol1 = mlab.apply_window(ydata1, mlab.window_hanning)
ycontrol2 = mlab.window_hanning(ydata2)
ydata = np.vstack([ydata1, ydata2])
ycontrol = np.vstack([ycontrol1, ycontrol2])
ydata = np.tile(ydata, (20, 1))
ycontrol = np.tile(ycontrol, (20, 1))
result = mlab.apply_window(ydata, mlab.window_hanning, axis=1,
return_window=False)
assert_allclose(ycontrol, result, atol=1e-08)
def test_apply_window_hanning_2D_stack_windows_axis1(self):
ydata = np.arange(32)
ydata1 = ydata+5
ydata2 = ydata+3.3
ycontrol1 = mlab.apply_window(ydata1, mlab.window_hanning)
ycontrol2 = mlab.window_hanning(ydata2)
ydata = np.vstack([ydata1, ydata2])
ycontrol = np.vstack([ycontrol1, ycontrol2])
ydata = np.tile(ydata, (20, 1))
ycontrol = np.tile(ycontrol, (20, 1))
        windowVals = mlab.window_hanning(np.ones(ydata.shape[1]))
        result = mlab.apply_window(ydata, windowVals, axis=1,
                                   return_window=False)
assert_allclose(ycontrol, result, atol=1e-08)
def test_apply_window_hanning_2D_stack_windows_axis1_unflatten(self):
n = 32
ydata = np.arange(n)
ydata1 = ydata+5
ydata2 = ydata+3.3
ycontrol1 = mlab.apply_window(ydata1, mlab.window_hanning)
ycontrol2 = mlab.window_hanning(ydata2)
ydata = np.vstack([ydata1, ydata2])
ycontrol = np.vstack([ycontrol1, ycontrol2])
ydata = np.tile(ydata, (20, 1))
ycontrol = np.tile(ycontrol, (20, 1))
ydata = ydata.flatten()
ydata1 = mlab.stride_windows(ydata, 32, noverlap=0, axis=0)
result = mlab.apply_window(ydata1, mlab.window_hanning, axis=0,
return_window=False)
assert_allclose(ycontrol.T, result, atol=1e-08)
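# The detrend tests cover mlab.detrend_none/detrend_mean/detrend_linear on
# 0D, 1D and 2D inputs, reached directly, through string keys and through
# callables passed to mlab.detrend, plus the associated error paths.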
class detrend_testcase(CleanupTestCase):
def setUp(self):
np.random.seed(0)
n = 1000
x = np.linspace(0., 100, n)
self.sig_zeros = np.zeros(n)
self.sig_off = self.sig_zeros + 100.
self.sig_slope = np.linspace(-10., 90., n)
self.sig_slope_mean = x - x.mean()
sig_rand = np.random.standard_normal(n)
sig_sin = np.sin(x*2*np.pi/(n/100))
sig_rand -= sig_rand.mean()
sig_sin -= sig_sin.mean()
self.sig_base = sig_rand + sig_sin
self.atol = 1e-08
    def test_detrend_none_0D_zeros(self):
        input = 0.
        targ = input
        res = mlab.detrend_none(input)
        assert_equal(res, targ)
    def test_detrend_none_0D_zeros_axis1(self):
        input = 0.
        targ = input
        res = mlab.detrend_none(input, axis=1)
        assert_equal(res, targ)
    def test_detrend_str_none_0D_zeros(self):
        input = 0.
        targ = input
        res = mlab.detrend(input, key='none')
        assert_equal(res, targ)
    def test_detrend_detrend_none_0D_zeros(self):
        input = 0.
        targ = input
        res = mlab.detrend(input, key=mlab.detrend_none)
        assert_equal(res, targ)
    def test_detrend_none_0D_off(self):
        input = 5.5
        targ = input
        res = mlab.detrend_none(input)
        assert_equal(res, targ)
def test_detrend_none_1D_off(self):
input = self.sig_off
targ = input
res = mlab.detrend_none(input)
assert_array_equal(res, targ)
def test_detrend_none_1D_slope(self):
input = self.sig_slope
targ = input
res = mlab.detrend_none(input)
assert_array_equal(res, targ)
def test_detrend_none_1D_base(self):
input = self.sig_base
targ = input
res = mlab.detrend_none(input)
assert_array_equal(res, targ)
def test_detrend_none_1D_base_slope_off_list(self):
input = self.sig_base + self.sig_slope + self.sig_off
targ = input.tolist()
res = mlab.detrend_none(input.tolist())
assert_equal(res, targ)
def test_detrend_none_2D(self):
arri = [self.sig_base,
self.sig_base + self.sig_off,
self.sig_base + self.sig_slope,
self.sig_base + self.sig_off + self.sig_slope]
input = np.vstack(arri)
targ = input
res = mlab.detrend_none(input)
assert_array_equal(res, targ)
def test_detrend_none_2D_T(self):
arri = [self.sig_base,
self.sig_base + self.sig_off,
self.sig_base + self.sig_slope,
self.sig_base + self.sig_off + self.sig_slope]
input = np.vstack(arri)
targ = input
res = mlab.detrend_none(input.T)
assert_array_equal(res.T, targ)
def test_detrend_mean_0D_zeros(self):
input = 0.
targ = 0.
res = mlab.detrend_mean(input)
assert_almost_equal(res, targ)
def test_detrend_str_mean_0D_zeros(self):
input = 0.
targ = 0.
res = mlab.detrend(input, key='mean')
assert_almost_equal(res, targ)
def test_detrend_detrend_mean_0D_zeros(self):
input = 0.
targ = 0.
res = mlab.detrend(input, key=mlab.detrend_mean)
assert_almost_equal(res, targ)
def test_detrend_mean_0D_off(self):
input = 5.5
targ = 0.
res = mlab.detrend_mean(input)
assert_almost_equal(res, targ)
def test_detrend_str_mean_0D_off(self):
input = 5.5
targ = 0.
res = mlab.detrend(input, key='mean')
assert_almost_equal(res, targ)
def test_detrend_detrend_mean_0D_off(self):
input = 5.5
targ = 0.
res = mlab.detrend(input, key=mlab.detrend_mean)
assert_almost_equal(res, targ)
def test_detrend_mean_1D_zeros(self):
input = self.sig_zeros
targ = self.sig_zeros
res = mlab.detrend_mean(input)
assert_allclose(res, targ, atol=self.atol)
def test_detrend_mean_1D_base(self):
input = self.sig_base
targ = self.sig_base
res = mlab.detrend_mean(input)
assert_allclose(res, targ, atol=self.atol)
def test_detrend_mean_1D_base_off(self):
input = self.sig_base + self.sig_off
targ = self.sig_base
res = mlab.detrend_mean(input)
assert_allclose(res, targ, atol=self.atol)
def test_detrend_mean_1D_base_slope(self):
input = self.sig_base + self.sig_slope
targ = self.sig_base + self.sig_slope_mean
res = mlab.detrend_mean(input)
assert_allclose(res, targ, atol=self.atol)
def test_detrend_mean_1D_base_slope_off(self):
input = self.sig_base + self.sig_slope + self.sig_off
targ = self.sig_base + self.sig_slope_mean
res = mlab.detrend_mean(input)
assert_allclose(res, targ, atol=1e-08)
def test_detrend_mean_1D_base_slope_off_axis0(self):
input = self.sig_base + self.sig_slope + self.sig_off
targ = self.sig_base + self.sig_slope_mean
res = mlab.detrend_mean(input, axis=0)
assert_allclose(res, targ, atol=1e-08)
def test_detrend_mean_1D_base_slope_off_list(self):
input = self.sig_base + self.sig_slope + self.sig_off
targ = self.sig_base + self.sig_slope_mean
res = mlab.detrend_mean(input.tolist())
assert_allclose(res, targ, atol=1e-08)
def test_detrend_mean_1D_base_slope_off_list_axis0(self):
input = self.sig_base + self.sig_slope + self.sig_off
targ = self.sig_base + self.sig_slope_mean
res = mlab.detrend_mean(input.tolist(), axis=0)
assert_allclose(res, targ, atol=1e-08)
def test_demean_0D_off(self):
input = 5.5
targ = 0.
res = mlab.demean(input, axis=None)
assert_almost_equal(res, targ)
def test_demean_1D_base_slope_off(self):
input = self.sig_base + self.sig_slope + self.sig_off
targ = self.sig_base + self.sig_slope_mean
res = mlab.demean(input)
assert_allclose(res, targ, atol=1e-08)
def test_demean_1D_base_slope_off_axis0(self):
input = self.sig_base + self.sig_slope + self.sig_off
targ = self.sig_base + self.sig_slope_mean
res = mlab.demean(input, axis=0)
assert_allclose(res, targ, atol=1e-08)
def test_demean_1D_base_slope_off_list(self):
input = self.sig_base + self.sig_slope + self.sig_off
targ = self.sig_base + self.sig_slope_mean
res = mlab.demean(input.tolist())
assert_allclose(res, targ, atol=1e-08)
def test_detrend_mean_2D_default(self):
arri = [self.sig_off,
self.sig_base + self.sig_off]
arrt = [self.sig_zeros,
self.sig_base]
input = np.vstack(arri)
targ = np.vstack(arrt)
res = mlab.detrend_mean(input)
assert_allclose(res, targ, atol=1e-08)
def test_detrend_mean_2D_none(self):
arri = [self.sig_off,
self.sig_base + self.sig_off]
arrt = [self.sig_zeros,
self.sig_base]
input = np.vstack(arri)
targ = np.vstack(arrt)
res = mlab.detrend_mean(input, axis=None)
assert_allclose(res, targ,
atol=1e-08)
def test_detrend_mean_2D_none_T(self):
arri = [self.sig_off,
self.sig_base + self.sig_off]
arrt = [self.sig_zeros,
self.sig_base]
input = np.vstack(arri).T
targ = np.vstack(arrt)
res = mlab.detrend_mean(input, axis=None)
assert_allclose(res.T, targ,
atol=1e-08)
def test_detrend_mean_2D_axis0(self):
arri = [self.sig_base,
self.sig_base + self.sig_off,
self.sig_base + self.sig_slope,
self.sig_base + self.sig_off + self.sig_slope]
arrt = [self.sig_base,
self.sig_base,
self.sig_base + self.sig_slope_mean,
self.sig_base + self.sig_slope_mean]
input = np.vstack(arri).T
targ = np.vstack(arrt).T
res = mlab.detrend_mean(input, axis=0)
assert_allclose(res, targ,
atol=1e-08)
def test_detrend_mean_2D_axis1(self):
arri = [self.sig_base,
self.sig_base + self.sig_off,
self.sig_base + self.sig_slope,
self.sig_base + self.sig_off + self.sig_slope]
arrt = [self.sig_base,
self.sig_base,
self.sig_base + self.sig_slope_mean,
self.sig_base + self.sig_slope_mean]
input = np.vstack(arri)
targ = np.vstack(arrt)
res = mlab.detrend_mean(input, axis=1)
assert_allclose(res, targ,
atol=1e-08)
def test_detrend_mean_2D_axism1(self):
arri = [self.sig_base,
self.sig_base + self.sig_off,
self.sig_base + self.sig_slope,
self.sig_base + self.sig_off + self.sig_slope]
arrt = [self.sig_base,
self.sig_base,
self.sig_base + self.sig_slope_mean,
self.sig_base + self.sig_slope_mean]
input = np.vstack(arri)
targ = np.vstack(arrt)
res = mlab.detrend_mean(input, axis=-1)
assert_allclose(res, targ,
atol=1e-08)
def test_detrend_2D_default(self):
arri = [self.sig_off,
self.sig_base + self.sig_off]
arrt = [self.sig_zeros,
self.sig_base]
input = np.vstack(arri)
targ = np.vstack(arrt)
res = mlab.detrend(input)
assert_allclose(res, targ, atol=1e-08)
def test_detrend_2D_none(self):
arri = [self.sig_off,
self.sig_base + self.sig_off]
arrt = [self.sig_zeros,
self.sig_base]
input = np.vstack(arri)
targ = np.vstack(arrt)
res = mlab.detrend(input, axis=None)
assert_allclose(res, targ, atol=1e-08)
def test_detrend_str_mean_2D_axis0(self):
arri = [self.sig_base,
self.sig_base + self.sig_off,
self.sig_base + self.sig_slope,
self.sig_base + self.sig_off + self.sig_slope]
arrt = [self.sig_base,
self.sig_base,
self.sig_base + self.sig_slope_mean,
self.sig_base + self.sig_slope_mean]
input = np.vstack(arri).T
targ = np.vstack(arrt).T
res = mlab.detrend(input, key='mean', axis=0)
assert_allclose(res, targ,
atol=1e-08)
def test_detrend_str_constant_2D_none_T(self):
arri = [self.sig_off,
self.sig_base + self.sig_off]
arrt = [self.sig_zeros,
self.sig_base]
input = np.vstack(arri).T
targ = np.vstack(arrt)
res = mlab.detrend(input, key='constant', axis=None)
assert_allclose(res.T, targ,
atol=1e-08)
def test_detrend_str_default_2D_axis1(self):
arri = [self.sig_base,
self.sig_base + self.sig_off,
self.sig_base + self.sig_slope,
self.sig_base + self.sig_off + self.sig_slope]
arrt = [self.sig_base,
self.sig_base,
self.sig_base + self.sig_slope_mean,
self.sig_base + self.sig_slope_mean]
input = np.vstack(arri)
targ = np.vstack(arrt)
res = mlab.detrend(input, key='default', axis=1)
assert_allclose(res, targ,
atol=1e-08)
def test_detrend_detrend_mean_2D_axis0(self):
arri = [self.sig_base,
self.sig_base + self.sig_off,
self.sig_base + self.sig_slope,
self.sig_base + self.sig_off + self.sig_slope]
arrt = [self.sig_base,
self.sig_base,
self.sig_base + self.sig_slope_mean,
self.sig_base + self.sig_slope_mean]
input = np.vstack(arri).T
targ = np.vstack(arrt).T
res = mlab.detrend(input, key=mlab.detrend_mean, axis=0)
assert_allclose(res, targ,
atol=1e-08)
def test_demean_2D_default(self):
arri = [self.sig_base,
self.sig_base + self.sig_off,
self.sig_base + self.sig_slope,
self.sig_base + self.sig_off + self.sig_slope]
arrt = [self.sig_base,
self.sig_base,
self.sig_base + self.sig_slope_mean,
self.sig_base + self.sig_slope_mean]
input = np.vstack(arri).T
targ = np.vstack(arrt).T
res = mlab.demean(input)
assert_allclose(res, targ,
atol=1e-08)
def test_demean_2D_none(self):
arri = [self.sig_off,
self.sig_base + self.sig_off]
arrt = [self.sig_zeros,
self.sig_base]
input = np.vstack(arri)
targ = np.vstack(arrt)
res = mlab.demean(input, axis=None)
assert_allclose(res, targ,
atol=1e-08)
def test_demean_2D_axis0(self):
arri = [self.sig_base,
self.sig_base + self.sig_off,
self.sig_base + self.sig_slope,
self.sig_base + self.sig_off + self.sig_slope]
arrt = [self.sig_base,
self.sig_base,
self.sig_base + self.sig_slope_mean,
self.sig_base + self.sig_slope_mean]
input = np.vstack(arri).T
targ = np.vstack(arrt).T
res = mlab.demean(input, axis=0)
assert_allclose(res, targ,
atol=1e-08)
def test_demean_2D_axis1(self):
arri = [self.sig_base,
self.sig_base + self.sig_off,
self.sig_base + self.sig_slope,
self.sig_base + self.sig_off + self.sig_slope]
arrt = [self.sig_base,
self.sig_base,
self.sig_base + self.sig_slope_mean,
self.sig_base + self.sig_slope_mean]
input = np.vstack(arri)
targ = np.vstack(arrt)
res = mlab.demean(input, axis=1)
assert_allclose(res, targ,
atol=1e-08)
def test_demean_2D_axism1(self):
arri = [self.sig_base,
self.sig_base + self.sig_off,
self.sig_base + self.sig_slope,
self.sig_base + self.sig_off + self.sig_slope]
arrt = [self.sig_base,
self.sig_base,
self.sig_base + self.sig_slope_mean,
self.sig_base + self.sig_slope_mean]
input = np.vstack(arri)
targ = np.vstack(arrt)
res = mlab.demean(input, axis=-1)
assert_allclose(res, targ,
atol=1e-08)
def test_detrend_bad_key_str_ValueError(self):
input = self.sig_slope[np.newaxis]
assert_raises(ValueError, mlab.detrend, input, key='spam')
def test_detrend_bad_key_var_ValueError(self):
input = self.sig_slope[np.newaxis]
assert_raises(ValueError, mlab.detrend, input, key=5)
def test_detrend_mean_0D_d0_ValueError(self):
input = 5.5
assert_raises(ValueError, mlab.detrend_mean, input, axis=0)
def test_detrend_0D_d0_ValueError(self):
input = 5.5
assert_raises(ValueError, mlab.detrend, input, axis=0)
def test_detrend_mean_1D_d1_ValueError(self):
input = self.sig_slope
assert_raises(ValueError, mlab.detrend_mean, input, axis=1)
def test_detrend_1D_d1_ValueError(self):
input = self.sig_slope
assert_raises(ValueError, mlab.detrend, input, axis=1)
def test_demean_1D_d1_ValueError(self):
input = self.sig_slope
assert_raises(ValueError, mlab.demean, input, axis=1)
def test_detrend_mean_2D_d2_ValueError(self):
input = self.sig_slope[np.newaxis]
assert_raises(ValueError, mlab.detrend_mean, input, axis=2)
def test_detrend_2D_d2_ValueError(self):
input = self.sig_slope[np.newaxis]
assert_raises(ValueError, mlab.detrend, input, axis=2)
def test_demean_2D_d2_ValueError(self):
input = self.sig_slope[np.newaxis]
assert_raises(ValueError, mlab.demean, input, axis=2)
def test_detrend_linear_0D_zeros(self):
input = 0.
targ = 0.
res = mlab.detrend_linear(input)
assert_almost_equal(res, targ)
def test_detrend_linear_0D_off(self):
input = 5.5
targ = 0.
res = mlab.detrend_linear(input)
assert_almost_equal(res, targ)
def test_detrend_str_linear_0D_off(self):
input = 5.5
targ = 0.
res = mlab.detrend(input, key='linear')
assert_almost_equal(res, targ)
def test_detrend_detrend_linear_0D_off(self):
input = 5.5
targ = 0.
res = mlab.detrend(input, key=mlab.detrend_linear)
assert_almost_equal(res, targ)
def test_detrend_linear_1d_off(self):
input = self.sig_off
targ = self.sig_zeros
res = mlab.detrend_linear(input)
assert_allclose(res, targ, atol=self.atol)
def test_detrend_linear_1d_slope(self):
input = self.sig_slope
targ = self.sig_zeros
res = mlab.detrend_linear(input)
assert_allclose(res, targ, atol=self.atol)
def test_detrend_linear_1d_slope_off(self):
input = self.sig_slope + self.sig_off
targ = self.sig_zeros
res = mlab.detrend_linear(input)
assert_allclose(res, targ, atol=self.atol)
def test_detrend_str_linear_1d_slope_off(self):
input = self.sig_slope + self.sig_off
targ = self.sig_zeros
res = mlab.detrend(input, key='linear')
assert_allclose(res, targ, atol=self.atol)
def test_detrend_detrend_linear_1d_slope_off(self):
input = self.sig_slope + self.sig_off
targ = self.sig_zeros
res = mlab.detrend(input, key=mlab.detrend_linear)
assert_allclose(res, targ, atol=self.atol)
def test_detrend_linear_1d_slope_off_list(self):
input = self.sig_slope + self.sig_off
targ = self.sig_zeros
res = mlab.detrend_linear(input.tolist())
assert_allclose(res, targ, atol=self.atol)
def test_detrend_linear_2D_ValueError(self):
input = self.sig_slope[np.newaxis]
assert_raises(ValueError, mlab.detrend_linear, input)
def test_detrend_str_linear_2d_slope_off_axis0(self):
arri = [self.sig_off,
self.sig_slope,
self.sig_slope + self.sig_off]
arrt = [self.sig_zeros,
self.sig_zeros,
self.sig_zeros]
input = np.vstack(arri).T
targ = np.vstack(arrt).T
res = mlab.detrend(input, key='linear', axis=0)
assert_allclose(res, targ, atol=self.atol)
    def test_detrend_detrend_linear_2d_slope_off_axis0(self):
arri = [self.sig_off,
self.sig_slope,
self.sig_slope + self.sig_off]
arrt = [self.sig_zeros,
self.sig_zeros,
self.sig_zeros]
input = np.vstack(arri).T
targ = np.vstack(arrt).T
res = mlab.detrend(input, key=mlab.detrend_linear, axis=0)
assert_allclose(res, targ, atol=self.atol)
    def test_detrend_str_linear_2d_slope_off_axis1(self):
arri = [self.sig_off,
self.sig_slope,
self.sig_slope + self.sig_off]
arrt = [self.sig_zeros,
self.sig_zeros,
self.sig_zeros]
input = np.vstack(arri)
targ = np.vstack(arrt)
res = mlab.detrend(input, key='linear', axis=1)
assert_allclose(res, targ, atol=self.atol)
    def test_detrend_detrend_linear_2d_slope_off_axis1(self):
arri = [self.sig_off,
self.sig_slope,
self.sig_slope + self.sig_off]
arrt = [self.sig_zeros,
self.sig_zeros,
self.sig_zeros]
input = np.vstack(arri)
targ = np.vstack(arrt)
res = mlab.detrend(input, key=mlab.detrend_linear, axis=1)
assert_allclose(res, targ, atol=self.atol)
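# Base class for the spectral tests.  createStim builds the stimulus signal
# together with the frequency and time grids that psd/csd/specgram and the
# *_spectrum functions are expected to return; the tests compare mlab output
# against these precomputed targets.  The subclasses further down re-run the
# whole suite with different parameter combinations.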
class spectral_testcase_nosig_real_onesided(CleanupTestCase):
def setUp(self):
self.createStim(fstims=[],
iscomplex=False, sides='onesided', nsides=1)
def createStim(self, fstims, iscomplex, sides, nsides, len_x=None,
NFFT_density=-1, nover_density=-1, pad_to_density=-1,
pad_to_spectrum=-1):
Fs = 100.
x = np.arange(0, 10, 1/Fs)
if len_x is not None:
x = x[:len_x]
        # convert the stimulus specifications (divisors of Fs) to frequencies
fstims = [Fs/fstim for fstim in fstims]
# get the constants, default to calculated values
if NFFT_density is None:
NFFT_density_real = 256
elif NFFT_density < 0:
NFFT_density_real = NFFT_density = 100
else:
NFFT_density_real = NFFT_density
if nover_density is None:
nover_density_real = 0
elif nover_density < 0:
nover_density_real = nover_density = NFFT_density_real//2
else:
nover_density_real = nover_density
if pad_to_density is None:
pad_to_density_real = NFFT_density_real
elif pad_to_density < 0:
pad_to_density = int(2**np.ceil(np.log2(NFFT_density_real)))
pad_to_density_real = pad_to_density
else:
pad_to_density_real = pad_to_density
if pad_to_spectrum is None:
pad_to_spectrum_real = len(x)
elif pad_to_spectrum < 0:
pad_to_spectrum_real = pad_to_spectrum = len(x)
else:
pad_to_spectrum_real = pad_to_spectrum
if pad_to_spectrum is None:
NFFT_spectrum_real = NFFT_spectrum = pad_to_spectrum_real
else:
NFFT_spectrum_real = NFFT_spectrum = len(x)
nover_spectrum_real = nover_spectrum = 0
NFFT_specgram = NFFT_density
nover_specgram = nover_density
pad_to_specgram = pad_to_density
NFFT_specgram_real = NFFT_density_real
nover_specgram_real = nover_density_real
if nsides == 1:
# frequencies for specgram, psd, and csd
# need to handle even and odd differently
if pad_to_density_real % 2:
freqs_density = np.linspace(0, Fs/2,
num=pad_to_density_real,
endpoint=False)[::2]
else:
freqs_density = np.linspace(0, Fs/2,
num=pad_to_density_real//2+1)
# frequencies for complex, magnitude, angle, and phase spectrums
# need to handle even and odd differently
if pad_to_spectrum_real % 2:
freqs_spectrum = np.linspace(0, Fs/2,
num=pad_to_spectrum_real,
endpoint=False)[::2]
else:
freqs_spectrum = np.linspace(0, Fs/2,
num=pad_to_spectrum_real//2+1)
else:
# frequencies for specgram, psd, and csd
            # need to handle even and odd differently
if pad_to_density_real % 2:
freqs_density = np.linspace(-Fs/2, Fs/2,
num=2*pad_to_density_real,
endpoint=False)[1::2]
else:
freqs_density = np.linspace(-Fs/2, Fs/2,
num=pad_to_density_real,
endpoint=False)
# frequencies for complex, magnitude, angle, and phase spectrums
# need to handle even and odd differently
if pad_to_spectrum_real % 2:
freqs_spectrum = np.linspace(-Fs/2, Fs/2,
num=2*pad_to_spectrum_real,
endpoint=False)[1::2]
else:
freqs_spectrum = np.linspace(-Fs/2, Fs/2,
num=pad_to_spectrum_real,
endpoint=False)
freqs_specgram = freqs_density
# time points for specgram
t_start = NFFT_specgram_real//2
t_stop = len(x) - NFFT_specgram_real//2+1
t_step = NFFT_specgram_real - nover_specgram_real
t_specgram = x[t_start:t_stop:t_step]
if NFFT_specgram_real % 2:
t_specgram += 1/Fs/2
if len(t_specgram) == 0:
t_specgram = np.array([NFFT_specgram_real/(2*Fs)])
t_spectrum = np.array([NFFT_spectrum_real/(2*Fs)])
t_density = t_specgram
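        # build the stimulus: a sum of sinusoids at the requested
        # frequencies, each an order of magnitude stronger than the previous
        # one so that check_maxfreq can identify the peaks in order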
y = np.zeros_like(x)
for i, fstim in enumerate(fstims):
y += np.sin(fstim * x * np.pi * 2) * 10**i
if iscomplex:
y = y.astype('complex')
self.Fs = Fs
self.sides = sides
self.fstims = fstims
self.NFFT_density = NFFT_density
self.nover_density = nover_density
self.pad_to_density = pad_to_density
self.NFFT_spectrum = NFFT_spectrum
self.nover_spectrum = nover_spectrum
self.pad_to_spectrum = pad_to_spectrum
self.NFFT_specgram = NFFT_specgram
self.nover_specgram = nover_specgram
self.pad_to_specgram = pad_to_specgram
self.t_specgram = t_specgram
self.t_density = t_density
self.t_spectrum = t_spectrum
self.y = y
self.freqs_density = freqs_density
self.freqs_spectrum = freqs_spectrum
self.freqs_specgram = freqs_specgram
self.NFFT_density_real = NFFT_density_real
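    # helper: check that the returned frequency axis matches the expected
    # grid and that every stimulus frequency shows up as a local peak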
def check_freqs(self, vals, targfreqs, resfreqs, fstims):
assert_true(resfreqs.argmin() == 0)
assert_true(resfreqs.argmax() == len(resfreqs)-1)
assert_allclose(resfreqs, targfreqs, atol=1e-06)
for fstim in fstims:
i = np.abs(resfreqs - fstim).argmin()
assert_true(vals[i] > vals[i+2])
assert_true(vals[i] > vals[i-2])
def check_maxfreq(self, spec, fsp, fstims):
# skip the test if there are no frequencies
if len(fstims) == 0:
return
# if twosided, do the test for each side
if fsp.min() < 0:
fspa = np.abs(fsp)
zeroind = fspa.argmin()
self.check_maxfreq(spec[:zeroind], fspa[:zeroind], fstims)
self.check_maxfreq(spec[zeroind:], fspa[zeroind:], fstims)
return
fstimst = fstims[:]
spect = spec.copy()
# go through each peak and make sure it is correctly the maximum peak
while fstimst:
maxind = spect.argmax()
maxfreq = fsp[maxind]
assert_almost_equal(maxfreq, fstimst[-1])
del fstimst[-1]
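            # zero out the bins around the peak just found so that the next
            # iteration picks up the next-strongest stimulus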
spect[maxind-5:maxind+5] = 0
def test_spectral_helper_raises_complex_same_data(self):
# test that mode 'complex' cannot be used if x is not y
assert_raises(ValueError, mlab._spectral_helper,
x=self.y, y=self.y+1, mode='complex')
def test_spectral_helper_raises_magnitude_same_data(self):
# test that mode 'magnitude' cannot be used if x is not y
assert_raises(ValueError, mlab._spectral_helper,
x=self.y, y=self.y+1, mode='magnitude')
def test_spectral_helper_raises_angle_same_data(self):
# test that mode 'angle' cannot be used if x is not y
assert_raises(ValueError, mlab._spectral_helper,
x=self.y, y=self.y+1, mode='angle')
def test_spectral_helper_raises_phase_same_data(self):
# test that mode 'phase' cannot be used if x is not y
assert_raises(ValueError, mlab._spectral_helper,
x=self.y, y=self.y+1, mode='phase')
def test_spectral_helper_raises_unknown_mode(self):
# test that unknown value for mode cannot be used
assert_raises(ValueError, mlab._spectral_helper,
x=self.y, mode='spam')
def test_spectral_helper_raises_unknown_sides(self):
# test that unknown value for sides cannot be used
assert_raises(ValueError, mlab._spectral_helper,
x=self.y, y=self.y, sides='eggs')
def test_spectral_helper_raises_noverlap_gt_NFFT(self):
# test that noverlap cannot be larger than NFFT
assert_raises(ValueError, mlab._spectral_helper,
x=self.y, y=self.y, NFFT=10, noverlap=20)
def test_spectral_helper_raises_noverlap_eq_NFFT(self):
# test that noverlap cannot be equal to NFFT
assert_raises(ValueError, mlab._spectral_helper,
x=self.y, NFFT=10, noverlap=10)
def test_spectral_helper_raises_winlen_ne_NFFT(self):
# test that the window length cannot be different from NFFT
assert_raises(ValueError, mlab._spectral_helper,
x=self.y, y=self.y, NFFT=10, window=np.ones(9))
def test_single_spectrum_helper_raises_mode_default(self):
# test that mode 'default' cannot be used with _single_spectrum_helper
assert_raises(ValueError, mlab._single_spectrum_helper,
x=self.y, mode='default')
def test_single_spectrum_helper_raises_mode_psd(self):
# test that mode 'psd' cannot be used with _single_spectrum_helper
assert_raises(ValueError, mlab._single_spectrum_helper,
x=self.y, mode='psd')
def test_spectral_helper_psd(self):
freqs = self.freqs_density
spec, fsp, t = mlab._spectral_helper(x=self.y, y=self.y,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=self.nover_density,
pad_to=self.pad_to_density,
sides=self.sides,
mode='psd')
assert_allclose(fsp, freqs, atol=1e-06)
assert_allclose(t, self.t_density, atol=1e-06)
assert_equal(spec.shape[0], freqs.shape[0])
assert_equal(spec.shape[1], self.t_specgram.shape[0])
def test_spectral_helper_magnitude_specgram(self):
freqs = self.freqs_specgram
spec, fsp, t = mlab._spectral_helper(x=self.y, y=self.y,
NFFT=self.NFFT_specgram,
Fs=self.Fs,
noverlap=self.nover_specgram,
pad_to=self.pad_to_specgram,
sides=self.sides,
mode='magnitude')
assert_allclose(fsp, freqs, atol=1e-06)
assert_allclose(t, self.t_specgram, atol=1e-06)
assert_equal(spec.shape[0], freqs.shape[0])
assert_equal(spec.shape[1], self.t_specgram.shape[0])
def test_spectral_helper_magnitude_magnitude_spectrum(self):
freqs = self.freqs_spectrum
spec, fsp, t = mlab._spectral_helper(x=self.y, y=self.y,
NFFT=self.NFFT_spectrum,
Fs=self.Fs,
noverlap=self.nover_spectrum,
pad_to=self.pad_to_spectrum,
sides=self.sides,
mode='magnitude')
assert_allclose(fsp, freqs, atol=1e-06)
assert_allclose(t, self.t_spectrum, atol=1e-06)
assert_equal(spec.shape[0], freqs.shape[0])
assert_equal(spec.shape[1], 1)
def test_csd(self):
freqs = self.freqs_density
spec, fsp = mlab.csd(x=self.y, y=self.y+1,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=self.nover_density,
pad_to=self.pad_to_density,
sides=self.sides)
assert_allclose(fsp, freqs, atol=1e-06)
assert_equal(spec.shape, freqs.shape)
def test_psd(self):
freqs = self.freqs_density
spec, fsp = mlab.psd(x=self.y,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=self.nover_density,
pad_to=self.pad_to_density,
sides=self.sides)
assert_equal(spec.shape, freqs.shape)
self.check_freqs(spec, freqs, fsp, self.fstims)
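    # The detrend/window psd tests below build a signal whose NFFT-length
    # chunks reduce to the control signal once detrended (or windowed):
    # flattening the stack chunk-wise ("good" data) must reproduce the
    # control spectrum, while flattening the transposed stack ("bad" data)
    # must not.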
def test_psd_detrend_mean_func_offset(self):
if self.NFFT_density is None:
return
freqs = self.freqs_density
ydata = np.zeros(self.NFFT_density)
ydata1 = ydata+5
ydata2 = ydata+3.3
ydata = np.vstack([ydata1, ydata2])
ydata = np.tile(ydata, (20, 1))
ydatab = ydata.T.flatten()
ydata = ydata.flatten()
ycontrol = np.zeros_like(ydata)
spec_g, fsp_g = mlab.psd(x=ydata,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=0,
sides=self.sides,
detrend=mlab.detrend_mean)
spec_b, fsp_b = mlab.psd(x=ydatab,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=0,
sides=self.sides,
detrend=mlab.detrend_mean)
spec_c, fsp_c = mlab.psd(x=ycontrol,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=0,
sides=self.sides)
assert_array_equal(fsp_g, fsp_c)
assert_array_equal(fsp_b, fsp_c)
assert_allclose(spec_g, spec_c, atol=1e-08)
# these should not be almost equal
assert_raises(AssertionError,
assert_allclose, spec_b, spec_c, atol=1e-08)
def test_psd_detrend_mean_str_offset(self):
if self.NFFT_density is None:
return
freqs = self.freqs_density
ydata = np.zeros(self.NFFT_density)
ydata1 = ydata+5
ydata2 = ydata+3.3
ydata = np.vstack([ydata1, ydata2])
ydata = np.tile(ydata, (20, 1))
ydatab = ydata.T.flatten()
ydata = ydata.flatten()
ycontrol = np.zeros_like(ydata)
spec_g, fsp_g = mlab.psd(x=ydata,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=0,
sides=self.sides,
detrend='mean')
spec_b, fsp_b = mlab.psd(x=ydatab,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=0,
sides=self.sides,
detrend='mean')
spec_c, fsp_c = mlab.psd(x=ycontrol,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=0,
sides=self.sides)
assert_array_equal(fsp_g, fsp_c)
assert_array_equal(fsp_b, fsp_c)
assert_allclose(spec_g, spec_c, atol=1e-08)
# these should not be almost equal
assert_raises(AssertionError,
assert_allclose, spec_b, spec_c, atol=1e-08)
def test_psd_detrend_linear_func_trend(self):
if self.NFFT_density is None:
return
freqs = self.freqs_density
ydata = np.arange(self.NFFT_density)
ydata1 = ydata+5
ydata2 = ydata+3.3
ydata = np.vstack([ydata1, ydata2])
ydata = np.tile(ydata, (20, 1))
ydatab = ydata.T.flatten()
ydata = ydata.flatten()
ycontrol = np.zeros_like(ydata)
spec_g, fsp_g = mlab.psd(x=ydata,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=0,
sides=self.sides,
detrend=mlab.detrend_linear)
spec_b, fsp_b = mlab.psd(x=ydatab,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=0,
sides=self.sides,
detrend=mlab.detrend_linear)
spec_c, fsp_c = mlab.psd(x=ycontrol,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=0,
sides=self.sides)
assert_array_equal(fsp_g, fsp_c)
assert_array_equal(fsp_b, fsp_c)
assert_allclose(spec_g, spec_c, atol=1e-08)
# these should not be almost equal
assert_raises(AssertionError,
assert_allclose, spec_b, spec_c, atol=1e-08)
def test_psd_detrend_linear_str_trend(self):
if self.NFFT_density is None:
return
freqs = self.freqs_density
ydata = np.arange(self.NFFT_density)
ydata1 = ydata+5
ydata2 = ydata+3.3
ydata = np.vstack([ydata1, ydata2])
ydata = np.tile(ydata, (20, 1))
ydatab = ydata.T.flatten()
ydata = ydata.flatten()
ycontrol = np.zeros_like(ydata)
spec_g, fsp_g = mlab.psd(x=ydata,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=0,
sides=self.sides,
detrend='linear')
spec_b, fsp_b = mlab.psd(x=ydatab,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=0,
sides=self.sides,
detrend='linear')
spec_c, fsp_c = mlab.psd(x=ycontrol,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=0,
sides=self.sides)
assert_array_equal(fsp_g, fsp_c)
assert_array_equal(fsp_b, fsp_c)
assert_allclose(spec_g, spec_c, atol=1e-08)
# these should not be almost equal
assert_raises(AssertionError,
assert_allclose, spec_b, spec_c, atol=1e-08)
def test_psd_window_hanning(self):
if self.NFFT_density is None:
return
freqs = self.freqs_density
ydata = np.arange(self.NFFT_density)
ydata1 = ydata+5
ydata2 = ydata+3.3
ycontrol1, windowVals = mlab.apply_window(ydata1,
mlab.window_hanning,
return_window=True)
ycontrol2 = mlab.window_hanning(ydata2)
ydata = np.vstack([ydata1, ydata2])
ycontrol = np.vstack([ycontrol1, ycontrol2])
ydata = np.tile(ydata, (20, 1))
ycontrol = np.tile(ycontrol, (20, 1))
ydatab = ydata.T.flatten()
ydataf = ydata.flatten()
ycontrol = ycontrol.flatten()
spec_g, fsp_g = mlab.psd(x=ydataf,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=0,
sides=self.sides,
window=mlab.window_hanning)
spec_b, fsp_b = mlab.psd(x=ydatab,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=0,
sides=self.sides,
window=mlab.window_hanning)
spec_c, fsp_c = mlab.psd(x=ycontrol,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=0,
sides=self.sides,
window=mlab.window_none)
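        # the control was windowed by hand and computed with window_none, so
        # rescale it by len/sum(win**2) to match the normalization used for
        # the window_hanning calls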
spec_c *= len(ycontrol1)/(np.abs(windowVals)**2).sum()
assert_array_equal(fsp_g, fsp_c)
assert_array_equal(fsp_b, fsp_c)
assert_allclose(spec_g, spec_c, atol=1e-08)
# these should not be almost equal
assert_raises(AssertionError,
assert_allclose, spec_b, spec_c, atol=1e-08)
def test_psd_window_hanning_detrend_linear(self):
if self.NFFT_density is None:
return
freqs = self.freqs_density
ydata = np.arange(self.NFFT_density)
ycontrol = np.zeros(self.NFFT_density)
ydata1 = ydata+5
ydata2 = ydata+3.3
ycontrol1 = ycontrol
ycontrol2 = ycontrol
ycontrol1, windowVals = mlab.apply_window(ycontrol1,
mlab.window_hanning,
return_window=True)
ycontrol2 = mlab.window_hanning(ycontrol2)
ydata = np.vstack([ydata1, ydata2])
ycontrol = np.vstack([ycontrol1, ycontrol2])
ydata = np.tile(ydata, (20, 1))
ycontrol = np.tile(ycontrol, (20, 1))
ydatab = ydata.T.flatten()
ydataf = ydata.flatten()
ycontrol = ycontrol.flatten()
spec_g, fsp_g = mlab.psd(x=ydataf,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=0,
sides=self.sides,
detrend=mlab.detrend_linear,
window=mlab.window_hanning)
spec_b, fsp_b = mlab.psd(x=ydatab,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=0,
sides=self.sides,
detrend=mlab.detrend_linear,
window=mlab.window_hanning)
spec_c, fsp_c = mlab.psd(x=ycontrol,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=0,
sides=self.sides,
window=mlab.window_none)
spec_c *= len(ycontrol1)/(np.abs(windowVals)**2).sum()
assert_array_equal(fsp_g, fsp_c)
assert_array_equal(fsp_b, fsp_c)
assert_allclose(spec_g, spec_c, atol=1e-08)
# these should not be almost equal
assert_raises(AssertionError,
assert_allclose, spec_b, spec_c, atol=1e-08)
def test_psd_windowarray(self):
freqs = self.freqs_density
spec, fsp = mlab.psd(x=self.y,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=self.nover_density,
pad_to=self.pad_to_density,
sides=self.sides,
window=np.ones(self.NFFT_density_real))
assert_allclose(fsp, freqs, atol=1e-06)
assert_equal(spec.shape, freqs.shape)
def test_psd_windowarray_scale_by_freq(self):
freqs = self.freqs_density
win = mlab.window_hanning(np.ones(self.NFFT_density_real))
spec, fsp = mlab.psd(x=self.y,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=self.nover_density,
pad_to=self.pad_to_density,
sides=self.sides,
window=mlab.window_hanning)
spec_s, fsp_s = mlab.psd(x=self.y,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=self.nover_density,
pad_to=self.pad_to_density,
sides=self.sides,
window=mlab.window_hanning,
scale_by_freq=True)
spec_n, fsp_n = mlab.psd(x=self.y,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=self.nover_density,
pad_to=self.pad_to_density,
sides=self.sides,
window=mlab.window_hanning,
scale_by_freq=False)
assert_array_equal(fsp, fsp_s)
assert_array_equal(fsp, fsp_n)
assert_array_equal(spec, spec_s)
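        # the scaled and unscaled spectra should be related only through the
        # window normalization and Fs:
        # spec_s * (win**2).sum() == spec_n * win.sum()**2 / Fs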
assert_allclose(spec_s*(win**2).sum(),
spec_n/self.Fs*win.sum()**2,
atol=1e-08)
def test_complex_spectrum(self):
freqs = self.freqs_spectrum
spec, fsp = mlab.complex_spectrum(x=self.y,
Fs=self.Fs,
sides=self.sides,
pad_to=self.pad_to_spectrum)
assert_allclose(fsp, freqs, atol=1e-06)
assert_equal(spec.shape, freqs.shape)
def test_magnitude_spectrum(self):
freqs = self.freqs_spectrum
spec, fsp = mlab.magnitude_spectrum(x=self.y,
Fs=self.Fs,
sides=self.sides,
pad_to=self.pad_to_spectrum)
assert_equal(spec.shape, freqs.shape)
self.check_maxfreq(spec, fsp, self.fstims)
self.check_freqs(spec, freqs, fsp, self.fstims)
def test_angle_spectrum(self):
freqs = self.freqs_spectrum
spec, fsp = mlab.angle_spectrum(x=self.y,
Fs=self.Fs,
sides=self.sides,
pad_to=self.pad_to_spectrum)
assert_allclose(fsp, freqs, atol=1e-06)
assert_equal(spec.shape, freqs.shape)
def test_phase_spectrum(self):
freqs = self.freqs_spectrum
spec, fsp = mlab.phase_spectrum(x=self.y,
Fs=self.Fs,
sides=self.sides,
pad_to=self.pad_to_spectrum)
assert_allclose(fsp, freqs, atol=1e-06)
assert_equal(spec.shape, freqs.shape)
def test_specgram_auto(self):
freqs = self.freqs_specgram
spec, fsp, t = mlab.specgram(x=self.y,
NFFT=self.NFFT_specgram,
Fs=self.Fs,
noverlap=self.nover_specgram,
pad_to=self.pad_to_specgram,
sides=self.sides)
specm = np.mean(spec, axis=1)
assert_allclose(fsp, freqs, atol=1e-06)
assert_allclose(t, self.t_specgram, atol=1e-06)
assert_equal(spec.shape[0], freqs.shape[0])
assert_equal(spec.shape[1], self.t_specgram.shape[0])
# since we are using a single freq, all time slices
# should be about the same
if np.abs(spec.max()) != 0:
assert_allclose(np.diff(spec, axis=1).max()/np.abs(spec.max()), 0,
atol=1e-02)
self.check_freqs(specm, freqs, fsp, self.fstims)
def test_specgram_default(self):
freqs = self.freqs_specgram
spec, fsp, t = mlab.specgram(x=self.y,
NFFT=self.NFFT_specgram,
Fs=self.Fs,
noverlap=self.nover_specgram,
pad_to=self.pad_to_specgram,
sides=self.sides,
mode='default')
specm = np.mean(spec, axis=1)
assert_allclose(fsp, freqs, atol=1e-06)
assert_allclose(t, self.t_specgram, atol=1e-06)
assert_equal(spec.shape[0], freqs.shape[0])
assert_equal(spec.shape[1], self.t_specgram.shape[0])
# since we are using a single freq, all time slices
# should be about the same
if np.abs(spec.max()) != 0:
assert_allclose(np.diff(spec, axis=1).max()/np.abs(spec.max()), 0,
atol=1e-02)
self.check_freqs(specm, freqs, fsp, self.fstims)
def test_specgram_psd(self):
freqs = self.freqs_specgram
spec, fsp, t = mlab.specgram(x=self.y,
NFFT=self.NFFT_specgram,
Fs=self.Fs,
noverlap=self.nover_specgram,
pad_to=self.pad_to_specgram,
sides=self.sides,
mode='psd')
specm = np.mean(spec, axis=1)
assert_allclose(fsp, freqs, atol=1e-06)
assert_allclose(t, self.t_specgram, atol=1e-06)
assert_equal(spec.shape[0], freqs.shape[0])
assert_equal(spec.shape[1], self.t_specgram.shape[0])
# since we are using a single freq, all time slices
# should be about the same
if np.abs(spec.max()) != 0:
assert_allclose(np.diff(spec, axis=1).max()/np.abs(spec.max()), 0,
atol=1e-02)
self.check_freqs(specm, freqs, fsp, self.fstims)
def test_specgram_complex(self):
freqs = self.freqs_specgram
spec, fsp, t = mlab.specgram(x=self.y,
NFFT=self.NFFT_specgram,
Fs=self.Fs,
noverlap=self.nover_specgram,
pad_to=self.pad_to_specgram,
sides=self.sides,
mode='complex')
specm = np.mean(np.abs(spec), axis=1)
assert_allclose(fsp, freqs, atol=1e-06)
assert_allclose(t, self.t_specgram, atol=1e-06)
assert_equal(spec.shape[0], freqs.shape[0])
assert_equal(spec.shape[1], self.t_specgram.shape[0])
self.check_freqs(specm, freqs, fsp, self.fstims)
def test_specgram_magnitude(self):
freqs = self.freqs_specgram
spec, fsp, t = mlab.specgram(x=self.y,
NFFT=self.NFFT_specgram,
Fs=self.Fs,
noverlap=self.nover_specgram,
pad_to=self.pad_to_specgram,
sides=self.sides,
mode='magnitude')
specm = np.mean(spec, axis=1)
assert_allclose(fsp, freqs, atol=1e-06)
assert_allclose(t, self.t_specgram, atol=1e-06)
assert_equal(spec.shape[0], freqs.shape[0])
assert_equal(spec.shape[1], self.t_specgram.shape[0])
# since we are using a single freq, all time slices
# should be about the same
if np.abs(spec.max()) != 0:
assert_allclose(np.diff(spec, axis=1).max()/np.abs(spec.max()), 0,
atol=1e-02)
self.check_freqs(specm, freqs, fsp, self.fstims)
def test_specgram_angle(self):
freqs = self.freqs_specgram
spec, fsp, t = mlab.specgram(x=self.y,
NFFT=self.NFFT_specgram,
Fs=self.Fs,
noverlap=self.nover_specgram,
pad_to=self.pad_to_specgram,
sides=self.sides,
mode='angle')
specm = np.mean(spec, axis=1)
assert_allclose(fsp, freqs, atol=1e-06)
assert_allclose(t, self.t_specgram, atol=1e-06)
assert_equal(spec.shape[0], freqs.shape[0])
assert_equal(spec.shape[1], self.t_specgram.shape[0])
def test_specgram_phase(self):
freqs = self.freqs_specgram
spec, fsp, t = mlab.specgram(x=self.y,
NFFT=self.NFFT_specgram,
Fs=self.Fs,
noverlap=self.nover_specgram,
pad_to=self.pad_to_specgram,
sides=self.sides,
mode='phase')
specm = np.mean(spec, axis=1)
assert_allclose(fsp, freqs, atol=1e-06)
assert_allclose(t, self.t_specgram, atol=1e-06)
assert_equal(spec.shape[0], freqs.shape[0])
assert_equal(spec.shape[1], self.t_specgram.shape[0])
def test_psd_csd_equal(self):
freqs = self.freqs_density
Pxx, freqsxx = mlab.psd(x=self.y,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=self.nover_density,
pad_to=self.pad_to_density,
sides=self.sides)
Pxy, freqsxy = mlab.csd(x=self.y, y=self.y,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=self.nover_density,
pad_to=self.pad_to_density,
sides=self.sides)
assert_array_equal(Pxx, Pxy)
assert_array_equal(freqsxx, freqsxy)
def test_specgram_auto_default_equal(self):
        '''test that mlab.specgram without mode and with mode 'default'
        give the same result'''
freqs = self.freqs_specgram
speca, freqspeca, ta = mlab.specgram(x=self.y,
NFFT=self.NFFT_specgram,
Fs=self.Fs,
noverlap=self.nover_specgram,
pad_to=self.pad_to_specgram,
sides=self.sides)
specb, freqspecb, tb = mlab.specgram(x=self.y,
NFFT=self.NFFT_specgram,
Fs=self.Fs,
noverlap=self.nover_specgram,
pad_to=self.pad_to_specgram,
sides=self.sides,
mode='default')
assert_array_equal(speca, specb)
assert_array_equal(freqspeca, freqspecb)
assert_array_equal(ta, tb)
def test_specgram_auto_psd_equal(self):
        '''test that mlab.specgram without mode and with mode 'psd'
        give the same result'''
freqs = self.freqs_specgram
speca, freqspeca, ta = mlab.specgram(x=self.y,
NFFT=self.NFFT_specgram,
Fs=self.Fs,
noverlap=self.nover_specgram,
pad_to=self.pad_to_specgram,
sides=self.sides)
specc, freqspecc, tc = mlab.specgram(x=self.y,
NFFT=self.NFFT_specgram,
Fs=self.Fs,
noverlap=self.nover_specgram,
pad_to=self.pad_to_specgram,
sides=self.sides,
mode='psd')
assert_array_equal(speca, specc)
assert_array_equal(freqspeca, freqspecc)
assert_array_equal(ta, tc)
def test_specgram_complex_mag_equivalent(self):
freqs = self.freqs_specgram
specc, freqspecc, tc = mlab.specgram(x=self.y,
NFFT=self.NFFT_specgram,
Fs=self.Fs,
noverlap=self.nover_specgram,
pad_to=self.pad_to_specgram,
sides=self.sides,
mode='complex')
specm, freqspecm, tm = mlab.specgram(x=self.y,
NFFT=self.NFFT_specgram,
Fs=self.Fs,
noverlap=self.nover_specgram,
pad_to=self.pad_to_specgram,
sides=self.sides,
mode='magnitude')
assert_array_equal(freqspecc, freqspecm)
assert_array_equal(tc, tm)
assert_allclose(np.abs(specc), specm, atol=1e-06)
def test_specgram_complex_angle_equivalent(self):
freqs = self.freqs_specgram
specc, freqspecc, tc = mlab.specgram(x=self.y,
NFFT=self.NFFT_specgram,
Fs=self.Fs,
noverlap=self.nover_specgram,
pad_to=self.pad_to_specgram,
sides=self.sides,
mode='complex')
speca, freqspeca, ta = mlab.specgram(x=self.y,
NFFT=self.NFFT_specgram,
Fs=self.Fs,
noverlap=self.nover_specgram,
pad_to=self.pad_to_specgram,
sides=self.sides,
mode='angle')
assert_array_equal(freqspecc, freqspeca)
assert_array_equal(tc, ta)
assert_allclose(np.angle(specc), speca, atol=1e-06)
def test_specgram_complex_phase_equivalent(self):
freqs = self.freqs_specgram
specc, freqspecc, tc = mlab.specgram(x=self.y,
NFFT=self.NFFT_specgram,
Fs=self.Fs,
noverlap=self.nover_specgram,
pad_to=self.pad_to_specgram,
sides=self.sides,
mode='complex')
specp, freqspecp, tp = mlab.specgram(x=self.y,
NFFT=self.NFFT_specgram,
Fs=self.Fs,
noverlap=self.nover_specgram,
pad_to=self.pad_to_specgram,
sides=self.sides,
mode='phase')
assert_array_equal(freqspecc, freqspecp)
assert_array_equal(tc, tp)
assert_allclose(np.unwrap(np.angle(specc), axis=0), specp,
atol=1e-06)
def test_specgram_angle_phase_equivalent(self):
freqs = self.freqs_specgram
speca, freqspeca, ta = mlab.specgram(x=self.y,
NFFT=self.NFFT_specgram,
Fs=self.Fs,
noverlap=self.nover_specgram,
pad_to=self.pad_to_specgram,
sides=self.sides,
mode='angle')
specp, freqspecp, tp = mlab.specgram(x=self.y,
NFFT=self.NFFT_specgram,
Fs=self.Fs,
noverlap=self.nover_specgram,
pad_to=self.pad_to_specgram,
sides=self.sides,
mode='phase')
assert_array_equal(freqspeca, freqspecp)
assert_array_equal(ta, tp)
assert_allclose(np.unwrap(speca, axis=0), specp,
atol=1e-06)
def test_psd_windowarray_equal(self):
freqs = self.freqs_density
win = mlab.window_hanning(np.ones(self.NFFT_density_real))
speca, fspa = mlab.psd(x=self.y,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=self.nover_density,
pad_to=self.pad_to_density,
sides=self.sides,
window=win)
specb, fspb = mlab.psd(x=self.y,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=self.nover_density,
pad_to=self.pad_to_density,
sides=self.sides)
assert_array_equal(fspa, fspb)
assert_allclose(speca, specb, atol=1e-08)
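# The subclasses below only override setUp, re-running every test inherited
# from spectral_testcase_nosig_real_onesided with different combinations of
# stimulus frequencies, real/complex input, sides, NFFT and padding.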
class spectral_testcase_nosig_real_twosided(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[],
iscomplex=False, sides='twosided', nsides=2)
class spectral_testcase_nosig_real_defaultsided(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[],
iscomplex=False, sides='default', nsides=1)
class spectral_testcase_nosig_complex_onesided(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[],
iscomplex=True, sides='onesided', nsides=1)
class spectral_testcase_nosig_complex_twosided(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[],
iscomplex=True, sides='twosided', nsides=2)
class spectral_testcase_nosig_complex_defaultsided(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[],
iscomplex=True, sides='default', nsides=2)
class spectral_testcase_Fs4_real_onesided(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[4],
iscomplex=False, sides='onesided', nsides=1)
class spectral_testcase_Fs4_real_twosided(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[4],
iscomplex=False, sides='twosided', nsides=2)
class spectral_testcase_Fs4_real_defaultsided(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[4],
iscomplex=False, sides='default', nsides=1)
class spectral_testcase_Fs4_complex_onesided(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[4],
iscomplex=True, sides='onesided', nsides=1)
class spectral_testcase_Fs4_complex_twosided(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[4],
iscomplex=True, sides='twosided', nsides=2)
class spectral_testcase_Fs4_complex_defaultsided(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[4],
iscomplex=True, sides='default', nsides=2)
class spectral_testcase_FsAll_real_onesided(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[4, 5, 10],
iscomplex=False, sides='onesided', nsides=1)
class spectral_testcase_FsAll_real_twosided(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[4, 5, 10],
iscomplex=False, sides='twosided', nsides=2)
class spectral_testcase_FsAll_real_defaultsided(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[4, 5, 10],
iscomplex=False, sides='default', nsides=1)
class spectral_testcase_FsAll_complex_onesided(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[4, 5, 10],
iscomplex=True, sides='onesided', nsides=1)
class spectral_testcase_FsAll_complex_twosided(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[4, 5, 10],
iscomplex=True, sides='twosided', nsides=2)
class spectral_testcase_FsAll_complex_defaultsided(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[4, 5, 10],
iscomplex=True, sides='default', nsides=2)
class spectral_testcase_nosig_real_onesided_noNFFT(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[],
NFFT_density=None, pad_to_spectrum=None,
iscomplex=False, sides='onesided', nsides=1)
class spectral_testcase_nosig_real_twosided_noNFFT(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[],
NFFT_density=None, pad_to_spectrum=None,
iscomplex=False, sides='twosided', nsides=2)
class spectral_testcase_nosig_real_defaultsided_noNFFT(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[],
NFFT_density=None, pad_to_spectrum=None,
iscomplex=False, sides='default', nsides=1)
class spectral_testcase_nosig_complex_onesided_noNFFT(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[],
NFFT_density=None, pad_to_spectrum=None,
iscomplex=True, sides='onesided', nsides=1)
class spectral_testcase_nosig_complex_twosided_noNFFT(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[],
NFFT_density=None, pad_to_spectrum=None,
iscomplex=True, sides='twosided', nsides=2)
class spectral_testcase_nosig_complex_defaultsided_noNFFT(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[],
NFFT_density=None, pad_to_spectrum=None,
iscomplex=True, sides='default', nsides=2)
class spectral_testcase_nosig_real_onesided_nopad_to(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[],
pad_to_density=None, pad_to_spectrum=None,
iscomplex=False, sides='onesided', nsides=1)
class spectral_testcase_nosig_real_twosided_nopad_to(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[],
pad_to_density=None, pad_to_spectrum=None,
iscomplex=False, sides='twosided', nsides=2)
class spectral_testcase_nosig_real_defaultsided_nopad_to(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[],
pad_to_density=None, pad_to_spectrum=None,
iscomplex=False, sides='default', nsides=1)
class spectral_testcase_nosig_complex_onesided_nopad_to(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[],
pad_to_density=None, pad_to_spectrum=None,
iscomplex=True, sides='onesided', nsides=1)
class spectral_testcase_nosig_complex_twosided_nopad_to(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[],
NFFT_density=None,
pad_to_density=None, pad_to_spectrum=None,
iscomplex=True, sides='twosided', nsides=2)
class spectral_testcase_nosig_complex_defaultsided_nopad_to(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[],
NFFT_density=None,
pad_to_density=None, pad_to_spectrum=None,
iscomplex=True, sides='default', nsides=2)
class spectral_testcase_nosig_real_onesided_noNFFT_no_pad_to(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[],
NFFT_density=None,
pad_to_density=None, pad_to_spectrum=None,
iscomplex=False, sides='onesided', nsides=1)
class spectral_testcase_nosig_real_twosided_noNFFT_no_pad_to(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[],
NFFT_density=None,
pad_to_density=None, pad_to_spectrum=None,
iscomplex=False, sides='twosided', nsides=2)
class spectral_testcase_nosig_real_defaultsided_noNFFT_no_pad_to(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[],
NFFT_density=None,
pad_to_density=None, pad_to_spectrum=None,
iscomplex=False, sides='default', nsides=1)
class spectral_testcase_nosig_complex_onesided_noNFFT_no_pad_to(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[],
NFFT_density=None,
pad_to_density=None, pad_to_spectrum=None,
iscomplex=True, sides='onesided', nsides=1)
class spectral_testcase_nosig_complex_twosided_noNFFT_no_pad_to(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[],
NFFT_density=None,
pad_to_density=None, pad_to_spectrum=None,
iscomplex=True, sides='twosided', nsides=2)
class spectral_testcase_nosig_complex_defaultsided_noNFFT_no_pad_to(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[],
NFFT_density=None,
pad_to_density=None, pad_to_spectrum=None,
iscomplex=True, sides='default', nsides=2)
class spectral_testcase_nosig_real_onesided_trim(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[],
len_x=256,
NFFT_density=512, pad_to_spectrum=128,
iscomplex=False, sides='onesided', nsides=1)
class spectral_testcase_nosig_real_twosided_trim(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[],
len_x=256,
NFFT_density=512, pad_to_spectrum=128,
iscomplex=False, sides='twosided', nsides=2)
class spectral_testcase_nosig_real_defaultsided_trim(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[],
len_x=256,
NFFT_density=512, pad_to_spectrum=128,
iscomplex=False, sides='default', nsides=1)
class spectral_testcase_nosig_complex_onesided_trim(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[],
len_x=256,
NFFT_density=512, pad_to_spectrum=128,
iscomplex=True, sides='onesided', nsides=1)
class spectral_testcase_nosig_complex_twosided_trim(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[],
len_x=256,
NFFT_density=512, pad_to_spectrum=128,
iscomplex=True, sides='twosided', nsides=2)
class spectral_testcase_nosig_complex_defaultsided_trim(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[],
len_x=256,
NFFT_density=128, pad_to_spectrum=128,
iscomplex=True, sides='default', nsides=2)
class spectral_testcase_nosig_real_onesided_odd(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[],
len_x=256,
pad_to_density=33, pad_to_spectrum=257,
iscomplex=False, sides='onesided', nsides=1)
class spectral_testcase_nosig_real_twosided_odd(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[],
len_x=256,
pad_to_density=33, pad_to_spectrum=257,
iscomplex=False, sides='twosided', nsides=2)
class spectral_testcase_nosig_real_defaultsided_odd(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[],
len_x=256,
pad_to_density=33, pad_to_spectrum=257,
iscomplex=False, sides='default', nsides=1)
class spectral_testcase_nosig_complex_onesided_odd(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[],
len_x=256,
pad_to_density=33, pad_to_spectrum=257,
iscomplex=True, sides='onesided', nsides=1)
class spectral_testcase_nosig_complex_twosided_odd(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[],
len_x=256,
pad_to_density=33, pad_to_spectrum=257,
iscomplex=True, sides='twosided', nsides=2)
class spectral_testcase_nosig_complex_defaultsided_odd(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[],
len_x=256,
pad_to_density=33, pad_to_spectrum=257,
iscomplex=True, sides='default', nsides=2)
class spectral_testcase_nosig_real_onesided_oddlen(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[],
len_x=255,
NFFT_density=33, pad_to_spectrum=None,
iscomplex=False, sides='onesided', nsides=1)
class spectral_testcase_nosig_real_twosided_oddlen(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[],
len_x=255,
NFFT_density=33, pad_to_spectrum=None,
iscomplex=False, sides='twosided', nsides=2)
class spectral_testcase_nosig_real_defaultsided_oddlen(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[],
len_x=255,
NFFT_density=33, pad_to_spectrum=None,
iscomplex=False, sides='default', nsides=1)
class spectral_testcase_nosig_complex_onesided_oddlen(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[],
len_x=255,
NFFT_density=33, pad_to_spectrum=None,
iscomplex=True, sides='onesided', nsides=1)
class spectral_testcase_nosig_complex_twosided_oddlen(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[],
len_x=255,
NFFT_density=33, pad_to_spectrum=None,
iscomplex=True, sides='twosided', nsides=2)
class spectral_testcase_nosig_complex_defaultsided_oddlen(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[],
len_x=255,
NFFT_density=128, pad_to_spectrum=None,
iscomplex=True, sides='default', nsides=2)
class spectral_testcase_nosig_real_onesided_stretch(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[],
len_x=128,
NFFT_density=128,
pad_to_density=256, pad_to_spectrum=256,
iscomplex=False, sides='onesided', nsides=1)
class spectral_testcase_nosig_real_twosided_stretch(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[],
len_x=128,
NFFT_density=128,
pad_to_density=256, pad_to_spectrum=256,
iscomplex=False, sides='twosided', nsides=2)
class spectral_testcase_nosig_real_defaultsided_stretch(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[],
len_x=128,
NFFT_density=128,
pad_to_density=256, pad_to_spectrum=256,
iscomplex=False, sides='default', nsides=1)
class spectral_testcase_nosig_complex_onesided_stretch(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[],
len_x=128,
NFFT_density=128,
pad_to_density=256, pad_to_spectrum=256,
iscomplex=True, sides='onesided', nsides=1)
class spectral_testcase_nosig_complex_twosided_stretch(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[],
len_x=128,
NFFT_density=128,
pad_to_density=256, pad_to_spectrum=256,
iscomplex=True, sides='twosided', nsides=2)
class spectral_testcase_nosig_complex_defaultsided_stretch(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[],
len_x=128,
NFFT_density=128,
pad_to_density=256, pad_to_spectrum=256,
iscomplex=True, sides='default', nsides=2)
class spectral_testcase_nosig_real_onesided_overlap(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[],
nover_density=32,
iscomplex=False, sides='onesided', nsides=1)
class spectral_testcase_nosig_real_twosided_overlap(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[],
nover_density=32,
iscomplex=False, sides='twosided', nsides=2)
class spectral_testcase_nosig_real_defaultsided_overlap(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[],
nover_density=32,
iscomplex=False, sides='default', nsides=1)
class spectral_testcase_nosig_complex_onesided_overlap(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[],
nover_density=32,
iscomplex=True, sides='onesided', nsides=1)
class spectral_testcase_nosig_complex_twosided_overlap(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[],
nover_density=32,
iscomplex=True, sides='twosided', nsides=2)
class spectral_testcase_nosig_complex_defaultsided_overlap(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[],
nover_density=32,
iscomplex=True, sides='default', nsides=2)
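# griddata tests: with interp='linear' the interpolator should reproduce a
# linear function of x and y (to numerical precision), for both 1D and 2D
# xi/yi inputs and with masked z values propagated into the result.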
def test_griddata_linear():
# z is a linear function of x and y.
def get_z(x, y):
return 3.0*x - y
# Passing 1D xi and yi arrays to griddata.
x = np.asarray([0.0, 1.0, 0.0, 1.0, 0.5])
y = np.asarray([0.0, 0.0, 1.0, 1.0, 0.5])
z = get_z(x, y)
xi = [0.2, 0.4, 0.6, 0.8]
yi = [0.1, 0.3, 0.7, 0.9]
zi = mlab.griddata(x, y, z, xi, yi, interp='linear')
xi, yi = np.meshgrid(xi, yi)
np.testing.assert_array_almost_equal(zi, get_z(xi, yi))
# Passing 2D xi and yi arrays to griddata.
zi = mlab.griddata(x, y, z, xi, yi, interp='linear')
np.testing.assert_array_almost_equal(zi, get_z(xi, yi))
# Masking z array.
z_masked = np.ma.array(z, mask=[False, False, False, True, False])
correct_zi_masked = np.ma.masked_where(xi + yi > 1.0, get_z(xi, yi))
zi = mlab.griddata(x, y, z_masked, xi, yi, interp='linear')
matest.assert_array_almost_equal(zi, correct_zi_masked)
np.testing.assert_array_equal(np.ma.getmask(zi),
np.ma.getmask(correct_zi_masked))
@knownfailureif(not HAS_NATGRID)
def test_griddata_nn():
# z is a linear function of x and y.
def get_z(x, y):
return 3.0*x - y
# Passing 1D xi and yi arrays to griddata.
x = np.asarray([0.0, 1.0, 0.0, 1.0, 0.5])
y = np.asarray([0.0, 0.0, 1.0, 1.0, 0.5])
z = get_z(x, y)
xi = [0.2, 0.4, 0.6, 0.8]
yi = [0.1, 0.3, 0.7, 0.9]
correct_zi = [[0.49999252, 1.0999978, 1.7000030, 2.3000080],
[0.29999208, 0.8999978, 1.5000029, 2.1000059],
[-0.1000099, 0.4999943, 1.0999964, 1.6999979],
[-0.3000128, 0.2999894, 0.8999913, 1.4999933]]
zi = mlab.griddata(x, y, z, xi, yi, interp='nn')
np.testing.assert_array_almost_equal(zi, correct_zi, 5)
# Decreasing xi or yi should raise ValueError.
assert_raises(ValueError, mlab.griddata, x, y, z, xi[::-1], yi,
interp='nn')
assert_raises(ValueError, mlab.griddata, x, y, z, xi, yi[::-1],
interp='nn')
# Passing 2D xi and yi arrays to griddata.
xi, yi = np.meshgrid(xi, yi)
zi = mlab.griddata(x, y, z, xi, yi, interp='nn')
np.testing.assert_array_almost_equal(zi, correct_zi, 5)
# Masking z array.
z_masked = np.ma.array(z, mask=[False, False, False, True, False])
correct_zi_masked = np.ma.masked_where(xi + yi > 1.0, correct_zi)
zi = mlab.griddata(x, y, z_masked, xi, yi, interp='nn')
np.testing.assert_array_almost_equal(zi, correct_zi_masked, 5)
np.testing.assert_array_equal(np.ma.getmask(zi),
np.ma.getmask(correct_zi_masked))
#*****************************************************************
# These tests were taken from SciPy with some minor modifications;
# they can be retrieved from:
# https://github.com/scipy/scipy/blob/master/scipy/stats/tests/test_kdeoth.py
#*****************************************************************
class gaussian_kde_tests():
def test_kde_integer_input(self):
"""Regression test for #1181."""
x1 = np.arange(5)
kde = mlab.GaussianKDE(x1)
y_expected = [0.13480721, 0.18222869, 0.19514935, 0.18222869,
0.13480721]
np.testing.assert_array_almost_equal(kde(x1), y_expected, decimal=6)
def test_gaussian_kde_covariance_caching(self):
x1 = np.array([-7, -5, 1, 4, 5], dtype=np.float)
xs = np.linspace(-10, 10, num=5)
# These expected values are from scipy 0.10, before some changes to
# gaussian_kde. They were not compared with any external reference.
y_expected = [0.02463386, 0.04689208, 0.05395444, 0.05337754,
0.01664475]
# set it to the default bandwidth.
kde2 = mlab.GaussianKDE(x1, 'scott')
y2 = kde2(xs)
np.testing.assert_array_almost_equal(y_expected, y2, decimal=7)
def test_kde_bandwidth_method(self):
np.random.seed(8765678)
n_basesample = 50
xn = np.random.randn(n_basesample)
# Default
gkde = mlab.GaussianKDE(xn)
# Supply a callable
gkde2 = mlab.GaussianKDE(xn, 'scott')
# Supply a scalar
gkde3 = mlab.GaussianKDE(xn, bw_method=gkde.factor)
xs = np.linspace(-7, 7, 51)
kdepdf = gkde.evaluate(xs)
kdepdf2 = gkde2.evaluate(xs)
assert_almost_equal(kdepdf.all(), kdepdf2.all())
kdepdf3 = gkde3.evaluate(xs)
assert_almost_equal(kdepdf.all(), kdepdf3.all())
class gaussian_kde_custom_tests(object):
def test_no_data(self):
"""Pass no data into the GaussianKDE class."""
assert_raises(ValueError, mlab.GaussianKDE, [])
def test_single_dataset_element(self):
"""Pass a single dataset element into the GaussianKDE class."""
assert_raises(ValueError, mlab.GaussianKDE, [42])
def test_silverman_multidim_dataset(self):
"""Use a multi-dimensional array as the dataset and test silverman's
output"""
x1 = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
assert_raises(np.linalg.LinAlgError, mlab.GaussianKDE, x1, "silverman")
def test_silverman_singledim_dataset(self):
"""Use a single dimension list as the dataset and test silverman's
output."""
x1 = np.array([-7, -5, 1, 4, 5])
mygauss = mlab.GaussianKDE(x1, "silverman")
y_expected = 0.76770389927475502
assert_almost_equal(mygauss.covariance_factor(), y_expected, 7)
def test_scott_multidim_dataset(self):
"""Use a multi-dimensional array as the dataset and test scott's output
"""
x1 = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
assert_raises(np.linalg.LinAlgError, mlab.GaussianKDE, x1, "scott")
def test_scott_singledim_dataset(self):
"""Use a single-dimensional array as the dataset and test scott's
output"""
x1 = np.array([-7, -5, 1, 4, 5])
mygauss = mlab.GaussianKDE(x1, "scott")
y_expected = 0.72477966367769553
assert_almost_equal(mygauss.covariance_factor(), y_expected, 7)
def test_scalar_empty_dataset(self):
"""Use an empty array as the dataset and test the scalar's cov factor
"""
assert_raises(ValueError, mlab.GaussianKDE, [], bw_method=5)
def test_scalar_covariance_dataset(self):
"""Use a dataset and test a scalar's cov factor
"""
np.random.seed(8765678)
n_basesample = 50
multidim_data = [np.random.randn(n_basesample) for i in range(5)]
kde = mlab.GaussianKDE(multidim_data, bw_method=0.5)
assert_equal(kde.covariance_factor(), 0.5)
def test_callable_covariance_dataset(self):
"""Use a multi-dimensional array as the dataset and test the callable's
cov factor"""
np.random.seed(8765678)
n_basesample = 50
multidim_data = [np.random.randn(n_basesample) for i in range(5)]
def callable_fun(x):
return 0.55
kde = mlab.GaussianKDE(multidim_data, bw_method=callable_fun)
assert_equal(kde.covariance_factor(), 0.55)
def test_callable_singledim_dataset(self):
"""Use a single-dimensional array as the dataset and test the
callable's cov factor"""
np.random.seed(8765678)
n_basesample = 50
multidim_data = np.random.randn(n_basesample)
kde = mlab.GaussianKDE(multidim_data, bw_method='silverman')
y_expected = 0.48438841363348911
assert_almost_equal(kde.covariance_factor(), y_expected, 7)
def test_wrong_bw_method(self):
"""Test the error message that should be called when bw is invalid."""
np.random.seed(8765678)
n_basesample = 50
data = np.random.randn(n_basesample)
assert_raises(ValueError, mlab.GaussianKDE, data, bw_method="invalid")
class gaussian_kde_evaluate_tests(object):
def test_evaluate_diff_dim(self):
"""Test the evaluate method when the dim's of dataset and points are
different dimensions"""
x1 = np.arange(3, 10, 2)
kde = mlab.GaussianKDE(x1)
x2 = np.arange(3, 12, 2)
y_expected = [
0.08797252, 0.11774109, 0.11774109, 0.08797252, 0.0370153
]
y = kde.evaluate(x2)
np.testing.assert_array_almost_equal(y, y_expected, 7)
def test_evaluate_inv_dim(self):
""" Invert the dimensions. i.e., Give the dataset a dimension of
1 [3,2,4], and the points will have a dimension of 3 [[3],[2],[4]].
ValueError should be raised"""
np.random.seed(8765678)
n_basesample = 50
multidim_data = np.random.randn(n_basesample)
kde = mlab.GaussianKDE(multidim_data)
x2 = [[1], [2], [3]]
assert_raises(ValueError, kde.evaluate, x2)
def test_evaluate_dim_and_num(self):
""" Tests if evaluated against a one by one array"""
x1 = np.arange(3, 10, 2)
x2 = np.array([3])
kde = mlab.GaussianKDE(x1)
y_expected = [0.08797252]
y = kde.evaluate(x2)
np.testing.assert_array_almost_equal(y, y_expected, 7)
def test_evaluate_point_dim_not_one(self):
"""Test"""
x1 = np.arange(3, 10, 2)
x2 = [np.arange(3, 10, 2), np.arange(3, 10, 2)]
kde = mlab.GaussianKDE(x1)
assert_raises(ValueError, kde.evaluate, x2)
def test_evaluate_equal_dim_and_num_lt(self):
"""Test when line 3810 fails"""
x1 = np.arange(3, 10, 2)
x2 = np.arange(3, 8, 2)
kde = mlab.GaussianKDE(x1)
y_expected = [0.08797252, 0.11774109, 0.11774109]
y = kde.evaluate(x2)
np.testing.assert_array_almost_equal(y, y_expected, 7)
def test_contiguous_regions():
a, b, c = 3, 4, 5
# Starts and ends with True
mask = [True]*a + [False]*b + [True]*c
expected = [(0, a), (a+b, a+b+c)]
assert_equal(mlab.contiguous_regions(mask), expected)
d, e = 6, 7
# Starts with True ends with False
mask = mask + [False]*e
assert_equal(mlab.contiguous_regions(mask), expected)
# Starts with False ends with True
mask = [False]*d + mask[:-e]
expected = [(d, d+a), (d+a+b, d+a+b+c)]
assert_equal(mlab.contiguous_regions(mask), expected)
# Starts and ends with False
mask = mask + [False]*e
assert_equal(mlab.contiguous_regions(mask), expected)
# No True in mask
assert_equal(mlab.contiguous_regions([False]*5), [])
# Empty mask
assert_equal(mlab.contiguous_regions([]), [])
def test_psd_onesided_norm():
u = np.array([0, 1, 2, 3, 1, 2, 1])
dt = 1.0
Su = np.abs(np.fft.fft(u) * dt)**2 / float(dt * u.size)
P, f = mlab.psd(u, NFFT=u.size, Fs=1/dt, window=mlab.window_none,
detrend=mlab.detrend_none, noverlap=0, pad_to=None,
scale_by_freq=None,
sides='onesided')
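    # Fold the negative-frequency bins of the two-sided periodogram Su onto the
    # positive-frequency bins (the DC bin is not doubled) to build the expected
    # one-sided spectrum.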
Su_1side = np.append([Su[0]], Su[1:4] + Su[4:][::-1])
assert_allclose(P, Su_1side, atol=1e-06)
if __name__ == '__main__':
import nose
import sys
args = ['-s', '--with-doctest']
argv = sys.argv
argv = argv[:1] + args + argv[1:]
nose.runmodule(argv=argv, exit=False)
| gpl-3.0 |
dimarkov/seaborn | seaborn/tests/test_axisgrid.py | 11 | 42805 | import warnings
import numpy as np
import pandas as pd
from scipy import stats
import matplotlib as mpl
import matplotlib.pyplot as plt
from distutils.version import LooseVersion
import nose.tools as nt
import numpy.testing as npt
from numpy.testing.decorators import skipif
import pandas.util.testing as tm
from .. import axisgrid as ag
from .. import rcmod
from ..palettes import color_palette
from ..distributions import kdeplot
from ..categorical import pointplot
from ..linearmodels import pairplot
from ..utils import categorical_order
rs = np.random.RandomState(0)
old_matplotlib = LooseVersion(mpl.__version__) < "1.4"
class TestFacetGrid(object):
df = pd.DataFrame(dict(x=rs.normal(size=60),
y=rs.gamma(4, size=60),
a=np.repeat(list("abc"), 20),
b=np.tile(list("mn"), 30),
c=np.tile(list("tuv"), 20),
d=np.tile(list("abcdefghij"), 6)))
def test_self_data(self):
g = ag.FacetGrid(self.df)
nt.assert_is(g.data, self.df)
plt.close("all")
def test_self_fig(self):
g = ag.FacetGrid(self.df)
nt.assert_is_instance(g.fig, plt.Figure)
plt.close("all")
def test_self_axes(self):
g = ag.FacetGrid(self.df, row="a", col="b", hue="c")
for ax in g.axes.flat:
nt.assert_is_instance(ax, plt.Axes)
plt.close("all")
def test_axes_array_size(self):
g1 = ag.FacetGrid(self.df)
nt.assert_equal(g1.axes.shape, (1, 1))
g2 = ag.FacetGrid(self.df, row="a")
nt.assert_equal(g2.axes.shape, (3, 1))
g3 = ag.FacetGrid(self.df, col="b")
nt.assert_equal(g3.axes.shape, (1, 2))
g4 = ag.FacetGrid(self.df, hue="c")
nt.assert_equal(g4.axes.shape, (1, 1))
g5 = ag.FacetGrid(self.df, row="a", col="b", hue="c")
nt.assert_equal(g5.axes.shape, (3, 2))
for ax in g5.axes.flat:
nt.assert_is_instance(ax, plt.Axes)
plt.close("all")
def test_single_axes(self):
g1 = ag.FacetGrid(self.df)
nt.assert_is_instance(g1.ax, plt.Axes)
g2 = ag.FacetGrid(self.df, row="a")
with nt.assert_raises(AttributeError):
g2.ax
g3 = ag.FacetGrid(self.df, col="a")
with nt.assert_raises(AttributeError):
g3.ax
g4 = ag.FacetGrid(self.df, col="a", row="b")
with nt.assert_raises(AttributeError):
g4.ax
def test_col_wrap(self):
g = ag.FacetGrid(self.df, col="d")
nt.assert_equal(g.axes.shape, (1, 10))
nt.assert_is(g.facet_axis(0, 8), g.axes[0, 8])
g_wrap = ag.FacetGrid(self.df, col="d", col_wrap=4)
nt.assert_equal(g_wrap.axes.shape, (10,))
nt.assert_is(g_wrap.facet_axis(0, 8), g_wrap.axes[8])
nt.assert_equal(g_wrap._ncol, 4)
nt.assert_equal(g_wrap._nrow, 3)
with nt.assert_raises(ValueError):
g = ag.FacetGrid(self.df, row="b", col="d", col_wrap=4)
df = self.df.copy()
df.loc[df.d == "j"] = np.nan
g_missing = ag.FacetGrid(df, col="d")
nt.assert_equal(g_missing.axes.shape, (1, 9))
g_missing_wrap = ag.FacetGrid(df, col="d", col_wrap=4)
nt.assert_equal(g_missing_wrap.axes.shape, (9,))
plt.close("all")
def test_normal_axes(self):
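        # _bottom_axes / _left_axes / _inner_axes (and their complements) are
        # FacetGrid's bookkeeping of which axes sit on the bottom row, the left
        # column, or the interior of the grid; check them for 1x1, single-row,
        # single-column, and full row-by-column layouts.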
null = np.empty(0, object).flat
g = ag.FacetGrid(self.df)
npt.assert_array_equal(g._bottom_axes, g.axes.flat)
npt.assert_array_equal(g._not_bottom_axes, null)
npt.assert_array_equal(g._left_axes, g.axes.flat)
npt.assert_array_equal(g._not_left_axes, null)
npt.assert_array_equal(g._inner_axes, null)
g = ag.FacetGrid(self.df, col="c")
npt.assert_array_equal(g._bottom_axes, g.axes.flat)
npt.assert_array_equal(g._not_bottom_axes, null)
npt.assert_array_equal(g._left_axes, g.axes[:, 0].flat)
npt.assert_array_equal(g._not_left_axes, g.axes[:, 1:].flat)
npt.assert_array_equal(g._inner_axes, null)
g = ag.FacetGrid(self.df, row="c")
npt.assert_array_equal(g._bottom_axes, g.axes[-1, :].flat)
npt.assert_array_equal(g._not_bottom_axes, g.axes[:-1, :].flat)
npt.assert_array_equal(g._left_axes, g.axes.flat)
npt.assert_array_equal(g._not_left_axes, null)
npt.assert_array_equal(g._inner_axes, null)
g = ag.FacetGrid(self.df, col="a", row="c")
npt.assert_array_equal(g._bottom_axes, g.axes[-1, :].flat)
npt.assert_array_equal(g._not_bottom_axes, g.axes[:-1, :].flat)
npt.assert_array_equal(g._left_axes, g.axes[:, 0].flat)
npt.assert_array_equal(g._not_left_axes, g.axes[:, 1:].flat)
npt.assert_array_equal(g._inner_axes, g.axes[:-1, 1:].flat)
plt.close("all")
def test_wrapped_axes(self):
null = np.empty(0, object).flat
g = ag.FacetGrid(self.df, col="a", col_wrap=2)
npt.assert_array_equal(g._bottom_axes,
g.axes[np.array([1, 2])].flat)
npt.assert_array_equal(g._not_bottom_axes, g.axes[:1].flat)
npt.assert_array_equal(g._left_axes, g.axes[np.array([0, 2])].flat)
npt.assert_array_equal(g._not_left_axes, g.axes[np.array([1])].flat)
npt.assert_array_equal(g._inner_axes, null)
plt.close("all")
def test_figure_size(self):
g = ag.FacetGrid(self.df, row="a", col="b")
npt.assert_array_equal(g.fig.get_size_inches(), (6, 9))
g = ag.FacetGrid(self.df, row="a", col="b", size=6)
npt.assert_array_equal(g.fig.get_size_inches(), (12, 18))
g = ag.FacetGrid(self.df, col="c", size=4, aspect=.5)
npt.assert_array_equal(g.fig.get_size_inches(), (6, 4))
plt.close("all")
def test_figure_size_with_legend(self):
g1 = ag.FacetGrid(self.df, col="a", hue="c", size=4, aspect=.5)
npt.assert_array_equal(g1.fig.get_size_inches(), (6, 4))
g1.add_legend()
nt.assert_greater(g1.fig.get_size_inches()[0], 6)
g2 = ag.FacetGrid(self.df, col="a", hue="c", size=4, aspect=.5,
legend_out=False)
npt.assert_array_equal(g2.fig.get_size_inches(), (6, 4))
g2.add_legend()
npt.assert_array_equal(g2.fig.get_size_inches(), (6, 4))
plt.close("all")
def test_legend_data(self):
g1 = ag.FacetGrid(self.df, hue="a")
g1.map(plt.plot, "x", "y")
g1.add_legend()
palette = color_palette(n_colors=3)
nt.assert_equal(g1._legend.get_title().get_text(), "a")
a_levels = sorted(self.df.a.unique())
lines = g1._legend.get_lines()
nt.assert_equal(len(lines), len(a_levels))
for line, hue in zip(lines, palette):
nt.assert_equal(line.get_color(), hue)
labels = g1._legend.get_texts()
nt.assert_equal(len(labels), len(a_levels))
for label, level in zip(labels, a_levels):
nt.assert_equal(label.get_text(), level)
plt.close("all")
def test_legend_data_missing_level(self):
g1 = ag.FacetGrid(self.df, hue="a", hue_order=list("azbc"))
g1.map(plt.plot, "x", "y")
g1.add_legend()
b, g, r, p = color_palette(n_colors=4)
palette = [b, r, p]
nt.assert_equal(g1._legend.get_title().get_text(), "a")
a_levels = sorted(self.df.a.unique())
lines = g1._legend.get_lines()
nt.assert_equal(len(lines), len(a_levels))
for line, hue in zip(lines, palette):
nt.assert_equal(line.get_color(), hue)
labels = g1._legend.get_texts()
nt.assert_equal(len(labels), 4)
for label, level in zip(labels, list("azbc")):
nt.assert_equal(label.get_text(), level)
plt.close("all")
def test_get_boolean_legend_data(self):
self.df["b_bool"] = self.df.b == "m"
g1 = ag.FacetGrid(self.df, hue="b_bool")
g1.map(plt.plot, "x", "y")
g1.add_legend()
palette = color_palette(n_colors=2)
nt.assert_equal(g1._legend.get_title().get_text(), "b_bool")
b_levels = list(map(str, categorical_order(self.df.b_bool)))
lines = g1._legend.get_lines()
nt.assert_equal(len(lines), len(b_levels))
for line, hue in zip(lines, palette):
nt.assert_equal(line.get_color(), hue)
labels = g1._legend.get_texts()
nt.assert_equal(len(labels), len(b_levels))
for label, level in zip(labels, b_levels):
nt.assert_equal(label.get_text(), level)
plt.close("all")
def test_legend_options(self):
g1 = ag.FacetGrid(self.df, hue="b")
g1.map(plt.plot, "x", "y")
g1.add_legend()
def test_legendout_with_colwrap(self):
g = ag.FacetGrid(self.df, col="d", hue='b',
col_wrap=4, legend_out=False)
g.map(plt.plot, "x", "y", linewidth=3)
g.add_legend()
def test_subplot_kws(self):
g = ag.FacetGrid(self.df, subplot_kws=dict(axisbg="blue"))
for ax in g.axes.flat:
nt.assert_equal(ax.get_axis_bgcolor(), "blue")
@skipif(old_matplotlib)
def test_gridspec_kws(self):
ratios = [3, 1, 2]
sizes = [0.46, 0.15, 0.31]
gskws = dict(width_ratios=ratios, height_ratios=ratios)
g = ag.FacetGrid(self.df, col='c', row='a', gridspec_kws=gskws)
# clear out all ticks
for ax in g.axes.flat:
ax.set_xticks([])
ax.set_yticks([])
g.fig.tight_layout()
widths, heights = np.meshgrid(sizes, sizes)
for n, ax in enumerate(g.axes.flat):
npt.assert_almost_equal(
ax.get_position().width,
widths.flatten()[n],
decimal=2
)
npt.assert_almost_equal(
ax.get_position().height,
heights.flatten()[n],
decimal=2
)
@skipif(old_matplotlib)
def test_gridspec_kws_col_wrap(self):
ratios = [3, 1, 2, 1, 1]
sizes = [0.46, 0.15, 0.31]
gskws = dict(width_ratios=ratios)
with warnings.catch_warnings():
warnings.resetwarnings()
warnings.simplefilter("always")
npt.assert_warns(UserWarning, ag.FacetGrid, self.df, col='d',
col_wrap=5, gridspec_kws=gskws)
@skipif(not old_matplotlib)
    def test_gridspec_kws_old_mpl(self):
ratios = [3, 1, 2]
sizes = [0.46, 0.15, 0.31]
gskws = dict(width_ratios=ratios, height_ratios=ratios)
with warnings.catch_warnings():
warnings.resetwarnings()
warnings.simplefilter("always")
npt.assert_warns(UserWarning, ag.FacetGrid, self.df, col='c',
row='a', gridspec_kws=gskws)
def test_data_generator(self):
g = ag.FacetGrid(self.df, row="a")
d = list(g.facet_data())
nt.assert_equal(len(d), 3)
tup, data = d[0]
nt.assert_equal(tup, (0, 0, 0))
nt.assert_true((data["a"] == "a").all())
tup, data = d[1]
nt.assert_equal(tup, (1, 0, 0))
nt.assert_true((data["a"] == "b").all())
g = ag.FacetGrid(self.df, row="a", col="b")
d = list(g.facet_data())
nt.assert_equal(len(d), 6)
tup, data = d[0]
nt.assert_equal(tup, (0, 0, 0))
nt.assert_true((data["a"] == "a").all())
nt.assert_true((data["b"] == "m").all())
tup, data = d[1]
nt.assert_equal(tup, (0, 1, 0))
nt.assert_true((data["a"] == "a").all())
nt.assert_true((data["b"] == "n").all())
tup, data = d[2]
nt.assert_equal(tup, (1, 0, 0))
nt.assert_true((data["a"] == "b").all())
nt.assert_true((data["b"] == "m").all())
g = ag.FacetGrid(self.df, hue="c")
d = list(g.facet_data())
nt.assert_equal(len(d), 3)
tup, data = d[1]
nt.assert_equal(tup, (0, 0, 1))
nt.assert_true((data["c"] == "u").all())
plt.close("all")
def test_map(self):
g = ag.FacetGrid(self.df, row="a", col="b", hue="c")
g.map(plt.plot, "x", "y", linewidth=3)
lines = g.axes[0, 0].lines
nt.assert_equal(len(lines), 3)
line1, _, _ = lines
nt.assert_equal(line1.get_linewidth(), 3)
x, y = line1.get_data()
mask = (self.df.a == "a") & (self.df.b == "m") & (self.df.c == "t")
npt.assert_array_equal(x, self.df.x[mask])
npt.assert_array_equal(y, self.df.y[mask])
def test_map_dataframe(self):
g = ag.FacetGrid(self.df, row="a", col="b", hue="c")
plot = lambda x, y, data=None, **kws: plt.plot(data[x], data[y], **kws)
g.map_dataframe(plot, "x", "y", linestyle="--")
lines = g.axes[0, 0].lines
nt.assert_equal(len(lines), 3)
line1, _, _ = lines
nt.assert_equal(line1.get_linestyle(), "--")
x, y = line1.get_data()
mask = (self.df.a == "a") & (self.df.b == "m") & (self.df.c == "t")
npt.assert_array_equal(x, self.df.x[mask])
npt.assert_array_equal(y, self.df.y[mask])
def test_set(self):
g = ag.FacetGrid(self.df, row="a", col="b")
xlim = (-2, 5)
ylim = (3, 6)
xticks = [-2, 0, 3, 5]
yticks = [3, 4.5, 6]
g.set(xlim=xlim, ylim=ylim, xticks=xticks, yticks=yticks)
for ax in g.axes.flat:
npt.assert_array_equal(ax.get_xlim(), xlim)
npt.assert_array_equal(ax.get_ylim(), ylim)
npt.assert_array_equal(ax.get_xticks(), xticks)
npt.assert_array_equal(ax.get_yticks(), yticks)
plt.close("all")
def test_set_titles(self):
g = ag.FacetGrid(self.df, row="a", col="b")
g.map(plt.plot, "x", "y")
# Test the default titles
nt.assert_equal(g.axes[0, 0].get_title(), "a = a | b = m")
nt.assert_equal(g.axes[0, 1].get_title(), "a = a | b = n")
nt.assert_equal(g.axes[1, 0].get_title(), "a = b | b = m")
# Test a provided title
g.set_titles("{row_var} == {row_name} \/ {col_var} == {col_name}")
nt.assert_equal(g.axes[0, 0].get_title(), "a == a \/ b == m")
nt.assert_equal(g.axes[0, 1].get_title(), "a == a \/ b == n")
nt.assert_equal(g.axes[1, 0].get_title(), "a == b \/ b == m")
# Test a single row
g = ag.FacetGrid(self.df, col="b")
g.map(plt.plot, "x", "y")
# Test the default titles
nt.assert_equal(g.axes[0, 0].get_title(), "b = m")
nt.assert_equal(g.axes[0, 1].get_title(), "b = n")
# test with dropna=False
g = ag.FacetGrid(self.df, col="b", hue="b", dropna=False)
g.map(plt.plot, 'x', 'y')
plt.close("all")
def test_set_titles_margin_titles(self):
g = ag.FacetGrid(self.df, row="a", col="b", margin_titles=True)
g.map(plt.plot, "x", "y")
# Test the default titles
nt.assert_equal(g.axes[0, 0].get_title(), "b = m")
nt.assert_equal(g.axes[0, 1].get_title(), "b = n")
nt.assert_equal(g.axes[1, 0].get_title(), "")
# Test the row "titles"
nt.assert_equal(g.axes[0, 1].texts[0].get_text(), "a = a")
nt.assert_equal(g.axes[1, 1].texts[0].get_text(), "a = b")
# Test a provided title
g.set_titles(col_template="{col_var} == {col_name}")
nt.assert_equal(g.axes[0, 0].get_title(), "b == m")
nt.assert_equal(g.axes[0, 1].get_title(), "b == n")
nt.assert_equal(g.axes[1, 0].get_title(), "")
plt.close("all")
def test_set_ticklabels(self):
g = ag.FacetGrid(self.df, row="a", col="b")
g.map(plt.plot, "x", "y")
xlab = [l.get_text() + "h" for l in g.axes[1, 0].get_xticklabels()]
ylab = [l.get_text() for l in g.axes[1, 0].get_yticklabels()]
g.set_xticklabels(xlab)
g.set_yticklabels(rotation=90)
got_x = [l.get_text() + "h" for l in g.axes[1, 1].get_xticklabels()]
got_y = [l.get_text() for l in g.axes[0, 0].get_yticklabels()]
npt.assert_array_equal(got_x, xlab)
npt.assert_array_equal(got_y, ylab)
x, y = np.arange(10), np.arange(10)
df = pd.DataFrame(np.c_[x, y], columns=["x", "y"])
g = ag.FacetGrid(df).map(pointplot, "x", "y")
g.set_xticklabels(step=2)
got_x = [int(l.get_text()) for l in g.axes[0, 0].get_xticklabels()]
npt.assert_array_equal(x[::2], got_x)
g = ag.FacetGrid(self.df, col="d", col_wrap=5)
g.map(plt.plot, "x", "y")
g.set_xticklabels(rotation=45)
g.set_yticklabels(rotation=75)
for ax in g._bottom_axes:
for l in ax.get_xticklabels():
nt.assert_equal(l.get_rotation(), 45)
for ax in g._left_axes:
for l in ax.get_yticklabels():
nt.assert_equal(l.get_rotation(), 75)
plt.close("all")
def test_set_axis_labels(self):
g = ag.FacetGrid(self.df, row="a", col="b")
g.map(plt.plot, "x", "y")
xlab = 'xx'
ylab = 'yy'
g.set_axis_labels(xlab, ylab)
got_x = [ax.get_xlabel() for ax in g.axes[-1, :]]
got_y = [ax.get_ylabel() for ax in g.axes[:, 0]]
npt.assert_array_equal(got_x, xlab)
npt.assert_array_equal(got_y, ylab)
plt.close("all")
def test_axis_lims(self):
g = ag.FacetGrid(self.df, row="a", col="b", xlim=(0, 4), ylim=(-2, 3))
nt.assert_equal(g.axes[0, 0].get_xlim(), (0, 4))
nt.assert_equal(g.axes[0, 0].get_ylim(), (-2, 3))
plt.close("all")
def test_data_orders(self):
g = ag.FacetGrid(self.df, row="a", col="b", hue="c")
nt.assert_equal(g.row_names, list("abc"))
nt.assert_equal(g.col_names, list("mn"))
nt.assert_equal(g.hue_names, list("tuv"))
nt.assert_equal(g.axes.shape, (3, 2))
g = ag.FacetGrid(self.df, row="a", col="b", hue="c",
row_order=list("bca"),
col_order=list("nm"),
hue_order=list("vtu"))
nt.assert_equal(g.row_names, list("bca"))
nt.assert_equal(g.col_names, list("nm"))
nt.assert_equal(g.hue_names, list("vtu"))
nt.assert_equal(g.axes.shape, (3, 2))
g = ag.FacetGrid(self.df, row="a", col="b", hue="c",
row_order=list("bcda"),
col_order=list("nom"),
hue_order=list("qvtu"))
nt.assert_equal(g.row_names, list("bcda"))
nt.assert_equal(g.col_names, list("nom"))
nt.assert_equal(g.hue_names, list("qvtu"))
nt.assert_equal(g.axes.shape, (4, 3))
plt.close("all")
def test_palette(self):
rcmod.set()
g = ag.FacetGrid(self.df, hue="c")
nt.assert_equal(g._colors, color_palette(n_colors=3))
g = ag.FacetGrid(self.df, hue="d")
nt.assert_equal(g._colors, color_palette("husl", 10))
g = ag.FacetGrid(self.df, hue="c", palette="Set2")
nt.assert_equal(g._colors, color_palette("Set2", 3))
dict_pal = dict(t="red", u="green", v="blue")
list_pal = color_palette(["red", "green", "blue"], 3)
g = ag.FacetGrid(self.df, hue="c", palette=dict_pal)
nt.assert_equal(g._colors, list_pal)
list_pal = color_palette(["green", "blue", "red"], 3)
g = ag.FacetGrid(self.df, hue="c", hue_order=list("uvt"),
palette=dict_pal)
nt.assert_equal(g._colors, list_pal)
plt.close("all")
def test_hue_kws(self):
kws = dict(marker=["o", "s", "D"])
g = ag.FacetGrid(self.df, hue="c", hue_kws=kws)
g.map(plt.plot, "x", "y")
for line, marker in zip(g.axes[0, 0].lines, kws["marker"]):
nt.assert_equal(line.get_marker(), marker)
def test_dropna(self):
df = self.df.copy()
hasna = pd.Series(np.tile(np.arange(6), 10), dtype=np.float)
hasna[hasna == 5] = np.nan
df["hasna"] = hasna
g = ag.FacetGrid(df, dropna=False, row="hasna")
nt.assert_equal(g._not_na.sum(), 60)
g = ag.FacetGrid(df, dropna=True, row="hasna")
nt.assert_equal(g._not_na.sum(), 50)
plt.close("all")
@classmethod
def teardown_class(cls):
"""Ensure that all figures are closed on exit."""
plt.close("all")
class TestPairGrid(object):
rs = np.random.RandomState(sum(map(ord, "PairGrid")))
df = pd.DataFrame(dict(x=rs.normal(size=80),
y=rs.randint(0, 4, size=(80)),
z=rs.gamma(3, size=80),
a=np.repeat(list("abcd"), 20),
b=np.repeat(list("abcdefgh"), 10)))
def test_self_data(self):
g = ag.PairGrid(self.df)
nt.assert_is(g.data, self.df)
plt.close("all")
def test_ignore_datelike_data(self):
df = self.df.copy()
df['date'] = pd.date_range('2010-01-01', periods=len(df), freq='d')
result = ag.PairGrid(self.df).data
expected = df.drop('date', axis=1)
tm.assert_frame_equal(result, expected)
plt.close("all")
def test_self_fig(self):
g = ag.PairGrid(self.df)
nt.assert_is_instance(g.fig, plt.Figure)
plt.close("all")
def test_self_axes(self):
g = ag.PairGrid(self.df)
for ax in g.axes.flat:
nt.assert_is_instance(ax, plt.Axes)
plt.close("all")
def test_default_axes(self):
g = ag.PairGrid(self.df)
nt.assert_equal(g.axes.shape, (3, 3))
nt.assert_equal(g.x_vars, ["x", "y", "z"])
nt.assert_equal(g.y_vars, ["x", "y", "z"])
nt.assert_true(g.square_grid)
plt.close("all")
def test_specific_square_axes(self):
vars = ["z", "x"]
g = ag.PairGrid(self.df, vars=vars)
nt.assert_equal(g.axes.shape, (len(vars), len(vars)))
nt.assert_equal(g.x_vars, vars)
nt.assert_equal(g.y_vars, vars)
nt.assert_true(g.square_grid)
plt.close("all")
def test_specific_nonsquare_axes(self):
x_vars = ["x", "y"]
y_vars = ["z", "y", "x"]
g = ag.PairGrid(self.df, x_vars=x_vars, y_vars=y_vars)
nt.assert_equal(g.axes.shape, (len(y_vars), len(x_vars)))
nt.assert_equal(g.x_vars, x_vars)
nt.assert_equal(g.y_vars, y_vars)
nt.assert_true(not g.square_grid)
x_vars = ["x", "y"]
y_vars = "z"
g = ag.PairGrid(self.df, x_vars=x_vars, y_vars=y_vars)
nt.assert_equal(g.axes.shape, (len(y_vars), len(x_vars)))
nt.assert_equal(g.x_vars, list(x_vars))
nt.assert_equal(g.y_vars, list(y_vars))
nt.assert_true(not g.square_grid)
plt.close("all")
def test_specific_square_axes_with_array(self):
vars = np.array(["z", "x"])
g = ag.PairGrid(self.df, vars=vars)
nt.assert_equal(g.axes.shape, (len(vars), len(vars)))
nt.assert_equal(g.x_vars, list(vars))
nt.assert_equal(g.y_vars, list(vars))
nt.assert_true(g.square_grid)
plt.close("all")
def test_specific_nonsquare_axes_with_array(self):
x_vars = np.array(["x", "y"])
y_vars = np.array(["z", "y", "x"])
g = ag.PairGrid(self.df, x_vars=x_vars, y_vars=y_vars)
nt.assert_equal(g.axes.shape, (len(y_vars), len(x_vars)))
nt.assert_equal(g.x_vars, list(x_vars))
nt.assert_equal(g.y_vars, list(y_vars))
nt.assert_true(not g.square_grid)
plt.close("all")
def test_size(self):
g1 = ag.PairGrid(self.df, size=3)
npt.assert_array_equal(g1.fig.get_size_inches(), (9, 9))
g2 = ag.PairGrid(self.df, size=4, aspect=.5)
npt.assert_array_equal(g2.fig.get_size_inches(), (6, 12))
g3 = ag.PairGrid(self.df, y_vars=["z"], x_vars=["x", "y"],
size=2, aspect=2)
npt.assert_array_equal(g3.fig.get_size_inches(), (8, 2))
plt.close("all")
def test_map(self):
vars = ["x", "y", "z"]
g1 = ag.PairGrid(self.df)
g1.map(plt.scatter)
for i, axes_i in enumerate(g1.axes):
for j, ax in enumerate(axes_i):
x_in = self.df[vars[j]]
y_in = self.df[vars[i]]
x_out, y_out = ax.collections[0].get_offsets().T
npt.assert_array_equal(x_in, x_out)
npt.assert_array_equal(y_in, y_out)
g2 = ag.PairGrid(self.df, "a")
g2.map(plt.scatter)
for i, axes_i in enumerate(g2.axes):
for j, ax in enumerate(axes_i):
x_in = self.df[vars[j]]
y_in = self.df[vars[i]]
for k, k_level in enumerate("abcd"):
x_in_k = x_in[self.df.a == k_level]
y_in_k = y_in[self.df.a == k_level]
x_out, y_out = ax.collections[k].get_offsets().T
npt.assert_array_equal(x_in_k, x_out)
npt.assert_array_equal(y_in_k, y_out)
plt.close("all")
def test_map_nonsquare(self):
x_vars = ["x"]
y_vars = ["y", "z"]
g = ag.PairGrid(self.df, x_vars=x_vars, y_vars=y_vars)
g.map(plt.scatter)
x_in = self.df.x
for i, i_var in enumerate(y_vars):
ax = g.axes[i, 0]
y_in = self.df[i_var]
x_out, y_out = ax.collections[0].get_offsets().T
npt.assert_array_equal(x_in, x_out)
npt.assert_array_equal(y_in, y_out)
plt.close("all")
def test_map_lower(self):
vars = ["x", "y", "z"]
g = ag.PairGrid(self.df)
g.map_lower(plt.scatter)
for i, j in zip(*np.tril_indices_from(g.axes, -1)):
ax = g.axes[i, j]
x_in = self.df[vars[j]]
y_in = self.df[vars[i]]
x_out, y_out = ax.collections[0].get_offsets().T
npt.assert_array_equal(x_in, x_out)
npt.assert_array_equal(y_in, y_out)
for i, j in zip(*np.triu_indices_from(g.axes)):
ax = g.axes[i, j]
nt.assert_equal(len(ax.collections), 0)
plt.close("all")
def test_map_upper(self):
vars = ["x", "y", "z"]
g = ag.PairGrid(self.df)
g.map_upper(plt.scatter)
for i, j in zip(*np.triu_indices_from(g.axes, 1)):
ax = g.axes[i, j]
x_in = self.df[vars[j]]
y_in = self.df[vars[i]]
x_out, y_out = ax.collections[0].get_offsets().T
npt.assert_array_equal(x_in, x_out)
npt.assert_array_equal(y_in, y_out)
for i, j in zip(*np.tril_indices_from(g.axes)):
ax = g.axes[i, j]
nt.assert_equal(len(ax.collections), 0)
plt.close("all")
@skipif(old_matplotlib)
def test_map_diag(self):
g1 = ag.PairGrid(self.df)
g1.map_diag(plt.hist)
for ax in g1.diag_axes:
nt.assert_equal(len(ax.patches), 10)
g2 = ag.PairGrid(self.df)
g2.map_diag(plt.hist, bins=15)
for ax in g2.diag_axes:
nt.assert_equal(len(ax.patches), 15)
g3 = ag.PairGrid(self.df, hue="a")
g3.map_diag(plt.hist)
for ax in g3.diag_axes:
nt.assert_equal(len(ax.patches), 40)
plt.close("all")
@skipif(old_matplotlib)
def test_map_diag_and_offdiag(self):
vars = ["x", "y", "z"]
g = ag.PairGrid(self.df)
g.map_offdiag(plt.scatter)
g.map_diag(plt.hist)
for ax in g.diag_axes:
nt.assert_equal(len(ax.patches), 10)
for i, j in zip(*np.triu_indices_from(g.axes, 1)):
ax = g.axes[i, j]
x_in = self.df[vars[j]]
y_in = self.df[vars[i]]
x_out, y_out = ax.collections[0].get_offsets().T
npt.assert_array_equal(x_in, x_out)
npt.assert_array_equal(y_in, y_out)
for i, j in zip(*np.tril_indices_from(g.axes, -1)):
ax = g.axes[i, j]
x_in = self.df[vars[j]]
y_in = self.df[vars[i]]
x_out, y_out = ax.collections[0].get_offsets().T
npt.assert_array_equal(x_in, x_out)
npt.assert_array_equal(y_in, y_out)
for i, j in zip(*np.diag_indices_from(g.axes)):
ax = g.axes[i, j]
nt.assert_equal(len(ax.collections), 0)
plt.close("all")
def test_palette(self):
rcmod.set()
g = ag.PairGrid(self.df, hue="a")
nt.assert_equal(g.palette, color_palette(n_colors=4))
g = ag.PairGrid(self.df, hue="b")
nt.assert_equal(g.palette, color_palette("husl", 8))
g = ag.PairGrid(self.df, hue="a", palette="Set2")
nt.assert_equal(g.palette, color_palette("Set2", 4))
dict_pal = dict(a="red", b="green", c="blue", d="purple")
list_pal = color_palette(["red", "green", "blue", "purple"], 4)
g = ag.PairGrid(self.df, hue="a", palette=dict_pal)
nt.assert_equal(g.palette, list_pal)
list_pal = color_palette(["purple", "blue", "red", "green"], 4)
g = ag.PairGrid(self.df, hue="a", hue_order=list("dcab"),
palette=dict_pal)
nt.assert_equal(g.palette, list_pal)
plt.close("all")
def test_hue_kws(self):
kws = dict(marker=["o", "s", "d", "+"])
g = ag.PairGrid(self.df, hue="a", hue_kws=kws)
g.map(plt.plot)
for line, marker in zip(g.axes[0, 0].lines, kws["marker"]):
nt.assert_equal(line.get_marker(), marker)
g = ag.PairGrid(self.df, hue="a", hue_kws=kws,
hue_order=list("dcab"))
g.map(plt.plot)
for line, marker in zip(g.axes[0, 0].lines, kws["marker"]):
nt.assert_equal(line.get_marker(), marker)
plt.close("all")
@skipif(old_matplotlib)
def test_hue_order(self):
order = list("dcab")
g = ag.PairGrid(self.df, hue="a", hue_order=order)
g.map(plt.plot)
for line, level in zip(g.axes[1, 0].lines, order):
x, y = line.get_xydata().T
npt.assert_array_equal(x, self.df.loc[self.df.a == level, "x"])
npt.assert_array_equal(y, self.df.loc[self.df.a == level, "y"])
plt.close("all")
g = ag.PairGrid(self.df, hue="a", hue_order=order)
g.map_diag(plt.plot)
for line, level in zip(g.axes[0, 0].lines, order):
x, y = line.get_xydata().T
npt.assert_array_equal(x, self.df.loc[self.df.a == level, "x"])
npt.assert_array_equal(y, self.df.loc[self.df.a == level, "x"])
plt.close("all")
g = ag.PairGrid(self.df, hue="a", hue_order=order)
g.map_lower(plt.plot)
for line, level in zip(g.axes[1, 0].lines, order):
x, y = line.get_xydata().T
npt.assert_array_equal(x, self.df.loc[self.df.a == level, "x"])
npt.assert_array_equal(y, self.df.loc[self.df.a == level, "y"])
plt.close("all")
g = ag.PairGrid(self.df, hue="a", hue_order=order)
g.map_upper(plt.plot)
for line, level in zip(g.axes[0, 1].lines, order):
x, y = line.get_xydata().T
npt.assert_array_equal(x, self.df.loc[self.df.a == level, "y"])
npt.assert_array_equal(y, self.df.loc[self.df.a == level, "x"])
plt.close("all")
@skipif(old_matplotlib)
def test_hue_order_missing_level(self):
order = list("dcaeb")
g = ag.PairGrid(self.df, hue="a", hue_order=order)
g.map(plt.plot)
for line, level in zip(g.axes[1, 0].lines, order):
x, y = line.get_xydata().T
npt.assert_array_equal(x, self.df.loc[self.df.a == level, "x"])
npt.assert_array_equal(y, self.df.loc[self.df.a == level, "y"])
plt.close("all")
g = ag.PairGrid(self.df, hue="a", hue_order=order)
g.map_diag(plt.plot)
for line, level in zip(g.axes[0, 0].lines, order):
x, y = line.get_xydata().T
npt.assert_array_equal(x, self.df.loc[self.df.a == level, "x"])
npt.assert_array_equal(y, self.df.loc[self.df.a == level, "x"])
plt.close("all")
g = ag.PairGrid(self.df, hue="a", hue_order=order)
g.map_lower(plt.plot)
for line, level in zip(g.axes[1, 0].lines, order):
x, y = line.get_xydata().T
npt.assert_array_equal(x, self.df.loc[self.df.a == level, "x"])
npt.assert_array_equal(y, self.df.loc[self.df.a == level, "y"])
plt.close("all")
g = ag.PairGrid(self.df, hue="a", hue_order=order)
g.map_upper(plt.plot)
for line, level in zip(g.axes[0, 1].lines, order):
x, y = line.get_xydata().T
npt.assert_array_equal(x, self.df.loc[self.df.a == level, "y"])
npt.assert_array_equal(y, self.df.loc[self.df.a == level, "x"])
plt.close("all")
def test_nondefault_index(self):
df = self.df.copy().set_index("b")
vars = ["x", "y", "z"]
g1 = ag.PairGrid(df)
g1.map(plt.scatter)
for i, axes_i in enumerate(g1.axes):
for j, ax in enumerate(axes_i):
x_in = self.df[vars[j]]
y_in = self.df[vars[i]]
x_out, y_out = ax.collections[0].get_offsets().T
npt.assert_array_equal(x_in, x_out)
npt.assert_array_equal(y_in, y_out)
g2 = ag.PairGrid(df, "a")
g2.map(plt.scatter)
for i, axes_i in enumerate(g2.axes):
for j, ax in enumerate(axes_i):
x_in = self.df[vars[j]]
y_in = self.df[vars[i]]
for k, k_level in enumerate("abcd"):
x_in_k = x_in[self.df.a == k_level]
y_in_k = y_in[self.df.a == k_level]
x_out, y_out = ax.collections[k].get_offsets().T
npt.assert_array_equal(x_in_k, x_out)
npt.assert_array_equal(y_in_k, y_out)
plt.close("all")
@skipif(old_matplotlib)
def test_pairplot(self):
vars = ["x", "y", "z"]
g = pairplot(self.df)
for ax in g.diag_axes:
nt.assert_equal(len(ax.patches), 10)
for i, j in zip(*np.triu_indices_from(g.axes, 1)):
ax = g.axes[i, j]
x_in = self.df[vars[j]]
y_in = self.df[vars[i]]
x_out, y_out = ax.collections[0].get_offsets().T
npt.assert_array_equal(x_in, x_out)
npt.assert_array_equal(y_in, y_out)
for i, j in zip(*np.tril_indices_from(g.axes, -1)):
ax = g.axes[i, j]
x_in = self.df[vars[j]]
y_in = self.df[vars[i]]
x_out, y_out = ax.collections[0].get_offsets().T
npt.assert_array_equal(x_in, x_out)
npt.assert_array_equal(y_in, y_out)
for i, j in zip(*np.diag_indices_from(g.axes)):
ax = g.axes[i, j]
nt.assert_equal(len(ax.collections), 0)
plt.close("all")
@skipif(old_matplotlib)
def test_pairplot_reg(self):
vars = ["x", "y", "z"]
g = pairplot(self.df, kind="reg")
for ax in g.diag_axes:
nt.assert_equal(len(ax.patches), 10)
for i, j in zip(*np.triu_indices_from(g.axes, 1)):
ax = g.axes[i, j]
x_in = self.df[vars[j]]
y_in = self.df[vars[i]]
x_out, y_out = ax.collections[0].get_offsets().T
npt.assert_array_equal(x_in, x_out)
npt.assert_array_equal(y_in, y_out)
nt.assert_equal(len(ax.lines), 1)
nt.assert_equal(len(ax.collections), 2)
for i, j in zip(*np.tril_indices_from(g.axes, -1)):
ax = g.axes[i, j]
x_in = self.df[vars[j]]
y_in = self.df[vars[i]]
x_out, y_out = ax.collections[0].get_offsets().T
npt.assert_array_equal(x_in, x_out)
npt.assert_array_equal(y_in, y_out)
nt.assert_equal(len(ax.lines), 1)
nt.assert_equal(len(ax.collections), 2)
for i, j in zip(*np.diag_indices_from(g.axes)):
ax = g.axes[i, j]
nt.assert_equal(len(ax.collections), 0)
plt.close("all")
@skipif(old_matplotlib)
def test_pairplot_kde(self):
vars = ["x", "y", "z"]
g = pairplot(self.df, diag_kind="kde")
for ax in g.diag_axes:
nt.assert_equal(len(ax.lines), 1)
for i, j in zip(*np.triu_indices_from(g.axes, 1)):
ax = g.axes[i, j]
x_in = self.df[vars[j]]
y_in = self.df[vars[i]]
x_out, y_out = ax.collections[0].get_offsets().T
npt.assert_array_equal(x_in, x_out)
npt.assert_array_equal(y_in, y_out)
for i, j in zip(*np.tril_indices_from(g.axes, -1)):
ax = g.axes[i, j]
x_in = self.df[vars[j]]
y_in = self.df[vars[i]]
x_out, y_out = ax.collections[0].get_offsets().T
npt.assert_array_equal(x_in, x_out)
npt.assert_array_equal(y_in, y_out)
for i, j in zip(*np.diag_indices_from(g.axes)):
ax = g.axes[i, j]
nt.assert_equal(len(ax.collections), 0)
plt.close("all")
@skipif(old_matplotlib)
def test_pairplot_markers(self):
vars = ["x", "y", "z"]
markers = ["o", "x", "s", "d"]
g = pairplot(self.df, hue="a", vars=vars, markers=markers)
nt.assert_equal(g.hue_kws["marker"], markers)
plt.close("all")
with nt.assert_raises(ValueError):
g = pairplot(self.df, hue="a", vars=vars, markers=markers[:-2])
@classmethod
def teardown_class(cls):
"""Ensure that all figures are closed on exit."""
plt.close("all")
class TestJointGrid(object):
rs = np.random.RandomState(sum(map(ord, "JointGrid")))
x = rs.randn(100)
y = rs.randn(100)
x_na = x.copy()
x_na[10] = np.nan
x_na[20] = np.nan
data = pd.DataFrame(dict(x=x, y=y, x_na=x_na))
def test_margin_grid_from_arrays(self):
g = ag.JointGrid(self.x, self.y)
npt.assert_array_equal(g.x, self.x)
npt.assert_array_equal(g.y, self.y)
plt.close("all")
def test_margin_grid_from_series(self):
g = ag.JointGrid(self.data.x, self.data.y)
npt.assert_array_equal(g.x, self.x)
npt.assert_array_equal(g.y, self.y)
plt.close("all")
def test_margin_grid_from_dataframe(self):
g = ag.JointGrid("x", "y", self.data)
npt.assert_array_equal(g.x, self.x)
npt.assert_array_equal(g.y, self.y)
plt.close("all")
def test_margin_grid_axis_labels(self):
g = ag.JointGrid("x", "y", self.data)
xlabel, ylabel = g.ax_joint.get_xlabel(), g.ax_joint.get_ylabel()
nt.assert_equal(xlabel, "x")
nt.assert_equal(ylabel, "y")
g.set_axis_labels("x variable", "y variable")
xlabel, ylabel = g.ax_joint.get_xlabel(), g.ax_joint.get_ylabel()
nt.assert_equal(xlabel, "x variable")
nt.assert_equal(ylabel, "y variable")
plt.close("all")
def test_dropna(self):
g = ag.JointGrid("x_na", "y", self.data, dropna=False)
nt.assert_equal(len(g.x), len(self.x_na))
g = ag.JointGrid("x_na", "y", self.data, dropna=True)
nt.assert_equal(len(g.x), pd.notnull(self.x_na).sum())
plt.close("all")
def test_axlims(self):
lim = (-3, 3)
g = ag.JointGrid("x", "y", self.data, xlim=lim, ylim=lim)
nt.assert_equal(g.ax_joint.get_xlim(), lim)
nt.assert_equal(g.ax_joint.get_ylim(), lim)
nt.assert_equal(g.ax_marg_x.get_xlim(), lim)
nt.assert_equal(g.ax_marg_y.get_ylim(), lim)
def test_marginal_ticks(self):
g = ag.JointGrid("x", "y", self.data)
nt.assert_true(~len(g.ax_marg_x.get_xticks()))
nt.assert_true(~len(g.ax_marg_y.get_yticks()))
plt.close("all")
def test_bivariate_plot(self):
g = ag.JointGrid("x", "y", self.data)
g.plot_joint(plt.plot)
x, y = g.ax_joint.lines[0].get_xydata().T
npt.assert_array_equal(x, self.x)
npt.assert_array_equal(y, self.y)
plt.close("all")
def test_univariate_plot(self):
g = ag.JointGrid("x", "x", self.data)
g.plot_marginals(kdeplot)
_, y1 = g.ax_marg_x.lines[0].get_xydata().T
y2, _ = g.ax_marg_y.lines[0].get_xydata().T
npt.assert_array_equal(y1, y2)
plt.close("all")
def test_plot(self):
g = ag.JointGrid("x", "x", self.data)
g.plot(plt.plot, kdeplot)
x, y = g.ax_joint.lines[0].get_xydata().T
npt.assert_array_equal(x, self.x)
npt.assert_array_equal(y, self.x)
_, y1 = g.ax_marg_x.lines[0].get_xydata().T
y2, _ = g.ax_marg_y.lines[0].get_xydata().T
npt.assert_array_equal(y1, y2)
plt.close("all")
def test_annotate(self):
g = ag.JointGrid("x", "y", self.data)
rp = stats.pearsonr(self.x, self.y)
g.annotate(stats.pearsonr)
annotation = g.ax_joint.legend_.texts[0].get_text()
nt.assert_equal(annotation, "pearsonr = %.2g; p = %.2g" % rp)
g.annotate(stats.pearsonr, stat="correlation")
annotation = g.ax_joint.legend_.texts[0].get_text()
nt.assert_equal(annotation, "correlation = %.2g; p = %.2g" % rp)
def rsquared(x, y):
return stats.pearsonr(x, y)[0] ** 2
r2 = rsquared(self.x, self.y)
g.annotate(rsquared)
annotation = g.ax_joint.legend_.texts[0].get_text()
nt.assert_equal(annotation, "rsquared = %.2g" % r2)
template = "{stat} = {val:.3g} (p = {p:.3g})"
g.annotate(stats.pearsonr, template=template)
annotation = g.ax_joint.legend_.texts[0].get_text()
nt.assert_equal(annotation, template.format(stat="pearsonr",
val=rp[0], p=rp[1]))
plt.close("all")
def test_space(self):
g = ag.JointGrid("x", "y", self.data, space=0)
joint_bounds = g.ax_joint.bbox.bounds
marg_x_bounds = g.ax_marg_x.bbox.bounds
marg_y_bounds = g.ax_marg_y.bbox.bounds
nt.assert_equal(joint_bounds[2], marg_x_bounds[2])
nt.assert_equal(joint_bounds[3], marg_y_bounds[3])
@classmethod
def teardown_class(cls):
"""Ensure that all figures are closed on exit."""
plt.close("all")
| bsd-3-clause |
ryfeus/lambda-packs | Sklearn_scipy_numpy/source/sklearn/preprocessing/__init__.py | 268 | 1319 | """
The :mod:`sklearn.preprocessing` module includes scaling, centering,
normalization, binarization and imputation methods.
"""
from ._function_transformer import FunctionTransformer
from .data import Binarizer
from .data import KernelCenterer
from .data import MinMaxScaler
from .data import MaxAbsScaler
from .data import Normalizer
from .data import RobustScaler
from .data import StandardScaler
from .data import add_dummy_feature
from .data import binarize
from .data import normalize
from .data import scale
from .data import robust_scale
from .data import maxabs_scale
from .data import minmax_scale
from .data import OneHotEncoder
from .data import PolynomialFeatures
from .label import label_binarize
from .label import LabelBinarizer
from .label import LabelEncoder
from .label import MultiLabelBinarizer
from .imputation import Imputer
__all__ = [
'Binarizer',
'FunctionTransformer',
'Imputer',
'KernelCenterer',
'LabelBinarizer',
'LabelEncoder',
'MultiLabelBinarizer',
'MinMaxScaler',
'MaxAbsScaler',
'Normalizer',
'OneHotEncoder',
'RobustScaler',
'StandardScaler',
'add_dummy_feature',
'PolynomialFeatures',
'binarize',
'normalize',
'scale',
'robust_scale',
'maxabs_scale',
'minmax_scale',
'label_binarize',
]
| mit |
HaoboGu/Structure-Similarity | Drugbank.py | 1 | 6376 | import random
import numpy as np
import time
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score
def read_drugbank_data():
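    # Both input files are assumed to be tab-separated with one pair per line:
    # "<drugbank_id_1>\t<drugbank_id_2>\t<value>", where value is the
    # interaction flag (0/1) or the chemical similarity score, respectively.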
# read interaction data
interaction_file = open('data/interacts.csv')
interact_dict = {}
line = interaction_file.readline()
while line:
db_id1, db_id2, interact_level = line[0:-1].split('\t')
interact_dict[db_id1, db_id2] = int(interact_level) # use multiple keys
line = interaction_file.readline()
interaction_file.close()
# read similarity data
similarity_file = open('data/chemicalsimilarity.csv')
similarity_dict = {}
line = similarity_file.readline()
while line:
db_id1, db_id2, similarity = line[0:-1].split('\t')
similarity_dict[db_id1, db_id2] = float(similarity)
line = similarity_file.readline()
similarity_file.close()
return interact_dict, similarity_dict
class Validation:
def __init__(self, interact_dict, similarity_dict):
self.interaction = interact_dict
self.similarity = similarity_dict
self.train_set = {}
self.validation_set = {}
self.sim_link = {}
self.positive_train = {}
self.max_sim_with_positive_link = {}
self.max_sim_with_positive_link_for_val = {}
def divide_data(self):
self.train_set = {}
self.validation_set = {}
        index = random.sample(range(0, 9892), 989)  # randomly hold out 1/10 of the interactions as the validation set
flag = 0
for i in self.interaction:
if flag in index:
self.validation_set[i] = self.interaction[i]
else:
self.train_set[i] = self.interaction[i]
flag += 1
# create known ddi dict:
for key in self.train_set:
if self.train_set[key] == 1:
self.positive_train[key] = 1
def compute_link_sim(self, key1, key2):
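        # Similarity between two drug pairs: average the chemical similarities
        # of the matched endpoints, for both possible matchings, and keep the
        # larger of the two.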
link_sim1 = (self.similarity[key1[0], key2[0]] + self.similarity[key1[1], key2[1]]) / 2.0
link_sim2 = (self.similarity[key1[0], key2[1]] + self.similarity[key1[1], key2[0]]) / 2.0
return max(link_sim1, link_sim2)
def create_simlink(self):
self.sim_link = {}
# num = 1
for inter_key in self.train_set:
max_link_sim = 0
for inter_key2 in self.positive_train:
if inter_key[0] in inter_key2 and inter_key[1] in inter_key2:
continue
else:
link_sim = self.compute_link_sim(inter_key, inter_key2)
if link_sim > max_link_sim:
max_link_sim = link_sim
self.sim_link[inter_key] = inter_key2
self.max_sim_with_positive_link[inter_key] = max_link_sim
# print('iter', num)
# num += 1
def create_simlink_for_val(self):
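        # For every validation pair, compute its maximum link similarity to any
        # known positive interaction in the training set; this single value is
        # the feature fed to the logistic regression.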
self.sim_link = {}
# num = 1
for inter_key in self.validation_set:
max_link_sim = 0
for inter_key2 in self.positive_train:
if inter_key[0] in inter_key2 and inter_key[1] in inter_key2:
continue
else:
link_sim = self.compute_link_sim(inter_key, inter_key2)
if link_sim > max_link_sim:
max_link_sim = link_sim
# self.sim_link[inter_key] = inter_key2
self.max_sim_with_positive_link_for_val[inter_key] = max_link_sim
sim_list = []
inter_list = []
for inter_key in self.validation_set:
feature = self.max_sim_with_positive_link_for_val[inter_key]
sim_list.append(feature)
inter_list.append(self.validation_set[inter_key])
return sim_list, inter_list
def create_train_array(self):
sim_list = []
inter_list = []
num = 0
for inter_key in self.train_set:
if self.train_set[inter_key] == 1:
feature = self.max_sim_with_positive_link[inter_key]
sim_list.append(feature)
inter_list.append(self.train_set[inter_key])
num += 1
print('num of positive samples in train set: ', num)
num = num * 3
for inter_key in self.train_set:
if self.train_set[inter_key] == 0:
feature = self.max_sim_with_positive_link[inter_key]
sim_list.append(feature)
inter_list.append(self.train_set[inter_key])
num = num - 1
if num == 0:
break
return sim_list, inter_list
def lr(self, sim_list, inter_list):
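        # Fit a logistic regression on the single link-similarity feature of
        # the (downsampled) training pairs, then predict on the validation set
        # and report the AUROC.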
lr = LogisticRegression(solver='sag')
sim_list = np.array(sim_list)
sim_list = sim_list.reshape(sim_list.shape[0], 1)
inter_list = np.array(inter_list)
inter_list = inter_list.reshape(inter_list.shape[0], 1)
lr.fit(sim_list, inter_list)
val_sim, val_inter = self.create_simlink_for_val()
val_sim = np.array(val_sim)
val_sim = val_sim.reshape(val_sim.shape[0], 1)
val_inter = np.array(val_inter).reshape(val_inter.__len__(), 1)
result = lr.predict(val_sim)
prob_re = lr.predict_proba(val_sim)
prob_re = prob_re.transpose()
auroc = roc_auc_score(val_inter, prob_re[1])
print('roc score:', auroc)
return result, prob_re, val_inter
start = time.time()
interact_dict, sim_dict = read_drugbank_data()
v = Validation(interact_dict, sim_dict)
v.divide_data()
v.create_simlink()
sim_list, inter_list = v.create_train_array()
result, prob_re, val_inter = v.lr(sim_list, inter_list)
TP = 0 # predict 1, actual 1
FP = 0 # predict 1, actual 0
TN = 0 # predict 0, actual 0
FN = 0 # predict 0, actual 1
for i in range(0, 989):
    if result[i] == 0 and val_inter[i] == 0:
TN += 1
elif result[i] == 0 and val_inter[i] == 1:
FN += 1
elif result[i] == 1 and val_inter[i] == 0:
FP += 1
elif result[i] == 1 and val_inter[i] == 1:
TP += 1
print('tp:', TP, ' tn:', TN, ' fp:', FP, ' fn:', FN)
precision = TP / (TP + FP)
recall = TP / (TP + FN)
print('precision:', precision)
print('recall:', recall)
print('f-score: ', 2 * precision * recall / (precision + recall))
end = time.time()
print(end-start) | mit |
BadrYoubiIdrissi/TIPE-Algorithme-Genetique | Source/NEAT/test.py | 1 | 2640 | # -*- coding: utf-8 -*-
"""
Created on Wed Oct 12 11:36:14 2016
@author: Badr Youbi Idrissi
"""
import pygame
import pygame.gfxdraw
import numpy as np
from pygame.locals import *
from individu import Individu
from phenotype import Phenotype
from population import Population
from datadisplay import DataDisplay
import utilitaires as ut
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
pygame.init()
screen = pygame.display.set_mode((860, 600), DOUBLEBUF | RESIZABLE)
pygame.display.set_caption("Test")
f = pygame.font.SysFont(pygame.font.get_default_font(), 20)
clock = pygame.time.Clock()
nb_e = 3
nb_s = 1
pop = Population(10, nb_e, nb_s)
pop.generer()
status = DataDisplay((0,0), padding = 20)
status.add("FPS", lambda : clock.get_fps())
status.add("Current generation", lambda : pop.generationCount)
status.add("Number of species", lambda : len(pop.especes))
status.add("Best fitness", pop.getBestFitness)
status.add("Best shared fitness", pop.getBestSharedFitness)
status.add("Average fitness", lambda : pop.averageFitness)
evol = False
while True:
clock.tick()
screen.fill((255,255,255))
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
exit()
elif event.type == KEYDOWN and event.key == K_UP:
nbPoints = 100
X,Y = np.meshgrid(np.linspace(0,1,nbPoints),np.linspace(0,1,nbPoints))
Z = np.zeros((nbPoints,nbPoints))
for i in range(nbPoints):
for j in range(nbPoints):
pop.best[-1].phenotype.evaluate(ut.entree('1;'+str(X[i,j])+';'+str(Y[i,j])))
Z[i,j] = pop.best[-1].output()
fig = plt.figure()
ax = fig.gca(projection='3d')
surf = ax.plot_surface(X, Y, Z)
plt.show()
elif event.type == KEYDOWN and event.key == K_DOWN:
l = [pop.contenu[i].fitness for i in range(pop.length)]
l2 = [pop.contenu[i].sharedFitness for i in range(pop.length)]
plt.plot(range(pop.length), l)
plt.plot(range(pop.length), l2)
plt.show()
elif event.type == KEYDOWN and event.key == K_e:
evol = not(evol)
elif event.type == VIDEORESIZE:
            pygame.display.set_mode((event.w, event.h), DOUBLEBUF | RESIZABLE)
if evol:
pop.evoluer()
if (pop.generationCount % 10 == 0):
pop.updateBest()
pop.draw(status.police)
status.draw()
pygame.display.flip()
| gpl-3.0 |
gef756/scipy | scipy/interpolate/interpolate.py | 25 | 80287 | """ Classes for interpolating values.
"""
from __future__ import division, print_function, absolute_import
__all__ = ['interp1d', 'interp2d', 'spline', 'spleval', 'splmake', 'spltopp',
'ppform', 'lagrange', 'PPoly', 'BPoly', 'RegularGridInterpolator',
'interpn']
import itertools
from numpy import (shape, sometrue, array, transpose, searchsorted,
ones, logical_or, atleast_1d, atleast_2d, ravel,
dot, poly1d, asarray, intp)
import numpy as np
import scipy.linalg
import scipy.special as spec
from scipy.special import comb
import math
import warnings
import functools
import operator
from scipy._lib.six import xrange, integer_types
from . import fitpack
from . import dfitpack
from . import _fitpack
from .polyint import _Interpolator1D
from . import _ppoly
from .fitpack2 import RectBivariateSpline
from .interpnd import _ndim_coords_from_arrays
def reduce_sometrue(a):
all = a
while len(shape(all)) > 1:
all = sometrue(all, axis=0)
return all
def prod(x):
"""Product of a list of numbers; ~40x faster vs np.prod for Python tuples"""
if len(x) == 0:
return 1
return functools.reduce(operator.mul, x)
def lagrange(x, w):
"""
Return a Lagrange interpolating polynomial.
Given two 1-D arrays `x` and `w,` returns the Lagrange interpolating
polynomial through the points ``(x, w)``.
Warning: This implementation is numerically unstable. Do not expect to
be able to use more than about 20 points even if they are chosen optimally.
Parameters
----------
x : array_like
`x` represents the x-coordinates of a set of datapoints.
w : array_like
`w` represents the y-coordinates of a set of datapoints, i.e. f(`x`).
Returns
-------
lagrange : numpy.poly1d instance
The Lagrange interpolating polynomial.
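    Examples
    --------
    A small usage sketch (sample points chosen only for illustration),
    interpolating the cube function through three points:
    >>> from scipy.interpolate import lagrange
    >>> x = np.array([0, 1, 2])
    >>> w = x**3
    >>> poly = lagrange(x, w)
    >>> [int(round(poly(xx))) for xx in x]  # reproduces the data points
    [0, 1, 8]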
"""
M = len(x)
p = poly1d(0.0)
for j in xrange(M):
pt = poly1d(w[j])
for k in xrange(M):
if k == j:
continue
fac = x[j]-x[k]
pt *= poly1d([1.0, -x[k]])/fac
p += pt
return p
# !! Need to find argument for keeping initialize. If it isn't
# !! found, get rid of it!
class interp2d(object):
"""
interp2d(x, y, z, kind='linear', copy=True, bounds_error=False,
fill_value=nan)
Interpolate over a 2-D grid.
`x`, `y` and `z` are arrays of values used to approximate some function
f: ``z = f(x, y)``. This class returns a function whose call method uses
spline interpolation to find the value of new points.
If `x` and `y` represent a regular grid, consider using
RectBivariateSpline.
Methods
-------
__call__
Parameters
----------
x, y : array_like
Arrays defining the data point coordinates.
If the points lie on a regular grid, `x` can specify the column
coordinates and `y` the row coordinates, for example::
>>> x = [0,1,2]; y = [0,3]; z = [[1,2,3], [4,5,6]]
Otherwise, `x` and `y` must specify the full coordinates for each
point, for example::
>>> x = [0,1,2,0,1,2]; y = [0,0,0,3,3,3]; z = [1,2,3,4,5,6]
If `x` and `y` are multi-dimensional, they are flattened before use.
z : array_like
The values of the function to interpolate at the data points. If
`z` is a multi-dimensional array, it is flattened before use. The
length of a flattened `z` array is either
len(`x`)*len(`y`) if `x` and `y` specify the column and row coordinates
or ``len(z) == len(x) == len(y)`` if `x` and `y` specify coordinates
for each point.
kind : {'linear', 'cubic', 'quintic'}, optional
The kind of spline interpolation to use. Default is 'linear'.
copy : bool, optional
If True, the class makes internal copies of x, y and z.
If False, references may be used. The default is to copy.
bounds_error : bool, optional
If True, when interpolated values are requested outside of the
domain of the input data (x,y), a ValueError is raised.
If False, then `fill_value` is used.
fill_value : number, optional
If provided, the value to use for points outside of the
interpolation domain. If omitted (None), values outside
the domain are extrapolated.
Returns
-------
values_x : ndarray, shape xi.shape[:-1] + values.shape[ndim:]
Interpolated values at input coordinates.
See Also
--------
RectBivariateSpline :
Much faster 2D interpolation if your input data is on a grid
bisplrep, bisplev :
Spline interpolation based on FITPACK
BivariateSpline : a more recent wrapper of the FITPACK routines
interp1d : one dimension version of this function
Notes
-----
The minimum number of data points required along the interpolation
axis is ``(k+1)**2``, with k=1 for linear, k=3 for cubic and k=5 for
quintic interpolation.
The interpolator is constructed by `bisplrep`, with a smoothing factor
of 0. If more control over smoothing is needed, `bisplrep` should be
used directly.
Examples
--------
Construct a 2-D grid and interpolate on it:
>>> from scipy import interpolate
>>> x = np.arange(-5.01, 5.01, 0.25)
>>> y = np.arange(-5.01, 5.01, 0.25)
>>> xx, yy = np.meshgrid(x, y)
>>> z = np.sin(xx**2+yy**2)
>>> f = interpolate.interp2d(x, y, z, kind='cubic')
Now use the obtained interpolation function and plot the result:
>>> import matplotlib.pyplot as plt
>>> xnew = np.arange(-5.01, 5.01, 1e-2)
>>> ynew = np.arange(-5.01, 5.01, 1e-2)
>>> znew = f(xnew, ynew)
>>> plt.plot(x, z[0, :], 'ro-', xnew, znew[0, :], 'b-')
>>> plt.show()
"""
def __init__(self, x, y, z, kind='linear', copy=True, bounds_error=False,
fill_value=None):
x = ravel(x)
y = ravel(y)
z = asarray(z)
rectangular_grid = (z.size == len(x) * len(y))
if rectangular_grid:
if z.ndim == 2:
if z.shape != (len(y), len(x)):
raise ValueError("When on a regular grid with x.size = m "
"and y.size = n, if z.ndim == 2, then z "
"must have shape (n, m)")
if not np.all(x[1:] >= x[:-1]):
j = np.argsort(x)
x = x[j]
z = z[:, j]
if not np.all(y[1:] >= y[:-1]):
j = np.argsort(y)
y = y[j]
z = z[j, :]
z = ravel(z.T)
else:
z = ravel(z)
if len(x) != len(y):
raise ValueError(
"x and y must have equal lengths for non rectangular grid")
if len(z) != len(x):
raise ValueError(
"Invalid length for input z for non rectangular grid")
try:
kx = ky = {'linear': 1,
'cubic': 3,
'quintic': 5}[kind]
except KeyError:
raise ValueError("Unsupported interpolation type.")
if not rectangular_grid:
# TODO: surfit is really not meant for interpolation!
self.tck = fitpack.bisplrep(x, y, z, kx=kx, ky=ky, s=0.0)
else:
nx, tx, ny, ty, c, fp, ier = dfitpack.regrid_smth(
x, y, z, None, None, None, None,
kx=kx, ky=ky, s=0.0)
self.tck = (tx[:nx], ty[:ny], c[:(nx - kx - 1) * (ny - ky - 1)],
kx, ky)
self.bounds_error = bounds_error
self.fill_value = fill_value
self.x, self.y, self.z = [array(a, copy=copy) for a in (x, y, z)]
self.x_min, self.x_max = np.amin(x), np.amax(x)
self.y_min, self.y_max = np.amin(y), np.amax(y)
def __call__(self, x, y, dx=0, dy=0, assume_sorted=False):
"""Interpolate the function.
Parameters
----------
x : 1D array
x-coordinates of the mesh on which to interpolate.
y : 1D array
y-coordinates of the mesh on which to interpolate.
dx : int >= 0, < kx
Order of partial derivatives in x.
dy : int >= 0, < ky
Order of partial derivatives in y.
assume_sorted : bool, optional
If False, values of `x` and `y` can be in any order and they are
sorted first.
If True, `x` and `y` have to be arrays of monotonically
increasing values.
Returns
-------
z : 2D array with shape (len(y), len(x))
The interpolated values.
"""
x = atleast_1d(x)
y = atleast_1d(y)
if x.ndim != 1 or y.ndim != 1:
raise ValueError("x and y should both be 1-D arrays")
if not assume_sorted:
x = np.sort(x)
y = np.sort(y)
if self.bounds_error or self.fill_value is not None:
out_of_bounds_x = (x < self.x_min) | (x > self.x_max)
out_of_bounds_y = (y < self.y_min) | (y > self.y_max)
any_out_of_bounds_x = np.any(out_of_bounds_x)
any_out_of_bounds_y = np.any(out_of_bounds_y)
if self.bounds_error and (any_out_of_bounds_x or any_out_of_bounds_y):
raise ValueError("Values out of range; x must be in %r, y in %r"
% ((self.x_min, self.x_max),
(self.y_min, self.y_max)))
z = fitpack.bisplev(x, y, self.tck, dx, dy)
z = atleast_2d(z)
z = transpose(z)
if self.fill_value is not None:
if any_out_of_bounds_x:
z[:, out_of_bounds_x] = self.fill_value
if any_out_of_bounds_y:
z[out_of_bounds_y, :] = self.fill_value
if len(z) == 1:
z = z[0]
return array(z)
class interp1d(_Interpolator1D):
"""
Interpolate a 1-D function.
`x` and `y` are arrays of values used to approximate some function f:
``y = f(x)``. This class returns a function whose call method uses
interpolation to find the value of new points.
Parameters
----------
x : (N,) array_like
A 1-D array of real values.
y : (...,N,...) array_like
A N-D array of real values. The length of `y` along the interpolation
axis must be equal to the length of `x`.
kind : str or int, optional
Specifies the kind of interpolation as a string
        ('linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic'
where 'slinear', 'quadratic' and 'cubic' refer to a spline
interpolation of first, second or third order) or as an integer
specifying the order of the spline interpolator to use.
Default is 'linear'.
axis : int, optional
Specifies the axis of `y` along which to interpolate.
Interpolation defaults to the last axis of `y`.
copy : bool, optional
If True, the class makes internal copies of x and y.
If False, references to `x` and `y` are used. The default is to copy.
bounds_error : bool, optional
If True, a ValueError is raised any time interpolation is attempted on
a value outside of the range of x (where extrapolation is
necessary). If False, out of bounds values are assigned `fill_value`.
By default, an error is raised.
fill_value : float, optional
If provided, then this value will be used to fill in for requested
points outside of the data range. If not provided, then the default
is NaN.
assume_sorted : bool, optional
If False, values of `x` can be in any order and they are sorted first.
If True, `x` has to be an array of monotonically increasing values.
Methods
-------
__call__
See Also
--------
splrep, splev
Spline interpolation/smoothing based on FITPACK.
UnivariateSpline : An object-oriented wrapper of the FITPACK routines.
interp2d : 2-D interpolation
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy import interpolate
>>> x = np.arange(0, 10)
>>> y = np.exp(-x/3.0)
>>> f = interpolate.interp1d(x, y)
>>> xnew = np.arange(0, 9, 0.1)
>>> ynew = f(xnew) # use interpolation function returned by `interp1d`
>>> plt.plot(x, y, 'o', xnew, ynew, '-')
>>> plt.show()
"""
def __init__(self, x, y, kind='linear', axis=-1,
copy=True, bounds_error=True, fill_value=np.nan,
assume_sorted=False):
""" Initialize a 1D linear interpolation class."""
_Interpolator1D.__init__(self, x, y, axis=axis)
self.copy = copy
self.bounds_error = bounds_error
self.fill_value = fill_value
if kind in ['zero', 'slinear', 'quadratic', 'cubic']:
order = {'nearest': 0, 'zero': 0,'slinear': 1,
'quadratic': 2, 'cubic': 3}[kind]
kind = 'spline'
elif isinstance(kind, int):
order = kind
kind = 'spline'
elif kind not in ('linear', 'nearest'):
raise NotImplementedError("%s is unsupported: Use fitpack "
"routines for other types." % kind)
x = array(x, copy=self.copy)
y = array(y, copy=self.copy)
if not assume_sorted:
ind = np.argsort(x)
x = x[ind]
y = np.take(y, ind, axis=axis)
if x.ndim != 1:
raise ValueError("the x array must have exactly one dimension.")
if y.ndim == 0:
raise ValueError("the y array must have at least one dimension.")
# Force-cast y to a floating-point type, if it's not yet one
if not issubclass(y.dtype.type, np.inexact):
y = y.astype(np.float_)
# Backward compatibility
self.axis = axis % y.ndim
# Interpolation goes internally along the first axis
self.y = y
y = self._reshape_yi(y)
# Adjust to interpolation kind; store reference to *unbound*
# interpolation methods, in order to avoid circular references to self
# stored in the bound instance methods, and therefore delayed garbage
# collection. See: http://docs.python.org/2/reference/datamodel.html
if kind in ('linear', 'nearest'):
# Make a "view" of the y array that is rotated to the interpolation
# axis.
minval = 2
if kind == 'nearest':
self.x_bds = (x[1:] + x[:-1]) / 2.0
self._call = self.__class__._call_nearest
else:
self._call = self.__class__._call_linear
else:
minval = order + 1
self._spline = splmake(x, y, order=order)
self._call = self.__class__._call_spline
if len(x) < minval:
raise ValueError("x and y arrays must have at "
"least %d entries" % minval)
self._kind = kind
self.x = x
self._y = y
def _call_linear(self, x_new):
        # 2. Find where in the original data the values to interpolate
# would be inserted.
# Note: If x_new[n] == x[m], then m is returned by searchsorted.
x_new_indices = searchsorted(self.x, x_new)
# 3. Clip x_new_indices so that they are within the range of
# self.x indices and at least 1. Removes mis-interpolation
# of x_new[n] = x[0]
x_new_indices = x_new_indices.clip(1, len(self.x)-1).astype(int)
# 4. Calculate the slope of regions that each x_new value falls in.
lo = x_new_indices - 1
hi = x_new_indices
x_lo = self.x[lo]
x_hi = self.x[hi]
y_lo = self._y[lo]
y_hi = self._y[hi]
# Note that the following two expressions rely on the specifics of the
# broadcasting semantics.
slope = (y_hi - y_lo) / (x_hi - x_lo)[:, None]
# 5. Calculate the actual value for each entry in x_new.
y_new = slope*(x_new - x_lo)[:, None] + y_lo
return y_new
def _call_nearest(self, x_new):
""" Find nearest neighbour interpolated y_new = f(x_new)."""
# 2. Find where in the averaged data the values to interpolate
# would be inserted.
# Note: use side='left' (right) to searchsorted() to define the
# halfway point to be nearest to the left (right) neighbour
x_new_indices = searchsorted(self.x_bds, x_new, side='left')
# 3. Clip x_new_indices so that they are within the range of x indices.
x_new_indices = x_new_indices.clip(0, len(self.x)-1).astype(intp)
# 4. Calculate the actual value for each entry in x_new.
y_new = self._y[x_new_indices]
return y_new
def _call_spline(self, x_new):
return spleval(self._spline, x_new)
def _evaluate(self, x_new):
# 1. Handle values in x_new that are outside of x. Throw error,
# or return a list of mask array indicating the outofbounds values.
# The behavior is set by the bounds_error variable.
x_new = asarray(x_new)
out_of_bounds = self._check_bounds(x_new)
y_new = self._call(self, x_new)
if len(y_new) > 0:
y_new[out_of_bounds] = self.fill_value
return y_new
def _check_bounds(self, x_new):
"""Check the inputs for being in the bounds of the interpolated data.
Parameters
----------
x_new : array
Returns
-------
out_of_bounds : bool array
The mask on x_new of values that are out of the bounds.
"""
# If self.bounds_error is True, we raise an error if any x_new values
# fall outside the range of x. Otherwise, we return an array indicating
# which values are outside the boundary region.
below_bounds = x_new < self.x[0]
above_bounds = x_new > self.x[-1]
# !! Could provide more information about which values are out of bounds
if self.bounds_error and below_bounds.any():
raise ValueError("A value in x_new is below the interpolation "
"range.")
if self.bounds_error and above_bounds.any():
raise ValueError("A value in x_new is above the interpolation "
"range.")
# !! Should we emit a warning if some values are out of bounds?
# !! matlab does not.
out_of_bounds = logical_or(below_bounds, above_bounds)
return out_of_bounds
class _PPolyBase(object):
"""
Base class for piecewise polynomials.
"""
__slots__ = ('c', 'x', 'extrapolate', 'axis')
def __init__(self, c, x, extrapolate=None, axis=0):
self.c = np.asarray(c)
self.x = np.ascontiguousarray(x, dtype=np.float64)
if extrapolate is None:
extrapolate = True
self.extrapolate = bool(extrapolate)
if not (0 <= axis < self.c.ndim - 1):
raise ValueError("%s must be between 0 and %s" % (axis, c.ndim-1))
self.axis = axis
if axis != 0:
# roll the interpolation axis to be the first one in self.c
# More specifically, the target shape for self.c is (k, m, ...),
# and axis !=0 means that we have c.shape (..., k, m, ...)
# ^
# axis
# So we roll two of them.
self.c = np.rollaxis(self.c, axis+1)
self.c = np.rollaxis(self.c, axis+1)
if self.x.ndim != 1:
raise ValueError("x must be 1-dimensional")
if self.x.size < 2:
raise ValueError("at least 2 breakpoints are needed")
if self.c.ndim < 2:
raise ValueError("c must have at least 2 dimensions")
if self.c.shape[0] == 0:
raise ValueError("polynomial must be at least of order 0")
if self.c.shape[1] != self.x.size-1:
raise ValueError("number of coefficients != len(x)-1")
if np.any(self.x[1:] - self.x[:-1] < 0):
raise ValueError("x-coordinates are not in increasing order")
dtype = self._get_dtype(self.c.dtype)
self.c = np.ascontiguousarray(self.c, dtype=dtype)
def _get_dtype(self, dtype):
if np.issubdtype(dtype, np.complexfloating) \
or np.issubdtype(self.c.dtype, np.complexfloating):
return np.complex_
else:
return np.float_
@classmethod
def construct_fast(cls, c, x, extrapolate=None, axis=0):
"""
Construct the piecewise polynomial without making checks.
Takes the same parameters as the constructor. Input arguments
`c` and `x` must be arrays of the correct shape and type. The
`c` array can only be of dtypes float and complex, and `x`
array must have dtype float.
"""
self = object.__new__(cls)
self.c = c
self.x = x
self.axis = axis
if extrapolate is None:
extrapolate = True
self.extrapolate = extrapolate
return self
def _ensure_c_contiguous(self):
"""
c and x may be modified by the user. The Cython code expects
that they are C contiguous.
"""
if not self.x.flags.c_contiguous:
self.x = self.x.copy()
if not self.c.flags.c_contiguous:
self.c = self.c.copy()
def extend(self, c, x, right=True):
"""
Add additional breakpoints and coefficients to the polynomial.
Parameters
----------
c : ndarray, size (k, m, ...)
Additional coefficients for polynomials in intervals
``self.x[-1] <= x < x_right[0]``, ``x_right[0] <= x < x_right[1]``,
..., ``x_right[m-2] <= x < x_right[m-1]``
x : ndarray, size (m,)
Additional breakpoints. Must be sorted and either to
the right or to the left of the current breakpoints.
right : bool, optional
Whether the new intervals are to the right or to the left
of the current intervals.
"""
c = np.asarray(c)
x = np.asarray(x)
if c.ndim < 2:
raise ValueError("invalid dimensions for c")
if x.ndim != 1:
raise ValueError("invalid dimensions for x")
if x.shape[0] != c.shape[1]:
raise ValueError("x and c have incompatible sizes")
if c.shape[2:] != self.c.shape[2:] or c.ndim != self.c.ndim:
raise ValueError("c and self.c have incompatible shapes")
if right:
if x[0] < self.x[-1]:
raise ValueError("new x are not to the right of current ones")
else:
if x[-1] > self.x[0]:
raise ValueError("new x are not to the left of current ones")
if c.size == 0:
return
dtype = self._get_dtype(c.dtype)
k2 = max(c.shape[0], self.c.shape[0])
c2 = np.zeros((k2, self.c.shape[1] + c.shape[1]) + self.c.shape[2:],
dtype=dtype)
if right:
c2[k2-self.c.shape[0]:, :self.c.shape[1]] = self.c
c2[k2-c.shape[0]:, self.c.shape[1]:] = c
self.x = np.r_[self.x, x]
else:
c2[k2-self.c.shape[0]:, :c.shape[1]] = c
c2[k2-c.shape[0]:, c.shape[1]:] = self.c
self.x = np.r_[x, self.x]
self.c = c2
def __call__(self, x, nu=0, extrapolate=None):
"""
Evaluate the piecewise polynomial or its derivative
Parameters
----------
x : array_like
Points to evaluate the interpolant at.
nu : int, optional
Order of derivative to evaluate. Must be non-negative.
extrapolate : bool, optional
            Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs.
Returns
-------
y : array_like
Interpolated values. Shape is determined by replacing
the interpolation axis in the original array with the shape of x.
Notes
-----
Derivatives are evaluated piecewise for each polynomial
segment, even if the polynomial is not differentiable at the
breakpoints. The polynomial intervals are considered half-open,
``[a, b)``, except for the last interval which is closed
``[a, b]``.
"""
if extrapolate is None:
extrapolate = self.extrapolate
x = np.asarray(x)
x_shape, x_ndim = x.shape, x.ndim
x = np.ascontiguousarray(x.ravel(), dtype=np.float_)
out = np.empty((len(x), prod(self.c.shape[2:])), dtype=self.c.dtype)
self._ensure_c_contiguous()
self._evaluate(x, nu, extrapolate, out)
out = out.reshape(x_shape + self.c.shape[2:])
if self.axis != 0:
# transpose to move the calculated values to the interpolation axis
l = list(range(out.ndim))
l = l[x_ndim:x_ndim+self.axis] + l[:x_ndim] + l[x_ndim+self.axis:]
out = out.transpose(l)
return out
class PPoly(_PPolyBase):
"""
Piecewise polynomial in terms of coefficients and breakpoints
The polynomial in the ith interval is ``x[i] <= xp < x[i+1]``::
S = sum(c[m, i] * (xp - x[i])**(k-m) for m in range(k+1))
where ``k`` is the degree of the polynomial. This representation
is the local power basis.
Parameters
----------
c : ndarray, shape (k, m, ...)
Polynomial coefficients, order `k` and `m` intervals
x : ndarray, shape (m+1,)
Polynomial breakpoints. These must be sorted in
increasing order.
extrapolate : bool, optional
        Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs. Default: True.
axis : int, optional
Interpolation axis. Default is zero.
Attributes
----------
x : ndarray
Breakpoints.
c : ndarray
Coefficients of the polynomials. They are reshaped
to a 3-dimensional array with the last dimension representing
the trailing dimensions of the original coefficient array.
axis : int
Interpolation axis.
Methods
-------
__call__
derivative
antiderivative
integrate
roots
extend
from_spline
from_bernstein_basis
construct_fast
See also
--------
BPoly : piecewise polynomials in the Bernstein basis
Notes
-----
High-order polynomials in the power basis can be numerically
unstable. Precision problems can start to appear for orders
larger than 20-30.
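    Examples
    --------
    A minimal sketch (coefficients chosen only for illustration): two linear
    pieces, ``2*(x - 0)`` on ``[0, 1)`` and ``2 - (x - 1)`` on ``[1, 2]``:
    >>> from scipy.interpolate import PPoly
    >>> c = np.array([[2.0, -1.0], [0.0, 2.0]])  # shape (k+1, m) = (2, 2)
    >>> x = np.array([0.0, 1.0, 2.0])
    >>> p = PPoly(c, x)
    >>> float(p(0.5)), float(p(1.5))
    (1.0, 1.5)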
"""
def _evaluate(self, x, nu, extrapolate, out):
_ppoly.evaluate(self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, x, nu, bool(extrapolate), out)
def derivative(self, nu=1):
"""
Construct a new piecewise polynomial representing the derivative.
Parameters
----------
nu : int, optional
Order of derivative to evaluate. (Default: 1)
If negative, the antiderivative is returned.
Returns
-------
pp : PPoly
Piecewise polynomial of order k2 = k - n representing the derivative
of this polynomial.
Notes
-----
Derivatives are evaluated piecewise for each polynomial
segment, even if the polynomial is not differentiable at the
breakpoints. The polynomial intervals are considered half-open,
``[a, b)``, except for the last interval which is closed
``[a, b]``.
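        Examples
        --------
        A quick sketch (coefficients chosen only for illustration): the
        derivative of the single piece ``(x - 0)**2`` on ``[0, 2]`` is
        ``2*(x - 0)``:
        >>> from scipy.interpolate import PPoly
        >>> p = PPoly(np.array([[1.0], [0.0], [0.0]]), np.array([0.0, 2.0]))
        >>> p.derivative().c.tolist()
        [[2.0], [0.0]]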
"""
if nu < 0:
return self.antiderivative(-nu)
# reduce order
if nu == 0:
c2 = self.c.copy()
else:
c2 = self.c[:-nu,:].copy()
if c2.shape[0] == 0:
# derivative of order 0 is zero
c2 = np.zeros((1,) + c2.shape[1:], dtype=c2.dtype)
# multiply by the correct rising factorials
factor = spec.poch(np.arange(c2.shape[0], 0, -1), nu)
c2 *= factor[(slice(None),) + (None,)*(c2.ndim-1)]
# construct a compatible polynomial
return self.construct_fast(c2, self.x, self.extrapolate, self.axis)
def antiderivative(self, nu=1):
"""
Construct a new piecewise polynomial representing the antiderivative.
        The antiderivative is also the indefinite integral of the function;
        differentiation is its inverse operation.
Parameters
----------
nu : int, optional
Order of antiderivative to evaluate. (Default: 1)
If negative, the derivative is returned.
Returns
-------
pp : PPoly
Piecewise polynomial of order k2 = k + n representing
the antiderivative of this polynomial.
Notes
-----
The antiderivative returned by this function is continuous and
continuously differentiable to order n-1, up to floating point
rounding error.
"""
if nu <= 0:
return self.derivative(-nu)
c = np.zeros((self.c.shape[0] + nu, self.c.shape[1]) + self.c.shape[2:],
dtype=self.c.dtype)
c[:-nu] = self.c
# divide by the correct rising factorials
factor = spec.poch(np.arange(self.c.shape[0], 0, -1), nu)
c[:-nu] /= factor[(slice(None),) + (None,)*(c.ndim-1)]
# fix continuity of added degrees of freedom
self._ensure_c_contiguous()
_ppoly.fix_continuity(c.reshape(c.shape[0], c.shape[1], -1),
self.x, nu - 1)
# construct a compatible polynomial
return self.construct_fast(c, self.x, self.extrapolate, self.axis)
def integrate(self, a, b, extrapolate=None):
"""
Compute a definite integral over a piecewise polynomial.
Parameters
----------
a : float
Lower integration bound
b : float
Upper integration bound
extrapolate : bool, optional
            Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs.
Returns
-------
ig : array_like
Definite integral of the piecewise polynomial over [a, b]
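        Examples
        --------
        A quick sketch (coefficients chosen only for illustration): the
        single linear piece ``2*x`` on ``[0, 2]`` integrates to 4:
        >>> from scipy.interpolate import PPoly
        >>> p = PPoly(np.array([[2.0], [0.0]]), np.array([0.0, 2.0]))
        >>> float(p.integrate(0, 2))
        4.0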
"""
if extrapolate is None:
extrapolate = self.extrapolate
# Swap integration bounds if needed
sign = 1
if b < a:
a, b = b, a
sign = -1
# Compute the integral
range_int = np.empty((prod(self.c.shape[2:]),), dtype=self.c.dtype)
self._ensure_c_contiguous()
_ppoly.integrate(self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, a, b, bool(extrapolate),
out=range_int)
# Return
range_int *= sign
return range_int.reshape(self.c.shape[2:])
def roots(self, discontinuity=True, extrapolate=None):
"""
Find real roots of the piecewise polynomial.
Parameters
----------
discontinuity : bool, optional
Whether to report sign changes across discontinuities at
breakpoints as roots.
extrapolate : bool, optional
Whether to return roots from the polynomial extrapolated
based on first and last intervals.
Returns
-------
roots : ndarray
Roots of the polynomial(s).
If the PPoly object describes multiple polynomials, the
            return value is an object array in which each element is an
ndarray containing the roots.
Notes
-----
This routine works only on real-valued polynomials.
If the piecewise polynomial contains sections that are
identically zero, the root list will contain the start point
of the corresponding interval, followed by a ``nan`` value.
If the polynomial is discontinuous across a breakpoint, and
there is a sign change across the breakpoint, this is reported
        if the `discontinuity` parameter is True.
Examples
--------
Finding roots of ``[x**2 - 1, (x - 1)**2]`` defined on intervals
``[-2, 1], [1, 2]``:
>>> from scipy.interpolate import PPoly
>>> pp = PPoly(np.array([[1, -4, 3], [1, 0, 0]]).T, [-2, 1, 2])
>>> pp.roots()
array([-1., 1.])
"""
if extrapolate is None:
extrapolate = self.extrapolate
self._ensure_c_contiguous()
if np.issubdtype(self.c.dtype, np.complexfloating):
raise ValueError("Root finding is only for "
"real-valued polynomials")
r = _ppoly.real_roots(self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, bool(discontinuity),
bool(extrapolate))
if self.c.ndim == 2:
return r[0]
else:
r2 = np.empty(prod(self.c.shape[2:]), dtype=object)
# this for-loop is equivalent to ``r2[...] = r``, but that's broken
# in numpy 1.6.0
for ii, root in enumerate(r):
r2[ii] = root
return r2.reshape(self.c.shape[2:])
@classmethod
def from_spline(cls, tck, extrapolate=None):
"""
Construct a piecewise polynomial from a spline
Parameters
----------
tck
A spline, as returned by `splrep`
extrapolate : bool, optional
            Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs. Default: True.
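        Examples
        --------
        A short sketch (sample data chosen only for illustration), assuming
        the spline is built with `splrep` from `scipy.interpolate`:
        >>> from scipy.interpolate import splrep, PPoly
        >>> x = np.linspace(0, 1, 11)
        >>> tck = splrep(x, np.sin(2 * np.pi * x))
        >>> pp = PPoly.from_spline(tck)
        >>> pp.c.shape[0]  # a cubic spline has k + 1 = 4 coefficients per piece
        4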
"""
t, c, k = tck
cvals = np.empty((k + 1, len(t)-1), dtype=c.dtype)
for m in xrange(k, -1, -1):
y = fitpack.splev(t[:-1], tck, der=m)
cvals[k - m, :] = y/spec.gamma(m+1)
return cls.construct_fast(cvals, t, extrapolate)
@classmethod
def from_bernstein_basis(cls, bp, extrapolate=None):
"""
Construct a piecewise polynomial in the power basis
from a polynomial in Bernstein basis.
Parameters
----------
bp : BPoly
A Bernstein basis polynomial, as created by BPoly
extrapolate : bool, optional
            Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs. Default: True.
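        Examples
        --------
        A small round-trip sketch (coefficients chosen only for
        illustration), converting the quadratic from the `BPoly` docstring:
        >>> from scipy.interpolate import BPoly, PPoly
        >>> bp = BPoly(np.array([[1.0], [2.0], [3.0]]), [0, 1])
        >>> pp = PPoly.from_bernstein_basis(bp)
        >>> bool(np.allclose(pp(0.5), bp(0.5)))
        True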
"""
dx = np.diff(bp.x)
k = bp.c.shape[0] - 1 # polynomial order
rest = (None,)*(bp.c.ndim-2)
c = np.zeros_like(bp.c)
for a in range(k+1):
factor = (-1)**(a) * comb(k, a) * bp.c[a]
for s in range(a, k+1):
val = comb(k-a, s-a) * (-1)**s
c[k-s] += factor * val / dx[(slice(None),)+rest]**s
if extrapolate is None:
extrapolate = bp.extrapolate
return cls.construct_fast(c, bp.x, extrapolate, bp.axis)
class BPoly(_PPolyBase):
"""
Piecewise polynomial in terms of coefficients and breakpoints
The polynomial in the ``i``-th interval ``x[i] <= xp < x[i+1]``
is written in the Bernstein polynomial basis::
S = sum(c[a, i] * b(a, k; x) for a in range(k+1))
where ``k`` is the degree of the polynomial, and::
        b(a, k; x) = comb(k, a) * t**a * (1 - t)**(k - a)
with ``t = (x - x[i]) / (x[i+1] - x[i])``.
Parameters
----------
c : ndarray, shape (k, m, ...)
Polynomial coefficients, order `k` and `m` intervals
x : ndarray, shape (m+1,)
Polynomial breakpoints. These must be sorted in
increasing order.
extrapolate : bool, optional
        Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs. Default: True.
axis : int, optional
Interpolation axis. Default is zero.
Attributes
----------
x : ndarray
Breakpoints.
c : ndarray
Coefficients of the polynomials. They are reshaped
to a 3-dimensional array with the last dimension representing
the trailing dimensions of the original coefficient array.
axis : int
Interpolation axis.
Methods
-------
__call__
extend
derivative
antiderivative
integrate
construct_fast
from_power_basis
from_derivatives
See also
--------
PPoly : piecewise polynomials in the power basis
Notes
-----
Properties of Bernstein polynomials are well documented in the literature.
Here's a non-exhaustive list:
.. [1] http://en.wikipedia.org/wiki/Bernstein_polynomial
.. [2] Kenneth I. Joy, Bernstein polynomials,
http://www.idav.ucdavis.edu/education/CAGDNotes/Bernstein-Polynomials.pdf
.. [3] E. H. Doha, A. H. Bhrawy, and M. A. Saker, Boundary Value Problems,
vol 2011, article ID 829546, doi:10.1155/2011/829543
Examples
--------
>>> from scipy.interpolate import BPoly
>>> x = [0, 1]
>>> c = [[1], [2], [3]]
>>> bp = BPoly(c, x)
This creates a 2nd order polynomial
.. math::
B(x) = 1 \\times b_{0, 2}(x) + 2 \\times b_{1, 2}(x) + 3 \\times b_{2, 2}(x) \\\\
= 1 \\times (1-x)^2 + 2 \\times 2 x (1 - x) + 3 \\times x^2
"""
def _evaluate(self, x, nu, extrapolate, out):
_ppoly.evaluate_bernstein(
self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, x, nu, bool(extrapolate), out)
def derivative(self, nu=1):
"""
Construct a new piecewise polynomial representing the derivative.
Parameters
----------
nu : int, optional
Order of derivative to evaluate. (Default: 1)
If negative, the antiderivative is returned.
Returns
-------
bp : BPoly
Piecewise polynomial of order k2 = k - nu representing the derivative
of this polynomial.
"""
if nu < 0:
return self.antiderivative(-nu)
if nu > 1:
bp = self
for k in range(nu):
bp = bp.derivative()
return bp
# reduce order
if nu == 0:
c2 = self.c.copy()
else:
# For a polynomial
# B(x) = \sum_{a=0}^{k} c_a b_{a, k}(x),
# we use the fact that
# b'_{a, k} = k ( b_{a-1, k-1} - b_{a, k-1} ),
# which leads to
# B'(x) = \sum_{a=0}^{k-1} (c_{a+1} - c_a) b_{a, k-1}
#
# finally, for an interval [y, y + dy] with dy != 1,
# we need to correct for an extra power of dy
rest = (None,)*(self.c.ndim-2)
k = self.c.shape[0] - 1
dx = np.diff(self.x)[(None, slice(None))+rest]
c2 = k * np.diff(self.c, axis=0) / dx
if c2.shape[0] == 0:
# derivative of order 0 is zero
c2 = np.zeros((1,) + c2.shape[1:], dtype=c2.dtype)
# construct a compatible polynomial
return self.construct_fast(c2, self.x, self.extrapolate, self.axis)
def antiderivative(self, nu=1):
"""
Construct a new piecewise polynomial representing the antiderivative.
Parameters
----------
nu : int, optional
Order of derivative to evaluate. (Default: 1)
If negative, the derivative is returned.
Returns
-------
bp : BPoly
Piecewise polynomial of order k2 = k + nu representing the
antiderivative of this polynomial.
"""
if nu <= 0:
return self.derivative(-nu)
if nu > 1:
bp = self
for k in range(nu):
bp = bp.antiderivative()
return bp
# Construct the indefinite integrals on individual intervals
c, x = self.c, self.x
k = c.shape[0]
c2 = np.zeros((k+1,) + c.shape[1:], dtype=c.dtype)
c2[1:, ...] = np.cumsum(c, axis=0) / k
delta = x[1:] - x[:-1]
c2 *= delta[(None, slice(None)) + (None,)*(c.ndim-2)]
# Now fix continuity: on the very first interval, take the integration
# constant to be zero; on an interval [x_j, x_{j+1}) with j>0,
# the integration constant is then equal to the jump of the `bp` at x_j.
# The latter is given by the coefficient of B_{n+1, n+1}
# *on the previous interval* (other B. polynomials are zero at the breakpoint)
# Finally, use the fact that BPs form a partition of unity.
c2[:,1:] += np.cumsum(c2[k,:], axis=0)[:-1]
return self.construct_fast(c2, x, self.extrapolate, axis=self.axis)
def integrate(self, a, b, extrapolate=None):
"""
Compute a definite integral over a piecewise polynomial.
Parameters
----------
a : float
Lower integration bound
b : float
Upper integration bound
extrapolate : bool, optional
Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs.
Defaults to ``self.extrapolate``.
Returns
-------
array_like
Definite integral of the piecewise polynomial over [a, b]
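        Examples
        --------
        A quick sketch (coefficients chosen only for illustration): the
        quadratic from the class docstring equals ``1 + 2*x`` on ``[0, 1]``,
        so its integral over that interval is 2:
        >>> from scipy.interpolate import BPoly
        >>> bp = BPoly([[1.0], [2.0], [3.0]], [0, 1])
        >>> float(bp.integrate(0, 1))
        2.0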
"""
# XXX: can probably use instead the fact that
        # \int_0^{1} B_{j, n}(x) dx = 1/(n+1)
ib = self.antiderivative()
if extrapolate is not None:
ib.extrapolate = extrapolate
return ib(b) - ib(a)
def extend(self, c, x, right=True):
k = max(self.c.shape[0], c.shape[0])
self.c = self._raise_degree(self.c, k - self.c.shape[0])
c = self._raise_degree(c, k - c.shape[0])
return _PPolyBase.extend(self, c, x, right)
extend.__doc__ = _PPolyBase.extend.__doc__
@classmethod
def from_power_basis(cls, pp, extrapolate=None):
"""
Construct a piecewise polynomial in Bernstein basis
from a power basis polynomial.
Parameters
----------
pp : PPoly
A piecewise polynomial in the power basis
extrapolate : bool, optional
            Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs. Default: True.
"""
dx = np.diff(pp.x)
k = pp.c.shape[0] - 1 # polynomial order
rest = (None,)*(pp.c.ndim-2)
c = np.zeros_like(pp.c)
for a in range(k+1):
factor = pp.c[a] / comb(k, k-a) * dx[(slice(None),)+rest]**(k-a)
for j in range(k-a, k+1):
c[j] += factor * comb(j, k-a)
if extrapolate is None:
extrapolate = pp.extrapolate
return cls.construct_fast(c, pp.x, extrapolate, pp.axis)
@classmethod
def from_derivatives(cls, xi, yi, orders=None, extrapolate=None):
"""Construct a piecewise polynomial in the Bernstein basis,
compatible with the specified values and derivatives at breakpoints.
Parameters
----------
xi : array_like
sorted 1D array of x-coordinates
yi : array_like or list of array_likes
``yi[i][j]`` is the ``j``-th derivative known at ``xi[i]``
orders : None or int or array_like of ints. Default: None.
Specifies the degree of local polynomials. If not None, some
derivatives are ignored.
extrapolate : bool, optional
            Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs. Default: True.
Notes
-----
If ``k`` derivatives are specified at a breakpoint ``x``, the
constructed polynomial is exactly ``k`` times continuously
differentiable at ``x``, unless the ``order`` is provided explicitly.
In the latter case, the smoothness of the polynomial at
the breakpoint is controlled by the ``order``.
        For each interval, the number of derivatives to match at each end
        is deduced from ``orders`` and the number of derivatives supplied.
        If possible, the same number of derivatives is used from each end;
        if that number is odd, the extra one is taken from the right
        endpoint of the interval. If one endpoint does not provide enough
        derivatives, the remainder is drawn from the other endpoint.
If the order is too high and not enough derivatives are available,
an exception is raised.
Examples
--------
>>> from scipy.interpolate import BPoly
>>> BPoly.from_derivatives([0, 1], [[1, 2], [3, 4]])
Creates a polynomial `f(x)` of degree 3, defined on `[0, 1]`
such that `f(0) = 1, df/dx(0) = 2, f(1) = 3, df/dx(1) = 4`
>>> BPoly.from_derivatives([0, 1, 2], [[0, 1], [0], [2]])
Creates a piecewise polynomial `f(x)`, such that
`f(0) = f(1) = 0`, `f(2) = 2`, and `df/dx(0) = 1`.
Based on the number of derivatives provided, the order of the
local polynomials is 2 on `[0, 1]` and 1 on `[1, 2]`.
Notice that no restriction is imposed on the derivatives at
`x = 1` and `x = 2`.
Indeed, the explicit form of the polynomial is::
f(x) = | x * (1 - x), 0 <= x < 1
| 2 * (x - 1), 1 <= x <= 2
So that f'(1-0) = -1 and f'(1+0) = 2
"""
xi = np.asarray(xi)
if len(xi) != len(yi):
raise ValueError("xi and yi need to have the same length")
        if np.any(xi[1:] - xi[:-1] <= 0):
raise ValueError("x coordinates are not in increasing order")
# number of intervals
m = len(xi) - 1
# global poly order is k-1, local orders are <=k and can vary
try:
k = max(len(yi[i]) + len(yi[i+1]) for i in range(m))
except TypeError:
raise ValueError("Using a 1D array for y? Please .reshape(-1, 1).")
if orders is None:
orders = [None] * m
else:
if isinstance(orders, integer_types):
orders = [orders] * m
k = max(k, max(orders))
if any(o <= 0 for o in orders):
raise ValueError("Orders must be positive.")
c = []
for i in range(m):
y1, y2 = yi[i], yi[i+1]
if orders[i] is None:
n1, n2 = len(y1), len(y2)
else:
n = orders[i]+1
n1 = min(n//2, len(y1))
n2 = min(n - n1, len(y2))
                n1 = min(n - n2, len(y1))
if n1+n2 != n:
raise ValueError("Point %g has %d derivatives, point %g"
" has %d derivatives, but order %d requested" %
(xi[i], len(y1), xi[i+1], len(y2), orders[i]))
if not (n1 <= len(y1) and n2 <= len(y2)):
raise ValueError("`order` input incompatible with"
" length y1 or y2.")
b = BPoly._construct_from_derivatives(xi[i], xi[i+1], y1[:n1], y2[:n2])
if len(b) < k:
b = BPoly._raise_degree(b, k - len(b))
c.append(b)
c = np.asarray(c)
return cls(c.swapaxes(0, 1), xi, extrapolate)
@staticmethod
def _construct_from_derivatives(xa, xb, ya, yb):
"""Compute the coefficients of a polynomial in the Bernstein basis
given the values and derivatives at the edges.
Return the coefficients of a polynomial in the Bernstein basis
defined on `[xa, xb]` and having the values and derivatives at the
endpoints ``xa`` and ``xb`` as specified by ``ya`` and ``yb``.
The polynomial constructed is of the minimal possible degree, i.e.,
if the lengths of ``ya`` and ``yb`` are ``na`` and ``nb``, the degree
of the polynomial is ``na + nb - 1``.
Parameters
----------
xa : float
Left-hand end point of the interval
xb : float
Right-hand end point of the interval
ya : array_like
Derivatives at ``xa``. ``ya[0]`` is the value of the function, and
``ya[i]`` for ``i > 0`` is the value of the ``i``-th derivative.
yb : array_like
Derivatives at ``xb``.
Returns
-------
array
coefficient array of a polynomial having specified derivatives
Notes
-----
This uses several facts from life of Bernstein basis functions.
First of all,
.. math:: b'_{a, n} = n (b_{a-1, n-1} - b_{a, n-1})
If B(x) is a linear combination of the form
.. math:: B(x) = \sum_{a=0}^{n} c_a b_{a, n},
        then :math:`B'(x) = n \sum_{a=0}^{n-1} (c_{a+1} - c_{a}) b_{a, n-1}`.
Iterating the latter one, one finds for the q-th derivative
.. math:: B^{q}(x) = n!/(n-q)! \sum_{a=0}^{n-q} Q_a b_{a, n-q},
with
.. math:: Q_a = \sum_{j=0}^{q} (-)^{j+q} comb(q, j) c_{j+a}
        This way, only `a=0` contributes to :math:`B^{q}(x = xa)`, and
`c_q` are found one by one by iterating `q = 0, ..., na`.
At `x = xb` it's the same with `a = n - q`.
"""
ya, yb = np.asarray(ya), np.asarray(yb)
if ya.shape[1:] != yb.shape[1:]:
raise ValueError('ya and yb have incompatible dimensions.')
dta, dtb = ya.dtype, yb.dtype
if (np.issubdtype(dta, np.complexfloating)
or np.issubdtype(dtb, np.complexfloating)):
dt = np.complex_
else:
dt = np.float_
na, nb = len(ya), len(yb)
n = na + nb
c = np.empty((na+nb,) + ya.shape[1:], dtype=dt)
# compute coefficients of a polynomial degree na+nb-1
# walk left-to-right
for q in range(0, na):
c[q] = ya[q] / spec.poch(n - q, q) * (xb - xa)**q
for j in range(0, q):
c[q] -= (-1)**(j+q) * comb(q, j) * c[j]
# now walk right-to-left
for q in range(0, nb):
c[-q-1] = yb[q] / spec.poch(n - q, q) * (-1)**q * (xb - xa)**q
for j in range(0, q):
c[-q-1] -= (-1)**(j+1) * comb(q, j+1) * c[-q+j]
return c
@staticmethod
def _raise_degree(c, d):
"""Raise a degree of a polynomial in the Bernstein basis.
Given the coefficients of a polynomial degree `k`, return (the
coefficients of) the equivalent polynomial of degree `k+d`.
Parameters
----------
c : array_like
coefficient array, 1D
d : integer
Returns
-------
array
coefficient array, 1D array of length `c.shape[0] + d`
Notes
-----
This uses the fact that a Bernstein polynomial `b_{a, k}` can be
identically represented as a linear combination of polynomials of
a higher degree `k+d`:
.. math:: b_{a, k} = comb(k, a) \sum_{j=0}^{d} b_{a+j, k+d} \
comb(d, j) / comb(k+d, a+j)
"""
if d == 0:
return c
k = c.shape[0] - 1
out = np.zeros((c.shape[0] + d,) + c.shape[1:], dtype=c.dtype)
for a in range(c.shape[0]):
f = c[a] * comb(k, a)
for j in range(d+1):
out[a+j] += f * comb(d, j) / comb(k+d, a+j)
return out
class RegularGridInterpolator(object):
"""
Interpolation on a regular grid in arbitrary dimensions
The data must be defined on a regular grid; the grid spacing however may be
uneven. Linear and nearest-neighbour interpolation are supported. After
setting up the interpolator object, the interpolation method (*linear* or
*nearest*) may be chosen at each evaluation.
Parameters
----------
points : tuple of ndarray of float, with shapes (m1, ), ..., (mn, )
The points defining the regular grid in n dimensions.
values : array_like, shape (m1, ..., mn, ...)
The data on the regular grid in n dimensions.
method : str, optional
The method of interpolation to perform. Supported are "linear" and
"nearest". This parameter will become the default for the object's
``__call__`` method. Default is "linear".
bounds_error : bool, optional
If True, when interpolated values are requested outside of the
domain of the input data, a ValueError is raised.
If False, then `fill_value` is used.
fill_value : number, optional
If provided, the value to use for points outside of the
interpolation domain. If None, values outside
the domain are extrapolated.
Methods
-------
__call__
Notes
-----
Contrary to LinearNDInterpolator and NearestNDInterpolator, this class
avoids expensive triangulation of the input data by taking advantage of the
regular grid structure.
.. versionadded:: 0.14
Examples
--------
Evaluate a simple example function on the points of a 3D grid:
>>> from scipy.interpolate import RegularGridInterpolator
>>> def f(x,y,z):
... return 2 * x**3 + 3 * y**2 - z
>>> x = np.linspace(1, 4, 11)
>>> y = np.linspace(4, 7, 22)
>>> z = np.linspace(7, 9, 33)
>>> data = f(*np.meshgrid(x, y, z, indexing='ij', sparse=True))
``data`` is now a 3D array with ``data[i,j,k] = f(x[i], y[j], z[k])``.
Next, define an interpolating function from this data:
>>> my_interpolating_function = RegularGridInterpolator((x, y, z), data)
Evaluate the interpolating function at the two points
``(x,y,z) = (2.1, 6.2, 8.3)`` and ``(3.3, 5.2, 7.1)``:
>>> pts = np.array([[2.1, 6.2, 8.3], [3.3, 5.2, 7.1]])
>>> my_interpolating_function(pts)
array([ 125.80469388, 146.30069388])
which is indeed a close approximation to
``[f(2.1, 6.2, 8.3), f(3.3, 5.2, 7.1)]``.
See also
--------
NearestNDInterpolator : Nearest neighbour interpolation on unstructured
data in N dimensions
LinearNDInterpolator : Piecewise linear interpolant on unstructured data
in N dimensions
References
----------
.. [1] Python package *regulargrid* by Johannes Buchner, see
https://pypi.python.org/pypi/regulargrid/
.. [2] Trilinear interpolation. (2013, January 17). In Wikipedia, The Free
Encyclopedia. Retrieved 27 Feb 2013 01:28.
http://en.wikipedia.org/w/index.php?title=Trilinear_interpolation&oldid=533448871
.. [3] Weiser, Alan, and Sergio E. Zarantonello. "A note on piecewise linear
and multilinear table interpolation in many dimensions." MATH.
COMPUT. 50.181 (1988): 189-196.
http://www.ams.org/journals/mcom/1988-50-181/S0025-5718-1988-0917826-0/S0025-5718-1988-0917826-0.pdf
"""
# this class is based on code originally programmed by Johannes Buchner,
# see https://github.com/JohannesBuchner/regulargrid
def __init__(self, points, values, method="linear", bounds_error=True,
fill_value=np.nan):
if method not in ["linear", "nearest"]:
raise ValueError("Method '%s' is not defined" % method)
self.method = method
self.bounds_error = bounds_error
if not hasattr(values, 'ndim'):
# allow reasonable duck-typed values
values = np.asarray(values)
if len(points) > values.ndim:
raise ValueError("There are %d point arrays, but values has %d "
"dimensions" % (len(points), values.ndim))
if hasattr(values, 'dtype') and hasattr(values, 'astype'):
if not np.issubdtype(values.dtype, np.inexact):
values = values.astype(float)
self.fill_value = fill_value
if fill_value is not None:
fill_value_dtype = np.asarray(fill_value).dtype
if (hasattr(values, 'dtype')
and not np.can_cast(fill_value_dtype, values.dtype,
casting='same_kind')):
raise ValueError("fill_value must be either 'None' or "
"of a type compatible with values")
for i, p in enumerate(points):
if not np.all(np.diff(p) > 0.):
raise ValueError("The points in dimension %d must be strictly "
"ascending" % i)
if not np.asarray(p).ndim == 1:
raise ValueError("The points in dimension %d must be "
"1-dimensional" % i)
if not values.shape[i] == len(p):
raise ValueError("There are %d points and %d values in "
"dimension %d" % (len(p), values.shape[i], i))
self.grid = tuple([np.asarray(p) for p in points])
self.values = values
def __call__(self, xi, method=None):
"""
Interpolation at coordinates
Parameters
----------
xi : ndarray of shape (..., ndim)
The coordinates to sample the gridded data at
method : str
The method of interpolation to perform. Supported are "linear" and
"nearest".
"""
method = self.method if method is None else method
if method not in ["linear", "nearest"]:
raise ValueError("Method '%s' is not defined" % method)
ndim = len(self.grid)
xi = _ndim_coords_from_arrays(xi, ndim=ndim)
if xi.shape[-1] != len(self.grid):
raise ValueError("The requested sample points xi have dimension "
"%d, but this RegularGridInterpolator has "
"dimension %d" % (xi.shape[1], ndim))
xi_shape = xi.shape
xi = xi.reshape(-1, xi_shape[-1])
if self.bounds_error:
for i, p in enumerate(xi.T):
if not np.logical_and(np.all(self.grid[i][0] <= p),
np.all(p <= self.grid[i][-1])):
raise ValueError("One of the requested xi is out of bounds "
"in dimension %d" % i)
indices, norm_distances, out_of_bounds = self._find_indices(xi.T)
if method == "linear":
result = self._evaluate_linear(indices, norm_distances, out_of_bounds)
elif method == "nearest":
result = self._evaluate_nearest(indices, norm_distances, out_of_bounds)
if not self.bounds_error and self.fill_value is not None:
result[out_of_bounds] = self.fill_value
return result.reshape(xi_shape[:-1] + self.values.shape[ndim:])
def _evaluate_linear(self, indices, norm_distances, out_of_bounds):
# slice for broadcasting over trailing dimensions in self.values
vslice = (slice(None),) + (None,)*(self.values.ndim - len(indices))
# find relevant values
        # each i and i+1 represents an edge
edges = itertools.product(*[[i, i + 1] for i in indices])
values = 0.
for edge_indices in edges:
weight = 1.
for ei, i, yi in zip(edge_indices, indices, norm_distances):
weight *= np.where(ei == i, 1 - yi, yi)
values += np.asarray(self.values[edge_indices]) * weight[vslice]
return values
def _evaluate_nearest(self, indices, norm_distances, out_of_bounds):
idx_res = []
for i, yi in zip(indices, norm_distances):
idx_res.append(np.where(yi <= .5, i, i + 1))
return self.values[idx_res]
def _find_indices(self, xi):
# find relevant edges between which xi are situated
indices = []
# compute distance to lower edge in unity units
norm_distances = []
# check for out of bounds xi
out_of_bounds = np.zeros((xi.shape[1]), dtype=bool)
# iterate through dimensions
for x, grid in zip(xi, self.grid):
i = np.searchsorted(grid, x) - 1
i[i < 0] = 0
i[i > grid.size - 2] = grid.size - 2
indices.append(i)
norm_distances.append((x - grid[i]) /
(grid[i + 1] - grid[i]))
if not self.bounds_error:
out_of_bounds += x < grid[0]
out_of_bounds += x > grid[-1]
return indices, norm_distances, out_of_bounds
def interpn(points, values, xi, method="linear", bounds_error=True,
fill_value=np.nan):
"""
Multidimensional interpolation on regular grids.
Parameters
----------
points : tuple of ndarray of float, with shapes (m1, ), ..., (mn, )
The points defining the regular grid in n dimensions.
values : array_like, shape (m1, ..., mn, ...)
The data on the regular grid in n dimensions.
xi : ndarray of shape (..., ndim)
The coordinates to sample the gridded data at
method : str, optional
The method of interpolation to perform. Supported are "linear" and
"nearest", and "splinef2d". "splinef2d" is only supported for
2-dimensional data.
bounds_error : bool, optional
If True, when interpolated values are requested outside of the
domain of the input data, a ValueError is raised.
If False, then `fill_value` is used.
fill_value : number, optional
If provided, the value to use for points outside of the
interpolation domain. If None, values outside
the domain are extrapolated. Extrapolation is not supported by method
"splinef2d".
Returns
-------
values_x : ndarray, shape xi.shape[:-1] + values.shape[ndim:]
Interpolated values at input coordinates.
Notes
-----
.. versionadded:: 0.14
See also
--------
NearestNDInterpolator : Nearest neighbour interpolation on unstructured
data in N dimensions
LinearNDInterpolator : Piecewise linear interpolant on unstructured data
in N dimensions
RegularGridInterpolator : Linear and nearest-neighbor Interpolation on a
regular grid in arbitrary dimensions
RectBivariateSpline : Bivariate spline approximation over a rectangular mesh
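    Examples
    --------
    A small sketch on a 2-D grid (grid and values chosen only for
    illustration); multilinear interpolation reproduces a bilinear function:
    >>> from scipy.interpolate import interpn
    >>> x = np.array([0.0, 1.0, 2.0])
    >>> y = np.array([0.0, 1.0])
    >>> values = x[:, None] + 10 * y[None, :]  # values[i, j] = x[i] + 10*y[j]
    >>> interpn((x, y), values, [[0.5, 0.5]]).tolist()
    [5.5]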
"""
# sanity check 'method' kwarg
if method not in ["linear", "nearest", "splinef2d"]:
raise ValueError("interpn only understands the methods 'linear', "
"'nearest', and 'splinef2d'. You provided %s." %
method)
if not hasattr(values, 'ndim'):
values = np.asarray(values)
ndim = values.ndim
if ndim > 2 and method == "splinef2d":
raise ValueError("The method spline2fd can only be used for "
"2-dimensional input data")
if not bounds_error and fill_value is None and method == "splinef2d":
raise ValueError("The method spline2fd does not support extrapolation.")
# sanity check consistency of input dimensions
if len(points) > ndim:
raise ValueError("There are %d point arrays, but values has %d "
"dimensions" % (len(points), ndim))
if len(points) != ndim and method == 'splinef2d':
raise ValueError("The method spline2fd can only be used for "
"scalar data with one point per coordinate")
# sanity check input grid
for i, p in enumerate(points):
if not np.all(np.diff(p) > 0.):
raise ValueError("The points in dimension %d must be strictly "
"ascending" % i)
if not np.asarray(p).ndim == 1:
raise ValueError("The points in dimension %d must be "
"1-dimensional" % i)
if not values.shape[i] == len(p):
raise ValueError("There are %d points and %d values in "
"dimension %d" % (len(p), values.shape[i], i))
grid = tuple([np.asarray(p) for p in points])
# sanity check requested xi
xi = _ndim_coords_from_arrays(xi, ndim=len(grid))
if xi.shape[-1] != len(grid):
raise ValueError("The requested sample points xi have dimension "
"%d, but this RegularGridInterpolator has "
"dimension %d" % (xi.shape[1], len(grid)))
for i, p in enumerate(xi.T):
if bounds_error and not np.logical_and(np.all(grid[i][0] <= p),
np.all(p <= grid[i][-1])):
raise ValueError("One of the requested xi is out of bounds "
"in dimension %d" % i)
# perform interpolation
if method == "linear":
interp = RegularGridInterpolator(points, values, method="linear",
bounds_error=bounds_error,
fill_value=fill_value)
return interp(xi)
elif method == "nearest":
interp = RegularGridInterpolator(points, values, method="nearest",
bounds_error=bounds_error,
fill_value=fill_value)
return interp(xi)
elif method == "splinef2d":
xi_shape = xi.shape
xi = xi.reshape(-1, xi.shape[-1])
# RectBivariateSpline doesn't support fill_value; we need to wrap here
idx_valid = np.all((grid[0][0] <= xi[:, 0], xi[:, 0] <= grid[0][-1],
grid[1][0] <= xi[:, 1], xi[:, 1] <= grid[1][-1]),
axis=0)
result = np.empty_like(xi[:, 0])
# make a copy of values for RectBivariateSpline
interp = RectBivariateSpline(points[0], points[1], values[:])
result[idx_valid] = interp.ev(xi[idx_valid, 0], xi[idx_valid, 1])
result[np.logical_not(idx_valid)] = fill_value
return result.reshape(xi_shape[:-1])
# backward compatibility wrapper
class ppform(PPoly):
"""
Deprecated piecewise polynomial class.
New code should use the `PPoly` class instead.
"""
def __init__(self, coeffs, breaks, fill=0.0, sort=False):
warnings.warn("ppform is deprecated -- use PPoly instead",
category=DeprecationWarning)
if sort:
breaks = np.sort(breaks)
else:
breaks = np.asarray(breaks)
PPoly.__init__(self, coeffs, breaks)
self.coeffs = self.c
self.breaks = self.x
self.K = self.coeffs.shape[0]
self.fill = fill
self.a = self.breaks[0]
self.b = self.breaks[-1]
def __call__(self, x):
return PPoly.__call__(self, x, 0, False)
def _evaluate(self, x, nu, extrapolate, out):
PPoly._evaluate(self, x, nu, extrapolate, out)
out[~((x >= self.a) & (x <= self.b))] = self.fill
return out
@classmethod
def fromspline(cls, xk, cvals, order, fill=0.0):
# Note: this spline representation is incompatible with FITPACK
N = len(xk)-1
sivals = np.empty((order+1, N), dtype=float)
for m in xrange(order, -1, -1):
fact = spec.gamma(m+1)
res = _fitpack._bspleval(xk[:-1], xk, cvals, order, m)
res /= fact
sivals[order-m, :] = res
return cls(sivals, xk, fill=fill)
def _dot0(a, b):
"""Similar to numpy.dot, but sum over last axis of a and 1st axis of b"""
if b.ndim <= 2:
return dot(a, b)
else:
axes = list(range(b.ndim))
axes.insert(-1, 0)
axes.pop(0)
return dot(a, b.transpose(axes))
def _find_smoothest(xk, yk, order, conds=None, B=None):
# construct Bmatrix, and Jmatrix
# e = J*c
# minimize norm(e,2) given B*c=yk
# if desired B can be given
# conds is ignored
N = len(xk)-1
K = order
if B is None:
B = _fitpack._bsplmat(order, xk)
J = _fitpack._bspldismat(order, xk)
u, s, vh = scipy.linalg.svd(B)
ind = K-1
V2 = vh[-ind:,:].T
V1 = vh[:-ind,:].T
A = dot(J.T,J)
tmp = dot(V2.T,A)
Q = dot(tmp,V2)
p = scipy.linalg.solve(Q, tmp)
tmp = dot(V2,p)
tmp = np.eye(N+K) - tmp
tmp = dot(tmp,V1)
tmp = dot(tmp,np.diag(1.0/s))
tmp = dot(tmp,u.T)
return _dot0(tmp, yk)
def _setdiag(a, k, v):
if not a.ndim == 2:
raise ValueError("Input array should be 2-D.")
M,N = a.shape
if k > 0:
start = k
num = N - k
else:
num = M + k
start = abs(k)*N
end = start + num*(N+1)-1
a.flat[start:end:(N+1)] = v
# Return the spline that minimizes the discontinuity of the
# "order-th" derivative; for order >= 2.
def _find_smoothest2(xk, yk):
N = len(xk) - 1
Np1 = N + 1
# find pseudo-inverse of B directly.
Bd = np.empty((Np1, N))
for k in range(-N,N):
if (k < 0):
l = np.arange(-k, Np1)
v = (l+k+1)
if ((k+1) % 2):
v = -v
else:
l = np.arange(k,N)
v = N - l
if ((k % 2)):
v = -v
_setdiag(Bd, k, v)
Bd /= (Np1)
V2 = np.ones((Np1,))
V2[1::2] = -1
V2 /= math.sqrt(Np1)
dk = np.diff(xk)
b = 2*np.diff(yk, axis=0)/dk
J = np.zeros((N-1,N+1))
idk = 1.0/dk
_setdiag(J,0,idk[:-1])
_setdiag(J,1,-idk[1:]-idk[:-1])
_setdiag(J,2,idk[1:])
A = dot(J.T,J)
val = dot(V2,dot(A,V2))
res1 = dot(np.outer(V2,V2)/val,A)
mk = dot(np.eye(Np1)-res1, _dot0(Bd,b))
return mk
def _get_spline2_Bb(xk, yk, kind, conds):
Np1 = len(xk)
dk = xk[1:]-xk[:-1]
if kind == 'not-a-knot':
# use banded-solver
nlu = (1,1)
B = ones((3,Np1))
alpha = 2*(yk[1:]-yk[:-1])/dk
zrs = np.zeros((1,)+yk.shape[1:])
row = (Np1-1)//2
b = np.concatenate((alpha[:row],zrs,alpha[row:]),axis=0)
B[0,row+2:] = 0
B[2,:(row-1)] = 0
B[0,row+1] = dk[row-1]
B[1,row] = -dk[row]-dk[row-1]
B[2,row-1] = dk[row]
return B, b, None, nlu
else:
raise NotImplementedError("quadratic %s is not available" % kind)
def _get_spline3_Bb(xk, yk, kind, conds):
# internal function to compute different tri-diagonal system
# depending on the kind of spline requested.
# conds is only used for 'second' and 'first'
Np1 = len(xk)
if kind in ['natural', 'second']:
if kind == 'natural':
m0, mN = 0.0, 0.0
else:
m0, mN = conds
# the matrix to invert is (N-1,N-1)
# use banded solver
beta = 2*(xk[2:]-xk[:-2])
alpha = xk[1:]-xk[:-1]
nlu = (1,1)
B = np.empty((3,Np1-2))
B[0,1:] = alpha[2:]
B[1,:] = beta
B[2,:-1] = alpha[1:-1]
dyk = yk[1:]-yk[:-1]
b = (dyk[1:]/alpha[1:] - dyk[:-1]/alpha[:-1])
b *= 6
b[0] -= m0
b[-1] -= mN
def append_func(mk):
# put m0 and mN into the correct shape for
# concatenation
ma = array(m0,copy=0,ndmin=yk.ndim)
mb = array(mN,copy=0,ndmin=yk.ndim)
if ma.shape[1:] != yk.shape[1:]:
ma = ma*(ones(yk.shape[1:])[np.newaxis,...])
if mb.shape[1:] != yk.shape[1:]:
mb = mb*(ones(yk.shape[1:])[np.newaxis,...])
mk = np.concatenate((ma,mk),axis=0)
mk = np.concatenate((mk,mb),axis=0)
return mk
return B, b, append_func, nlu
elif kind in ['clamped', 'endslope', 'first', 'not-a-knot', 'runout',
'parabolic']:
if kind == 'endslope':
# match slope of lagrange interpolating polynomial of
# order 3 at end-points.
x0,x1,x2,x3 = xk[:4]
sl_0 = (1./(x0-x1)+1./(x0-x2)+1./(x0-x3))*yk[0]
sl_0 += (x0-x2)*(x0-x3)/((x1-x0)*(x1-x2)*(x1-x3))*yk[1]
            sl_0 += (x0-x1)*(x0-x3)/((x2-x0)*(x2-x1)*(x2-x3))*yk[2]
sl_0 += (x0-x1)*(x0-x2)/((x3-x0)*(x3-x1)*(x3-x2))*yk[3]
xN3,xN2,xN1,xN0 = xk[-4:]
sl_N = (1./(xN0-xN1)+1./(xN0-xN2)+1./(xN0-xN3))*yk[-1]
sl_N += (xN0-xN2)*(xN0-xN3)/((xN1-xN0)*(xN1-xN2)*(xN1-xN3))*yk[-2]
            sl_N += (xN0-xN1)*(xN0-xN3)/((xN2-xN0)*(xN2-xN1)*(xN2-xN3))*yk[-3]
sl_N += (xN0-xN1)*(xN0-xN2)/((xN3-xN0)*(xN3-xN1)*(xN3-xN2))*yk[-4]
elif kind == 'clamped':
sl_0, sl_N = 0.0, 0.0
elif kind == 'first':
sl_0, sl_N = conds
# Now set up the (N+1)x(N+1) system of equations
beta = np.r_[0,2*(xk[2:]-xk[:-2]),0]
alpha = xk[1:]-xk[:-1]
gamma = np.r_[0,alpha[1:]]
B = np.diag(alpha,k=-1) + np.diag(beta) + np.diag(gamma,k=1)
d1 = alpha[0]
dN = alpha[-1]
if kind == 'not-a-knot':
d2 = alpha[1]
dN1 = alpha[-2]
B[0,:3] = [d2,-d1-d2,d1]
B[-1,-3:] = [dN,-dN1-dN,dN1]
elif kind == 'runout':
B[0,:3] = [1,-2,1]
B[-1,-3:] = [1,-2,1]
elif kind == 'parabolic':
B[0,:2] = [1,-1]
B[-1,-2:] = [-1,1]
elif kind == 'periodic':
raise NotImplementedError
elif kind == 'symmetric':
raise NotImplementedError
else:
B[0,:2] = [2*d1,d1]
B[-1,-2:] = [dN,2*dN]
# Set up RHS (b)
b = np.empty((Np1,)+yk.shape[1:])
dyk = (yk[1:]-yk[:-1])*1.0
if kind in ['not-a-knot', 'runout', 'parabolic']:
b[0] = b[-1] = 0.0
elif kind == 'periodic':
raise NotImplementedError
elif kind == 'symmetric':
raise NotImplementedError
else:
b[0] = (dyk[0]/d1 - sl_0)
b[-1] = -(dyk[-1]/dN - sl_N)
b[1:-1,...] = (dyk[1:]/alpha[1:]-dyk[:-1]/alpha[:-1])
b *= 6.0
return B, b, None, None
else:
raise ValueError("%s not supported" % kind)
# conds is a tuple of an array and a vector
# giving the left-hand and the right-hand side
# of the additional equations to add to B
def _find_user(xk, yk, order, conds, B):
lh = conds[0]
rh = conds[1]
B = np.concatenate((B, lh), axis=0)
w = np.concatenate((yk, rh), axis=0)
M, N = B.shape
if (M > N):
raise ValueError("over-specification of conditions")
elif (M < N):
return _find_smoothest(xk, yk, order, None, B)
else:
return scipy.linalg.solve(B, w)
# If conds is None, then use the not_a_knot condition
# at K-1 farthest separated points in the interval
def _find_not_a_knot(xk, yk, order, conds, B):
raise NotImplementedError
return _find_user(xk, yk, order, conds, B)
# If conds is None, then ensure zero-valued second
# derivative at K-1 farthest separated points
def _find_natural(xk, yk, order, conds, B):
raise NotImplementedError
return _find_user(xk, yk, order, conds, B)
# If conds is None, then ensure zero-valued first
# derivative at K-1 farthest separated points
def _find_clamped(xk, yk, order, conds, B):
raise NotImplementedError
return _find_user(xk, yk, order, conds, B)
def _find_fixed(xk, yk, order, conds, B):
raise NotImplementedError
return _find_user(xk, yk, order, conds, B)
# If conds is None, then use coefficient periodicity
# If conds is 'function' then use function periodicity
def _find_periodic(xk, yk, order, conds, B):
raise NotImplementedError
return _find_user(xk, yk, order, conds, B)
# Doesn't use conds
def _find_symmetric(xk, yk, order, conds, B):
raise NotImplementedError
return _find_user(xk, yk, order, conds, B)
# conds is a dictionary with multiple values
def _find_mixed(xk, yk, order, conds, B):
raise NotImplementedError
return _find_user(xk, yk, order, conds, B)
def splmake(xk, yk, order=3, kind='smoothest', conds=None):
"""
Return a representation of a spline given data-points at internal knots
Parameters
----------
xk : array_like
The input array of x values of rank 1
yk : array_like
The input array of y values of rank N. `yk` can be an N-d array to
represent more than one curve, through the same `xk` points. The first
dimension is assumed to be the interpolating dimension and is the same
length of `xk`.
order : int, optional
Order of the spline
kind : str, optional
Can be 'smoothest', 'not_a_knot', 'fixed', 'clamped', 'natural',
'periodic', 'symmetric', 'user', 'mixed' and it is ignored if order < 2
    conds : optional
        Extra conditions passed to the kind-specific solver (ignored by the
        default 'smoothest' kind).
Returns
-------
splmake : tuple
Return a (`xk`, `cvals`, `k`) representation of a spline given
data-points where the (internal) knots are at the data-points.
"""
yk = np.asanyarray(yk)
order = int(order)
if order < 0:
raise ValueError("order must not be negative")
if order == 0:
return xk, yk[:-1], order
elif order == 1:
return xk, yk, order
try:
func = eval('_find_%s' % kind)
except:
raise NotImplementedError
# the constraint matrix
B = _fitpack._bsplmat(order, xk)
coefs = func(xk, yk, order, conds, B)
return xk, coefs, order
def spleval(xck, xnew, deriv=0):
"""
Evaluate a fixed spline represented by the given tuple at the new x-values
The `xj` values are the interior knot points. The approximation
region is `xj[0]` to `xj[-1]`. If N+1 is the length of `xj`, then `cvals`
should have length N+k where `k` is the order of the spline.
Parameters
----------
(xj, cvals, k) : tuple
Parameters that define the fixed spline
xj : array_like
Interior knot points
    cvals : array_like
        Spline coefficients
k : int
Order of the spline
xnew : array_like
Locations to calculate spline
    deriv : int
        Order of the derivative to evaluate (0 evaluates the spline itself)
Returns
-------
spleval : ndarray
If `cvals` represents more than one curve (`cvals.ndim` > 1) and/or
`xnew` is N-d, then the result is `xnew.shape` + `cvals.shape[1:]`
providing the interpolation of multiple curves.
Notes
-----
Internally, an additional `k`-1 knot points are added on either side of
the spline.
"""
(xj,cvals,k) = xck
oldshape = np.shape(xnew)
xx = np.ravel(xnew)
sh = cvals.shape[1:]
res = np.empty(xx.shape + sh, dtype=cvals.dtype)
for index in np.ndindex(*sh):
sl = (slice(None),)+index
if issubclass(cvals.dtype.type, np.complexfloating):
res[sl].real = _fitpack._bspleval(xx,xj,cvals.real[sl],k,deriv)
res[sl].imag = _fitpack._bspleval(xx,xj,cvals.imag[sl],k,deriv)
else:
res[sl] = _fitpack._bspleval(xx,xj,cvals[sl],k,deriv)
res.shape = oldshape + sh
return res
def spltopp(xk, cvals, k):
"""Return a piece-wise polynomial object from a fixed-spline tuple.
"""
return ppform.fromspline(xk, cvals, k)
def spline(xk, yk, xnew, order=3, kind='smoothest', conds=None):
"""
Interpolate a curve at new points using a spline fit
Parameters
----------
xk, yk : array_like
The x and y values that define the curve.
xnew : array_like
The x values where spline should estimate the y values.
order : int
Default is 3.
kind : string
One of {'smoothest'}
    conds : optional
        Extra spline conditions forwarded to `splmake` (ignored by the
        default 'smoothest' kind).
Returns
-------
spline : ndarray
An array of y values; the spline evaluated at the positions `xnew`.
"""
return spleval(splmake(xk,yk,order=order,kind=kind,conds=conds),xnew)
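# A minimal usage sketch (illustrative only, not part of the original module):
# `spline` simply chains `splmake` and `spleval`, so interpolating sampled data
# onto a finer grid could look like this (the sample arrays are made up):
#
#     xk = np.linspace(0, 2 * np.pi, 10)
#     yk = np.sin(xk)
#     xnew = np.linspace(0, 2 * np.pi, 100)
#     ynew = spline(xk, yk, xnew, order=3, kind='smoothest')
#     # equivalent two-step form:
#     spl = splmake(xk, yk, order=3)
#     ynew2 = spleval(spl, xnew)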
| bsd-3-clause |
McIntyre-Lab/papers | newman_events_2017/python_workflow/programs/build_intron2border_junction_index.py | 1 | 5945 | #!/usr/bin/env python3
#######################################################################################################################
#
# DATE: 2017-12-15
# NAME: build_intron2border_junction_index.py
# AUTHOR: Jeremy R. B. Newman ([email protected])
#
# DESCRIPTION: This script creates an intron-to-border junction index file used by Event Analysis to report
# the read coverage of introns, their associated border junctions and flanking exonic regions (fusions), to aid
# the user in deciding whether there is evidence on intron retention, alternative/novel splice usage, etc.
# It takes the annotation CSVs for junctions, exonic regions and introns to assemble a complete intron/border index,
# where each border junction and intron are assigned to a single intron event, flanked by its neighboring
# exonic regions. Where the exonic regions of intron events can be assigned to multiple genes, then the output of this
# intron event is suppressed, to avoid instances of overlapping intron events.
#
# REQUIRED PACKAGES: pandas (tested with v0.19.2)
# argparse (tested with v1.1)
# logging (tested with v0.5.1.2)
#
#######################################################################################################################
# Import required packages
import pandas as pd
import logging
import argparse
import sqlite3
def getOptions():
# Parse command line arguments
parser = argparse.ArgumentParser(description="Generate an intron-to-border-junction index file for"
"interpreting read coverage of intronic regions")
parser.add_argument('--intron-annotation-file', dest="inIntrons", required=True, help="Input intron annotation CSV")
parser.add_argument("--junction-annotation-file", dest="inJunctions", required=True,
help="Input junction annotation CSV")
parser.add_argument("--output-intron-index", dest="outCSV", required=True,
help="Output event index CSV")
args = parser.parse_args()
return args
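# Example invocation (the file names below are hypothetical, shown only for
# illustration; the flags match the argparse definitions above):
#
#   python build_intron2border_junction_index.py \
#       --intron-annotation-file introns_annotation.csv \
#       --junction-annotation-file junctions_annotation.csv \
#       --output-intron-index intron_border_junction_index.csv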
def main():
# Connect to SQL database
con = sqlite3.connect(":memory:")
cur = con.cursor()
# Import intron and junction annotations
logger.info("Importing intron and junction annotations")
intronDF = pd.read_csv(args.inIntrons, usecols=('intron_id','chr','intron_start','intron_stop','gene_id',
'exonic_region_id_5prime','exonic_region_id_3prime'))
juncDF = pd.read_csv(args.inJunctions, usecols=('junction_id','chr','donor_stop','acceptor_start','transcript_id',
'gene_id','flag_border_junction'))
# Convert to SQL tables
intronDF.to_sql("intronInfo", con, if_exists="replace")
juncDF.to_sql("juncInfo", con, if_exists="replace")
# So border junctions and introns can be merged, donor_stop and acceptor start need to renamed to intron_start
# and intron_stop respectively. When the "donor exon" is an intron, donor_stop = intron_stop
# When the "acceptor exon" is an intron, acceptor_start = intron_start
# I am going to first map 5' border junctions to the 5' end of introns, then 3'
# border junctions for the 3' end of the introns.
# First, I want to expand concatenated gene IDs. Junctions with multiple gene ID shouldn't be retained in the
# final output, but iterate over these for completeness
cur.execute("""Select junction_id, chr , donor_stop , acceptor_start , gene_id from juncInfo WHERE flag_border_junction = 1""")
allBorders = cur.fetchall()
cur.execute("""CREATE TABLE IF NOT EXISTS borderInfo
(junction_id TEXT, chr TEXT, donor_stop INT, acceptor_start INT, gene_id TEXT)""")
for border in allBorders:
genes = border[4].split("|")
for gn in genes:
cur.execute("INSERT INTO borderInfo VALUES(:junction_id, :chr, :donor_stop, :acceptor_start, :gene_id)",
{"junction_id": border[0], "chr": border[1], "donor_stop": border[2],
"acceptor_start": border[3], "gene_id":gn})
# Merge INNER with intron table on chromosome, gene, and acceptor_start (as intron_start)
cur.execute("CREATE TABLE intronWstart AS SELECT in1.intron_id, in1.chr, in1.intron_start, in1.intron_stop, "
"in1.gene_id, in1.exonic_region_id_5prime, in2.junction_id AS border_junction_id_5prime "
"FROM intronInfo in1 INNER JOIN borderInfo in2 "
"ON in1.chr = in2.chr AND in1.gene_id = in2.gene_id AND in1.intron_start = in2.acceptor_start ;")
# Merge INNER with intron table on chromosome, gene, and donor_stop (as intron_stop)
cur.execute("CREATE TABLE intronWstop AS SELECT in1.intron_id, in1.chr, in1.gene_id, "
"in1.exonic_region_id_3prime, in2.junction_id AS border_junction_id_3prime "
"FROM intronInfo in1 INNER JOIN borderInfo in2 "
"ON in1.chr = in2.chr AND in1.gene_id = in2.gene_id AND in1.intron_stop = in2.donor_stop ;")
cur.execute("CREATE TABLE intronBorderIndex AS SELECT in1.*, in2.exonic_region_id_3prime,"
"in2.border_junction_id_3prime FROM intronWstart in1 "
"INNER JOIN intronWstop in2 ON in1.gene_id = in2.gene_id AND in1.intron_id = in2.intron_id ;")
intronBorderIndexDF = pd.read_sql("SELECT * FROM intronBorderIndex ORDER BY chr, intron_start, intron_stop ;", con)
# Write output index
with open(args.outCSV, 'w') as outIndex:
intronBorderIndexDF.to_csv(outIndex, encoding='utf-8', index=False)
if __name__ == '__main__':
# Parse command line arguments
global args
args = getOptions()
    # Setting up logger (basicConfig so that INFO-level messages are emitted)
    logging.basicConfig(level=logging.INFO,
                        format='%(asctime)s %(levelname)s: %(message)s')
    logger = logging.getLogger()
logger.info('Starting script')
# Calling main script
main()
logger.info('Script complete: index created!') | lgpl-3.0 |
LooseTerrifyingSpaceMonkey/DecMeg2014 | src/benchmark_pooling.py | 1 | 3692 | """DecMeg2014 example code.
Simple prediction of the class labels of the test set by:
- pooling all the training trials of all subjects in one dataset.
- Extracting the MEG data in the first 500ms from when the
stimulus starts.
- Using a linear classifier (logistic regression).
"""
import numpy as np
from sklearn.linear_model import LogisticRegression
from scipy.io import loadmat
def create_features(XX, tmin, tmax, sfreq, tmin_original=-0.5):
"""Creation of the feature space:
- restricting the time window of MEG data to [tmin, tmax]sec.
- Concatenating the 306 timeseries of each trial in one long
vector.
- Normalizing each feature independently (z-scoring).
"""
print "Applying the desired time window."
beginning = np.round((tmin - tmin_original) * sfreq).astype(np.int)
end = np.round((tmax - tmin_original) * sfreq).astype(np.int)
XX = XX[:, :, beginning:end].copy()
print "2D Reshaping: concatenating all 306 timeseries."
XX = XX.reshape(XX.shape[0], XX.shape[1] * XX.shape[2])
print "Features Normalization."
XX -= XX.mean(0)
XX = np.nan_to_num(XX / XX.std(0))
return XX
if __name__ == '__main__':
print "DecMeg2014: https://www.kaggle.com/c/decoding-the-human-brain"
print
subjects_train = range(1, 7) # use range(1, 17) for all subjects
print "Training on subjects", subjects_train
# We throw away all the MEG data outside the first 0.5sec from when
# the visual stimulus start:
tmin = 0.0
tmax = 0.500
print "Restricting MEG data to the interval [%s, %s]sec." % (tmin, tmax)
X_train = []
y_train = []
X_test = []
ids_test = []
print
print "Creating the trainset."
for subject in subjects_train:
filename = '../data/mat/train_subject%02d.mat' % subject
print "Loading", filename
data = loadmat(filename, squeeze_me=True)
XX = data['X']
yy = data['y']
sfreq = data['sfreq']
tmin_original = data['tmin']
print "Dataset summary:"
print "XX:", XX.shape
print "yy:", yy.shape
print "sfreq:", sfreq
XX = create_features(XX, tmin, tmax, sfreq)
X_train.append(XX)
y_train.append(yy)
X_train = np.vstack(X_train)
y_train = np.concatenate(y_train)
print "Trainset:", X_train.shape
print
print "Creating the testset."
subjects_test = range(17, 24)
for subject in subjects_test:
filename = '../data/mat/test_subject%02d.mat' % subject
print "Loading", filename
data = loadmat(filename, squeeze_me=True)
XX = data['X']
ids = data['Id']
sfreq = data['sfreq']
tmin_original = data['tmin']
print "Dataset summary:"
print "XX:", XX.shape
print "ids:", ids.shape
print "sfreq:", sfreq
XX = create_features(XX, tmin, tmax, sfreq)
X_test.append(XX)
ids_test.append(ids)
X_test = np.vstack(X_test)
ids_test = np.concatenate(ids_test)
print "Testset:", X_test.shape
print
clf = LogisticRegression(random_state=0) # Beware! You need 10Gb RAM to train LogisticRegression on all 16 subjects!
print "Classifier:"
print clf
print "Training."
clf.fit(X_train, y_train)
print "Predicting."
y_pred = clf.predict(X_test)
print
filename_submission = "../output/submissionBenchmarkPooling25s.csv"
print "Creating submission file", filename_submission
f = open(filename_submission, "w")
print >> f, "Id,Prediction"
for i in range(len(y_pred)):
print >> f, str(ids_test[i]) + "," + str(y_pred[i])
f.close()
print "Done." | gpl-2.0 |
kushalbhola/MyStuff | Practice/PythonApplication/env/Lib/site-packages/pandas/tests/io/parser/test_comment.py | 2 | 3819 | """
Tests that comments are properly handled during parsing
for all of the parsers defined in parsers.py
"""
from io import StringIO
import numpy as np
import pytest
from pandas import DataFrame
import pandas.util.testing as tm
@pytest.mark.parametrize("na_values", [None, ["NaN"]])
def test_comment(all_parsers, na_values):
parser = all_parsers
data = """A,B,C
1,2.,4.#hello world
5.,NaN,10.0
"""
expected = DataFrame(
[[1.0, 2.0, 4.0], [5.0, np.nan, 10.0]], columns=["A", "B", "C"]
)
result = parser.read_csv(StringIO(data), comment="#", na_values=na_values)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"read_kwargs", [dict(), dict(lineterminator="*"), dict(delim_whitespace=True)]
)
def test_line_comment(all_parsers, read_kwargs):
parser = all_parsers
data = """# empty
A,B,C
1,2.,4.#hello world
#ignore this line
5.,NaN,10.0
"""
if read_kwargs.get("delim_whitespace"):
data = data.replace(",", " ")
elif read_kwargs.get("lineterminator"):
if parser.engine != "c":
pytest.skip("Custom terminator not supported with Python engine")
data = data.replace("\n", read_kwargs.get("lineterminator"))
read_kwargs["comment"] = "#"
result = parser.read_csv(StringIO(data), **read_kwargs)
expected = DataFrame(
[[1.0, 2.0, 4.0], [5.0, np.nan, 10.0]], columns=["A", "B", "C"]
)
tm.assert_frame_equal(result, expected)
def test_comment_skiprows(all_parsers):
parser = all_parsers
data = """# empty
random line
# second empty line
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
# This should ignore the first four lines (including comments).
expected = DataFrame(
[[1.0, 2.0, 4.0], [5.0, np.nan, 10.0]], columns=["A", "B", "C"]
)
result = parser.read_csv(StringIO(data), comment="#", skiprows=4)
tm.assert_frame_equal(result, expected)
def test_comment_header(all_parsers):
parser = all_parsers
data = """# empty
# second empty line
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
# Header should begin at the second non-comment line.
expected = DataFrame(
[[1.0, 2.0, 4.0], [5.0, np.nan, 10.0]], columns=["A", "B", "C"]
)
result = parser.read_csv(StringIO(data), comment="#", header=1)
tm.assert_frame_equal(result, expected)
def test_comment_skiprows_header(all_parsers):
parser = all_parsers
data = """# empty
# second empty line
# third empty line
X,Y,Z
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
# Skiprows should skip the first 4 lines (including comments),
# while header should start from the second non-commented line,
# starting with line 5.
expected = DataFrame(
[[1.0, 2.0, 4.0], [5.0, np.nan, 10.0]], columns=["A", "B", "C"]
)
result = parser.read_csv(StringIO(data), comment="#", skiprows=4, header=1)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("comment_char", ["#", "~", "&", "^", "*", "@"])
def test_custom_comment_char(all_parsers, comment_char):
parser = all_parsers
data = "a,b,c\n1,2,3#ignore this!\n4,5,6#ignorethistoo"
result = parser.read_csv(
StringIO(data.replace("#", comment_char)), comment=comment_char
)
expected = DataFrame([[1, 2, 3], [4, 5, 6]], columns=["a", "b", "c"])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("header", ["infer", None])
def test_comment_first_line(all_parsers, header):
# see gh-4623
parser = all_parsers
data = "# notes\na,b,c\n# more notes\n1,2,3"
if header is None:
expected = DataFrame({0: ["a", "1"], 1: ["b", "2"], 2: ["c", "3"]})
else:
expected = DataFrame([[1, 2, 3]], columns=["a", "b", "c"])
result = parser.read_csv(StringIO(data), comment="#", header=header)
tm.assert_frame_equal(result, expected)
| apache-2.0 |
pnedunuri/scikit-learn | examples/applications/plot_model_complexity_influence.py | 323 | 6372 | """
==========================
Model Complexity Influence
==========================
Demonstrate how model complexity influences both prediction accuracy and
computational performance.
The dataset is the Boston Housing dataset (resp. 20 Newsgroups) for
regression (resp. classification).
For each class of models we vary the model complexity through the choice
of relevant model parameters and measure the influence on both computational
performance (latency) and predictive power (MSE or Hamming Loss).
"""
print(__doc__)
# Author: Eustache Diemert <[email protected]>
# License: BSD 3 clause
import time
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.parasite_axes import host_subplot
from mpl_toolkits.axisartist.axislines import Axes
from scipy.sparse.csr import csr_matrix
from sklearn import datasets
from sklearn.utils import shuffle
from sklearn.metrics import mean_squared_error
from sklearn.svm.classes import NuSVR
from sklearn.ensemble.gradient_boosting import GradientBoostingRegressor
from sklearn.linear_model.stochastic_gradient import SGDClassifier
from sklearn.metrics import hamming_loss
###############################################################################
# Routines
# initialize random generator
np.random.seed(0)
def generate_data(case, sparse=False):
"""Generate regression/classification data."""
bunch = None
if case == 'regression':
bunch = datasets.load_boston()
elif case == 'classification':
bunch = datasets.fetch_20newsgroups_vectorized(subset='all')
X, y = shuffle(bunch.data, bunch.target)
offset = int(X.shape[0] * 0.8)
X_train, y_train = X[:offset], y[:offset]
X_test, y_test = X[offset:], y[offset:]
if sparse:
X_train = csr_matrix(X_train)
X_test = csr_matrix(X_test)
else:
X_train = np.array(X_train)
X_test = np.array(X_test)
y_test = np.array(y_test)
y_train = np.array(y_train)
data = {'X_train': X_train, 'X_test': X_test, 'y_train': y_train,
'y_test': y_test}
return data
def benchmark_influence(conf):
"""
Benchmark influence of :changing_param: on both MSE and latency.
"""
prediction_times = []
prediction_powers = []
complexities = []
for param_value in conf['changing_param_values']:
conf['tuned_params'][conf['changing_param']] = param_value
estimator = conf['estimator'](**conf['tuned_params'])
print("Benchmarking %s" % estimator)
estimator.fit(conf['data']['X_train'], conf['data']['y_train'])
conf['postfit_hook'](estimator)
complexity = conf['complexity_computer'](estimator)
complexities.append(complexity)
start_time = time.time()
for _ in range(conf['n_samples']):
y_pred = estimator.predict(conf['data']['X_test'])
elapsed_time = (time.time() - start_time) / float(conf['n_samples'])
prediction_times.append(elapsed_time)
pred_score = conf['prediction_performance_computer'](
conf['data']['y_test'], y_pred)
prediction_powers.append(pred_score)
print("Complexity: %d | %s: %.4f | Pred. Time: %fs\n" % (
complexity, conf['prediction_performance_label'], pred_score,
elapsed_time))
return prediction_powers, prediction_times, complexities
def plot_influence(conf, mse_values, prediction_times, complexities):
"""
Plot influence of model complexity on both accuracy and latency.
"""
plt.figure(figsize=(12, 6))
host = host_subplot(111, axes_class=Axes)
plt.subplots_adjust(right=0.75)
par1 = host.twinx()
host.set_xlabel('Model Complexity (%s)' % conf['complexity_label'])
y1_label = conf['prediction_performance_label']
y2_label = "Time (s)"
host.set_ylabel(y1_label)
par1.set_ylabel(y2_label)
p1, = host.plot(complexities, mse_values, 'b-', label="prediction error")
p2, = par1.plot(complexities, prediction_times, 'r-',
label="latency")
host.legend(loc='upper right')
host.axis["left"].label.set_color(p1.get_color())
par1.axis["right"].label.set_color(p2.get_color())
plt.title('Influence of Model Complexity - %s' % conf['estimator'].__name__)
plt.show()
def _count_nonzero_coefficients(estimator):
a = estimator.coef_.toarray()
return np.count_nonzero(a)
###############################################################################
# main code
regression_data = generate_data('regression')
classification_data = generate_data('classification', sparse=True)
configurations = [
{'estimator': SGDClassifier,
'tuned_params': {'penalty': 'elasticnet', 'alpha': 0.001, 'loss':
'modified_huber', 'fit_intercept': True},
'changing_param': 'l1_ratio',
'changing_param_values': [0.25, 0.5, 0.75, 0.9],
'complexity_label': 'non_zero coefficients',
'complexity_computer': _count_nonzero_coefficients,
'prediction_performance_computer': hamming_loss,
'prediction_performance_label': 'Hamming Loss (Misclassification Ratio)',
'postfit_hook': lambda x: x.sparsify(),
'data': classification_data,
'n_samples': 30},
{'estimator': NuSVR,
'tuned_params': {'C': 1e3, 'gamma': 2 ** -15},
'changing_param': 'nu',
'changing_param_values': [0.1, 0.25, 0.5, 0.75, 0.9],
'complexity_label': 'n_support_vectors',
'complexity_computer': lambda x: len(x.support_vectors_),
'data': regression_data,
'postfit_hook': lambda x: x,
'prediction_performance_computer': mean_squared_error,
'prediction_performance_label': 'MSE',
'n_samples': 30},
{'estimator': GradientBoostingRegressor,
'tuned_params': {'loss': 'ls'},
'changing_param': 'n_estimators',
'changing_param_values': [10, 50, 100, 200, 500],
'complexity_label': 'n_trees',
'complexity_computer': lambda x: x.n_estimators,
'data': regression_data,
'postfit_hook': lambda x: x,
'prediction_performance_computer': mean_squared_error,
'prediction_performance_label': 'MSE',
'n_samples': 30},
]
for conf in configurations:
prediction_performances, prediction_times, complexities = \
benchmark_influence(conf)
plot_influence(conf, prediction_performances, prediction_times,
complexities)
| bsd-3-clause |
liu-jc/reinforcement-learning | lib/plotting.py | 4 | 3457 | import matplotlib
import numpy as np
import pandas as pd
from collections import namedtuple
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
EpisodeStats = namedtuple("Stats",["episode_lengths", "episode_rewards"])
def plot_cost_to_go_mountain_car(env, estimator, num_tiles=20):
x = np.linspace(env.observation_space.low[0], env.observation_space.high[0], num=num_tiles)
y = np.linspace(env.observation_space.low[1], env.observation_space.high[1], num=num_tiles)
X, Y = np.meshgrid(x, y)
Z = np.apply_along_axis(lambda _: -np.max(estimator.predict(_)), 2, np.dstack([X, Y]))
fig = plt.figure(figsize=(10, 5))
ax = fig.add_subplot(111, projection='3d')
surf = ax.plot_surface(X, Y, Z, rstride=1, cstride=1,
cmap=matplotlib.cm.coolwarm, vmin=-1.0, vmax=1.0)
ax.set_xlabel('Position')
ax.set_ylabel('Velocity')
ax.set_zlabel('Value')
ax.set_title("Mountain \"Cost To Go\" Function")
fig.colorbar(surf)
plt.show()
def plot_value_function(V, title="Value Function"):
"""
Plots the value function as a surface plot.
"""
min_x = min(k[0] for k in V.keys())
max_x = max(k[0] for k in V.keys())
min_y = min(k[1] for k in V.keys())
max_y = max(k[1] for k in V.keys())
x_range = np.arange(min_x, max_x + 1)
y_range = np.arange(min_y, max_y + 1)
X, Y = np.meshgrid(x_range, y_range)
# Find value for all (x, y) coordinates
Z_noace = np.apply_along_axis(lambda _: V[(_[0], _[1], False)], 2, np.dstack([X, Y]))
Z_ace = np.apply_along_axis(lambda _: V[(_[0], _[1], True)], 2, np.dstack([X, Y]))
def plot_surface(X, Y, Z, title):
fig = plt.figure(figsize=(20, 10))
ax = fig.add_subplot(111, projection='3d')
surf = ax.plot_surface(X, Y, Z, rstride=1, cstride=1,
cmap=matplotlib.cm.coolwarm, vmin=-1.0, vmax=1.0)
ax.set_xlabel('Player Sum')
ax.set_ylabel('Dealer Showing')
ax.set_zlabel('Value')
ax.set_title(title)
ax.view_init(ax.elev, -120)
fig.colorbar(surf)
plt.show()
plot_surface(X, Y, Z_noace, "{} (No Usable Ace)".format(title))
plot_surface(X, Y, Z_ace, "{} (Usable Ace)".format(title))
def plot_episode_stats(stats, smoothing_window=10, noshow=False):
# Plot the episode length over time
fig1 = plt.figure(figsize=(10,5))
plt.plot(stats.episode_lengths)
plt.xlabel("Episode")
plt.ylabel("Episode Length")
plt.title("Episode Length over Time")
if noshow:
plt.close(fig1)
else:
plt.show(fig1)
# Plot the episode reward over time
fig2 = plt.figure(figsize=(10,5))
rewards_smoothed = pd.Series(stats.episode_rewards).rolling(smoothing_window, min_periods=smoothing_window).mean()
plt.plot(rewards_smoothed)
plt.xlabel("Episode")
plt.ylabel("Episode Reward (Smoothed)")
plt.title("Episode Reward over Time (Smoothed over window size {})".format(smoothing_window))
if noshow:
plt.close(fig2)
else:
plt.show(fig2)
# Plot time steps and episode number
fig3 = plt.figure(figsize=(10,5))
plt.plot(np.cumsum(stats.episode_lengths), np.arange(len(stats.episode_lengths)))
plt.xlabel("Time Steps")
plt.ylabel("Episode")
plt.title("Episode per time step")
if noshow:
plt.close(fig3)
else:
plt.show(fig3)
return fig1, fig2, fig3
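# Usage sketch (illustrative only): after a training loop fills an
# EpisodeStats record, e.g.
#
#     stats = EpisodeStats(episode_lengths=np.zeros(num_episodes),
#                          episode_rewards=np.zeros(num_episodes))
#     # ... update stats.episode_lengths[i] / stats.episode_rewards[i] ...
#     plot_episode_stats(stats, smoothing_window=10)
#
# renders the three summary figures returned above. `num_episodes` is a
# placeholder for the caller's episode count.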
| mit |
r-mart/scikit-learn | sklearn/feature_extraction/text.py | 110 | 50157 | # -*- coding: utf-8 -*-
# Authors: Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Lars Buitinck <[email protected]>
# Robert Layton <[email protected]>
# Jochen Wersdörfer <[email protected]>
# Roman Sinayev <[email protected]>
#
# License: BSD 3 clause
"""
The :mod:`sklearn.feature_extraction.text` submodule gathers utilities to
build feature vectors from text documents.
"""
from __future__ import unicode_literals
import array
from collections import Mapping, defaultdict
import numbers
from operator import itemgetter
import re
import unicodedata
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.six.moves import xrange
from ..preprocessing import normalize
from .hashing import FeatureHasher
from .stop_words import ENGLISH_STOP_WORDS
from ..utils import deprecated
from ..utils.fixes import frombuffer_empty, bincount
from ..utils.validation import check_is_fitted
__all__ = ['CountVectorizer',
'ENGLISH_STOP_WORDS',
'TfidfTransformer',
'TfidfVectorizer',
'strip_accents_ascii',
'strip_accents_unicode',
'strip_tags']
def strip_accents_unicode(s):
"""Transform accentuated unicode symbols into their simple counterpart
Warning: the python-level loop and join operations make this
implementation 20 times slower than the strip_accents_ascii basic
normalization.
See also
--------
strip_accents_ascii
Remove accentuated char for any unicode symbol that has a direct
ASCII equivalent.
"""
return ''.join([c for c in unicodedata.normalize('NFKD', s)
if not unicodedata.combining(c)])
def strip_accents_ascii(s):
"""Transform accentuated unicode symbols into ascii or nothing
Warning: this solution is only suited for languages that have a direct
transliteration to ASCII symbols.
See also
--------
strip_accents_unicode
Remove accentuated char for any unicode symbol.
"""
nkfd_form = unicodedata.normalize('NFKD', s)
return nkfd_form.encode('ASCII', 'ignore').decode('ASCII')
def strip_tags(s):
"""Basic regexp based HTML / XML tag stripper function
For serious HTML/XML preprocessing you should rather use an external
library such as lxml or BeautifulSoup.
"""
return re.compile(r"<([^>]+)>", flags=re.UNICODE).sub(" ", s)
def _check_stop_list(stop):
if stop == "english":
return ENGLISH_STOP_WORDS
elif isinstance(stop, six.string_types):
raise ValueError("not a built-in stop list: %s" % stop)
elif stop is None:
return None
else: # assume it's a collection
return frozenset(stop)
class VectorizerMixin(object):
"""Provides common code for text vectorizers (tokenization logic)."""
_white_spaces = re.compile(r"\s\s+")
def decode(self, doc):
"""Decode the input into a string of unicode symbols
The decoding strategy depends on the vectorizer parameters.
"""
if self.input == 'filename':
with open(doc, 'rb') as fh:
doc = fh.read()
elif self.input == 'file':
doc = doc.read()
if isinstance(doc, bytes):
doc = doc.decode(self.encoding, self.decode_error)
if doc is np.nan:
raise ValueError("np.nan is an invalid document, expected byte or "
"unicode string.")
return doc
def _word_ngrams(self, tokens, stop_words=None):
"""Turn tokens into a sequence of n-grams after stop words filtering"""
# handle stop words
if stop_words is not None:
tokens = [w for w in tokens if w not in stop_words]
# handle token n-grams
min_n, max_n = self.ngram_range
if max_n != 1:
original_tokens = tokens
tokens = []
n_original_tokens = len(original_tokens)
for n in xrange(min_n,
min(max_n + 1, n_original_tokens + 1)):
for i in xrange(n_original_tokens - n + 1):
tokens.append(" ".join(original_tokens[i: i + n]))
return tokens
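    # For illustration (the example tokens are made up): with
    # ngram_range=(1, 2), _word_ngrams(['the', 'quick', 'fox']) returns
    # ['the', 'quick', 'fox', 'the quick', 'quick fox'].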
def _char_ngrams(self, text_document):
"""Tokenize text_document into a sequence of character n-grams"""
# normalize white spaces
text_document = self._white_spaces.sub(" ", text_document)
text_len = len(text_document)
ngrams = []
min_n, max_n = self.ngram_range
for n in xrange(min_n, min(max_n + 1, text_len + 1)):
for i in xrange(text_len - n + 1):
ngrams.append(text_document[i: i + n])
return ngrams
def _char_wb_ngrams(self, text_document):
"""Whitespace sensitive char-n-gram tokenization.
Tokenize text_document into a sequence of character n-grams
excluding any whitespace (operating only inside word boundaries)"""
# normalize white spaces
text_document = self._white_spaces.sub(" ", text_document)
min_n, max_n = self.ngram_range
ngrams = []
for w in text_document.split():
w = ' ' + w + ' '
w_len = len(w)
for n in xrange(min_n, max_n + 1):
offset = 0
ngrams.append(w[offset:offset + n])
while offset + n < w_len:
offset += 1
ngrams.append(w[offset:offset + n])
if offset == 0: # count a short word (w_len < n) only once
break
return ngrams
def build_preprocessor(self):
"""Return a function to preprocess the text before tokenization"""
if self.preprocessor is not None:
return self.preprocessor
# unfortunately python functools package does not have an efficient
# `compose` function that would have allowed us to chain a dynamic
# number of functions. However the cost of a lambda call is a few
# hundreds of nanoseconds which is negligible when compared to the
# cost of tokenizing a string of 1000 chars for instance.
noop = lambda x: x
# accent stripping
if not self.strip_accents:
strip_accents = noop
elif callable(self.strip_accents):
strip_accents = self.strip_accents
elif self.strip_accents == 'ascii':
strip_accents = strip_accents_ascii
elif self.strip_accents == 'unicode':
strip_accents = strip_accents_unicode
else:
raise ValueError('Invalid value for "strip_accents": %s' %
self.strip_accents)
if self.lowercase:
return lambda x: strip_accents(x.lower())
else:
return strip_accents
def build_tokenizer(self):
"""Return a function that splits a string into a sequence of tokens"""
if self.tokenizer is not None:
return self.tokenizer
token_pattern = re.compile(self.token_pattern)
return lambda doc: token_pattern.findall(doc)
def get_stop_words(self):
"""Build or fetch the effective stop words list"""
return _check_stop_list(self.stop_words)
def build_analyzer(self):
"""Return a callable that handles preprocessing and tokenization"""
if callable(self.analyzer):
return self.analyzer
preprocess = self.build_preprocessor()
if self.analyzer == 'char':
return lambda doc: self._char_ngrams(preprocess(self.decode(doc)))
elif self.analyzer == 'char_wb':
return lambda doc: self._char_wb_ngrams(
preprocess(self.decode(doc)))
elif self.analyzer == 'word':
stop_words = self.get_stop_words()
tokenize = self.build_tokenizer()
return lambda doc: self._word_ngrams(
tokenize(preprocess(self.decode(doc))), stop_words)
else:
raise ValueError('%s is not a valid tokenization scheme/analyzer' %
self.analyzer)
def _validate_vocabulary(self):
vocabulary = self.vocabulary
if vocabulary is not None:
if not isinstance(vocabulary, Mapping):
vocab = {}
for i, t in enumerate(vocabulary):
if vocab.setdefault(t, i) != i:
msg = "Duplicate term in vocabulary: %r" % t
raise ValueError(msg)
vocabulary = vocab
else:
indices = set(six.itervalues(vocabulary))
if len(indices) != len(vocabulary):
raise ValueError("Vocabulary contains repeated indices.")
for i in xrange(len(vocabulary)):
if i not in indices:
msg = ("Vocabulary of size %d doesn't contain index "
"%d." % (len(vocabulary), i))
raise ValueError(msg)
if not vocabulary:
raise ValueError("empty vocabulary passed to fit")
self.fixed_vocabulary_ = True
self.vocabulary_ = dict(vocabulary)
else:
self.fixed_vocabulary_ = False
def _check_vocabulary(self):
"""Check if vocabulary is empty or missing (not fit-ed)"""
msg = "%(name)s - Vocabulary wasn't fitted."
        check_is_fitted(self, 'vocabulary_', msg=msg)
if len(self.vocabulary_) == 0:
raise ValueError("Vocabulary is empty")
@property
@deprecated("The `fixed_vocabulary` attribute is deprecated and will be "
"removed in 0.18. Please use `fixed_vocabulary_` instead.")
def fixed_vocabulary(self):
return self.fixed_vocabulary_
class HashingVectorizer(BaseEstimator, VectorizerMixin):
"""Convert a collection of text documents to a matrix of token occurrences
It turns a collection of text documents into a scipy.sparse matrix holding
token occurrence counts (or binary occurrence information), possibly
normalized as token frequencies if norm='l1' or projected on the euclidean
unit sphere if norm='l2'.
This text vectorizer implementation uses the hashing trick to find the
token string name to feature integer index mapping.
This strategy has several advantages:
- it is very low memory scalable to large datasets as there is no need to
store a vocabulary dictionary in memory
- it is fast to pickle and un-pickle as it holds no state besides the
constructor parameters
- it can be used in a streaming (partial fit) or parallel pipeline as there
is no state computed during fit.
There are also a couple of cons (vs using a CountVectorizer with an
in-memory vocabulary):
- there is no way to compute the inverse transform (from feature indices to
string feature names) which can be a problem when trying to introspect
which features are most important to a model.
- there can be collisions: distinct tokens can be mapped to the same
feature index. However in practice this is rarely an issue if n_features
is large enough (e.g. 2 ** 18 for text classification problems).
- no IDF weighting as this would render the transformer stateful.
The hash function employed is the signed 32-bit version of Murmurhash3.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
        Otherwise the input is expected to be a sequence of items that can
        be of type string or bytes and that are analyzed directly.
encoding : string, default='utf-8'
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
        a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char', 'char_wb'} or callable
Whether the feature should be made of word or character n-grams.
Option 'char_wb' creates character n-grams only from text inside
word boundaries.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
ngram_range : tuple (min_n, max_n), default=(1, 1)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If 'english', a built-in stop word list for English is used.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
lowercase : boolean, default=True
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
if ``analyzer == 'word'``. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
n_features : integer, default=(2 ** 20)
The number of features (columns) in the output matrices. Small numbers
of features are likely to cause hash collisions, but large numbers
will cause larger coefficient dimensions in linear learners.
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
    binary : boolean, default=False
If True, all non zero counts are set to 1. This is useful for discrete
probabilistic models that model binary events rather than integer
counts.
    dtype : type, optional
Type of the matrix returned by fit_transform() or transform().
non_negative : boolean, default=False
Whether output matrices should contain non-negative values only;
effectively calls abs on the matrix prior to returning it.
When True, output values can be interpreted as frequencies.
When False, output values will have expected value zero.
See also
--------
CountVectorizer, TfidfVectorizer
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None,
lowercase=True, preprocessor=None, tokenizer=None,
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), analyzer='word', n_features=(2 ** 20),
binary=False, norm='l2', non_negative=False,
dtype=np.float64):
self.input = input
self.encoding = encoding
self.decode_error = decode_error
self.strip_accents = strip_accents
self.preprocessor = preprocessor
self.tokenizer = tokenizer
self.analyzer = analyzer
self.lowercase = lowercase
self.token_pattern = token_pattern
self.stop_words = stop_words
self.n_features = n_features
self.ngram_range = ngram_range
self.binary = binary
self.norm = norm
self.non_negative = non_negative
self.dtype = dtype
def partial_fit(self, X, y=None):
"""Does nothing: this transformer is stateless.
This method is just there to mark the fact that this transformer
can work in a streaming setup.
"""
return self
def fit(self, X, y=None):
"""Does nothing: this transformer is stateless."""
# triggers a parameter validation
self._get_hasher().fit(X, y=y)
return self
def transform(self, X, y=None):
"""Transform a sequence of documents to a document-term matrix.
Parameters
----------
X : iterable over raw text documents, length = n_samples
Samples. Each sample must be a text document (either bytes or
unicode strings, file name or file object depending on the
constructor argument) which will be tokenized and hashed.
y : (ignored)
Returns
-------
X : scipy.sparse matrix, shape = (n_samples, self.n_features)
Document-term matrix.
"""
analyzer = self.build_analyzer()
X = self._get_hasher().transform(analyzer(doc) for doc in X)
if self.binary:
X.data.fill(1)
if self.norm is not None:
X = normalize(X, norm=self.norm, copy=False)
return X
# Alias transform to fit_transform for convenience
fit_transform = transform
def _get_hasher(self):
return FeatureHasher(n_features=self.n_features,
input_type='string', dtype=self.dtype,
non_negative=self.non_negative)
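# Minimal usage sketch for HashingVectorizer (the documents are made up for
# the example; only the parameters shown are assumed):
#
#     hv = HashingVectorizer(n_features=2 ** 10, norm='l2')
#     X = hv.transform(['the quick brown fox', 'lazy dogs sleep all day'])
#     # X is a (2, 1024) scipy.sparse matrix; no vocabulary is stored, so
#     # transform() can be called without a prior fit().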
def _document_frequency(X):
"""Count the number of non-zero values for each feature in sparse X."""
if sp.isspmatrix_csr(X):
return bincount(X.indices, minlength=X.shape[1])
else:
return np.diff(sp.csc_matrix(X, copy=False).indptr)
class CountVectorizer(BaseEstimator, VectorizerMixin):
"""Convert a collection of text documents to a matrix of token counts
This implementation produces a sparse representation of the counts using
scipy.sparse.coo_matrix.
If you do not provide an a-priori dictionary and you do not use an analyzer
that does some kind of feature selection then the number of features will
be equal to the vocabulary size found by analyzing the data.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
        Otherwise the input is expected to be a sequence of items that can
        be of type string or bytes and that are analyzed directly.
encoding : string, 'utf-8' by default.
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
        a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char', 'char_wb'} or callable
Whether the feature should be made of word or character n-grams.
Option 'char_wb' creates character n-grams only from text inside
word boundaries.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
Only applies if ``analyzer == 'word'``.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
ngram_range : tuple (min_n, max_n)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If 'english', a built-in stop word list for English is used.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
If None, no stop words will be used. max_df can be set to a value
in the range [0.7, 1.0) to automatically detect and filter stop
words based on intra corpus document frequency of terms.
lowercase : boolean, True by default
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
        if ``analyzer == 'word'``. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
max_df : float in range [0.0, 1.0] or int, default=1.0
When building the vocabulary ignore terms that have a document
frequency strictly higher than the given threshold (corpus-specific
stop words).
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
min_df : float in range [0.0, 1.0] or int, default=1
When building the vocabulary ignore terms that have a document
frequency strictly lower than the given threshold. This value is also
called cut-off in the literature.
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
max_features : int or None, default=None
If not None, build a vocabulary that only consider the top
max_features ordered by term frequency across the corpus.
This parameter is ignored if vocabulary is not None.
vocabulary : Mapping or iterable, optional
Either a Mapping (e.g., a dict) where keys are terms and values are
indices in the feature matrix, or an iterable over terms. If not
given, a vocabulary is determined from the input documents. Indices
in the mapping should not be repeated and should not have any gap
between 0 and the largest index.
binary : boolean, default=False
If True, all non zero counts are set to 1. This is useful for discrete
probabilistic models that model binary events rather than integer
counts.
dtype : type, optional
Type of the matrix returned by fit_transform() or transform().
Attributes
----------
vocabulary_ : dict
A mapping of terms to feature indices.
stop_words_ : set
Terms that were ignored because they either:
- occurred in too many documents (`max_df`)
- occurred in too few documents (`min_df`)
- were cut off by feature selection (`max_features`).
This is only available if no vocabulary was given.
See also
--------
HashingVectorizer, TfidfVectorizer
Notes
-----
The ``stop_words_`` attribute can get large and increase the model size
when pickling. This attribute is provided only for introspection and can
be safely removed using delattr or set to None before pickling.
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None,
lowercase=True, preprocessor=None, tokenizer=None,
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), analyzer='word',
max_df=1.0, min_df=1, max_features=None,
vocabulary=None, binary=False, dtype=np.int64):
self.input = input
self.encoding = encoding
self.decode_error = decode_error
self.strip_accents = strip_accents
self.preprocessor = preprocessor
self.tokenizer = tokenizer
self.analyzer = analyzer
self.lowercase = lowercase
self.token_pattern = token_pattern
self.stop_words = stop_words
self.max_df = max_df
self.min_df = min_df
if max_df < 0 or min_df < 0:
raise ValueError("negative value for max_df of min_df")
self.max_features = max_features
if max_features is not None:
if (not isinstance(max_features, numbers.Integral) or
max_features <= 0):
raise ValueError(
"max_features=%r, neither a positive integer nor None"
% max_features)
self.ngram_range = ngram_range
self.vocabulary = vocabulary
self.binary = binary
self.dtype = dtype
def _sort_features(self, X, vocabulary):
"""Sort features by name
Returns a reordered matrix and modifies the vocabulary in place
"""
sorted_features = sorted(six.iteritems(vocabulary))
map_index = np.empty(len(sorted_features), dtype=np.int32)
for new_val, (term, old_val) in enumerate(sorted_features):
map_index[new_val] = old_val
vocabulary[term] = new_val
return X[:, map_index]
def _limit_features(self, X, vocabulary, high=None, low=None,
limit=None):
"""Remove too rare or too common features.
Prune features that are non zero in more samples than high or less
documents than low, modifying the vocabulary, and restricting it to
at most the limit most frequent.
This does not prune samples with zero features.
"""
if high is None and low is None and limit is None:
return X, set()
# Calculate a mask based on document frequencies
dfs = _document_frequency(X)
tfs = np.asarray(X.sum(axis=0)).ravel()
mask = np.ones(len(dfs), dtype=bool)
if high is not None:
mask &= dfs <= high
if low is not None:
mask &= dfs >= low
if limit is not None and mask.sum() > limit:
mask_inds = (-tfs[mask]).argsort()[:limit]
new_mask = np.zeros(len(dfs), dtype=bool)
new_mask[np.where(mask)[0][mask_inds]] = True
mask = new_mask
new_indices = np.cumsum(mask) - 1 # maps old indices to new
removed_terms = set()
for term, old_index in list(six.iteritems(vocabulary)):
if mask[old_index]:
vocabulary[term] = new_indices[old_index]
else:
del vocabulary[term]
removed_terms.add(term)
kept_indices = np.where(mask)[0]
if len(kept_indices) == 0:
raise ValueError("After pruning, no terms remain. Try a lower"
" min_df or a higher max_df.")
return X[:, kept_indices], removed_terms
def _count_vocab(self, raw_documents, fixed_vocab):
"""Create sparse feature matrix, and vocabulary where fixed_vocab=False
"""
if fixed_vocab:
vocabulary = self.vocabulary_
else:
# Add a new value when a new vocabulary item is seen
vocabulary = defaultdict()
vocabulary.default_factory = vocabulary.__len__
analyze = self.build_analyzer()
j_indices = _make_int_array()
indptr = _make_int_array()
indptr.append(0)
for doc in raw_documents:
for feature in analyze(doc):
try:
j_indices.append(vocabulary[feature])
except KeyError:
# Ignore out-of-vocabulary items for fixed_vocab=True
continue
indptr.append(len(j_indices))
if not fixed_vocab:
# disable defaultdict behaviour
vocabulary = dict(vocabulary)
if not vocabulary:
raise ValueError("empty vocabulary; perhaps the documents only"
" contain stop words")
j_indices = frombuffer_empty(j_indices, dtype=np.intc)
indptr = np.frombuffer(indptr, dtype=np.intc)
values = np.ones(len(j_indices))
X = sp.csr_matrix((values, j_indices, indptr),
shape=(len(indptr) - 1, len(vocabulary)),
dtype=self.dtype)
X.sum_duplicates()
return vocabulary, X
def fit(self, raw_documents, y=None):
"""Learn a vocabulary dictionary of all tokens in the raw documents.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
self
"""
self.fit_transform(raw_documents)
return self
def fit_transform(self, raw_documents, y=None):
"""Learn the vocabulary dictionary and return term-document matrix.
This is equivalent to fit followed by transform, but more efficiently
implemented.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
X : array, [n_samples, n_features]
Document-term matrix.
"""
# We intentionally don't call the transform method to make
# fit_transform overridable without unwanted side effects in
# TfidfVectorizer.
self._validate_vocabulary()
max_df = self.max_df
min_df = self.min_df
max_features = self.max_features
vocabulary, X = self._count_vocab(raw_documents,
self.fixed_vocabulary_)
if self.binary:
X.data.fill(1)
if not self.fixed_vocabulary_:
X = self._sort_features(X, vocabulary)
n_doc = X.shape[0]
max_doc_count = (max_df
if isinstance(max_df, numbers.Integral)
else max_df * n_doc)
min_doc_count = (min_df
if isinstance(min_df, numbers.Integral)
else min_df * n_doc)
if max_doc_count < min_doc_count:
raise ValueError(
"max_df corresponds to < documents than min_df")
X, self.stop_words_ = self._limit_features(X, vocabulary,
max_doc_count,
min_doc_count,
max_features)
self.vocabulary_ = vocabulary
return X
def transform(self, raw_documents):
"""Transform documents to document-term matrix.
Extract token counts out of raw text documents using the vocabulary
fitted with fit or the one provided to the constructor.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
X : sparse matrix, [n_samples, n_features]
Document-term matrix.
"""
if not hasattr(self, 'vocabulary_'):
self._validate_vocabulary()
self._check_vocabulary()
# use the same matrix-building strategy as fit_transform
_, X = self._count_vocab(raw_documents, fixed_vocab=True)
if self.binary:
X.data.fill(1)
return X
def inverse_transform(self, X):
"""Return terms per document with nonzero entries in X.
Parameters
----------
X : {array, sparse matrix}, shape = [n_samples, n_features]
Returns
-------
X_inv : list of arrays, len = n_samples
List of arrays of terms.
"""
self._check_vocabulary()
if sp.issparse(X):
# We need CSR format for fast row manipulations.
X = X.tocsr()
else:
# We need to convert X to a matrix, so that the indexing
# returns 2D objects
X = np.asmatrix(X)
n_samples = X.shape[0]
terms = np.array(list(self.vocabulary_.keys()))
indices = np.array(list(self.vocabulary_.values()))
inverse_vocabulary = terms[np.argsort(indices)]
return [inverse_vocabulary[X[i, :].nonzero()[1]].ravel()
for i in range(n_samples)]
def get_feature_names(self):
"""Array mapping from feature integer indices to feature name"""
self._check_vocabulary()
return [t for t, i in sorted(six.iteritems(self.vocabulary_),
key=itemgetter(1))]
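# Minimal usage sketch for CountVectorizer (the documents are made up for the
# example):
#
#     cv = CountVectorizer(stop_words='english')
#     X = cv.fit_transform(['the cat sat', 'the cat ran'])
#     # cv.get_feature_names() -> ['cat', 'ran', 'sat'] ('the' is removed as
#     # a stop word); X is a (2, 3) sparse matrix of raw token counts.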
def _make_int_array():
"""Construct an array.array of a type suitable for scipy.sparse indices."""
return array.array(str("i"))
class TfidfTransformer(BaseEstimator, TransformerMixin):
"""Transform a count matrix to a normalized tf or tf-idf representation
Tf means term-frequency while tf-idf means term-frequency times inverse
document-frequency. This is a common term weighting scheme in information
retrieval, that has also found good use in document classification.
The goal of using tf-idf instead of the raw frequencies of occurrence of a
token in a given document is to scale down the impact of tokens that occur
very frequently in a given corpus and that are hence empirically less
informative than features that occur in a small fraction of the training
corpus.
The actual formula used for tf-idf is tf * (idf + 1) = tf + tf * idf,
instead of tf * idf. The effect of this is that terms with zero idf, i.e.
that occur in all documents of a training set, will not be entirely
ignored. The formulas used to compute tf and idf depend on parameter
settings that correspond to the SMART notation used in IR, as follows:
Tf is "n" (natural) by default, "l" (logarithmic) when sublinear_tf=True.
Idf is "t" when use_idf is given, "n" (none) otherwise.
Normalization is "c" (cosine) when norm='l2', "n" (none) when norm=None.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
use_idf : boolean, default=True
Enable inverse-document-frequency reweighting.
smooth_idf : boolean, default=True
Smooth idf weights by adding one to document frequencies, as if an
extra document was seen containing every term in the collection
exactly once. Prevents zero divisions.
sublinear_tf : boolean, default=False
Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).
References
----------
.. [Yates2011] `R. Baeza-Yates and B. Ribeiro-Neto (2011). Modern
Information Retrieval. Addison Wesley, pp. 68-74.`
.. [MRS2008] `C.D. Manning, P. Raghavan and H. Schuetze (2008).
Introduction to Information Retrieval. Cambridge University
Press, pp. 118-120.`
"""
def __init__(self, norm='l2', use_idf=True, smooth_idf=True,
sublinear_tf=False):
self.norm = norm
self.use_idf = use_idf
self.smooth_idf = smooth_idf
self.sublinear_tf = sublinear_tf
def fit(self, X, y=None):
"""Learn the idf vector (global term weights)
Parameters
----------
X : sparse matrix, [n_samples, n_features]
a matrix of term/token counts
"""
if not sp.issparse(X):
X = sp.csc_matrix(X)
if self.use_idf:
n_samples, n_features = X.shape
df = _document_frequency(X)
# perform idf smoothing if required
df += int(self.smooth_idf)
n_samples += int(self.smooth_idf)
# log+1 instead of log makes sure terms with zero idf don't get
# suppressed entirely.
idf = np.log(float(n_samples) / df) + 1.0
self._idf_diag = sp.spdiags(idf,
diags=0, m=n_features, n=n_features)
return self
def transform(self, X, copy=True):
"""Transform a count matrix to a tf or tf-idf representation
Parameters
----------
X : sparse matrix, [n_samples, n_features]
a matrix of term/token counts
copy : boolean, default True
Whether to copy X and operate on the copy or perform in-place
operations.
Returns
-------
vectors : sparse matrix, [n_samples, n_features]
"""
        if hasattr(X, 'dtype') and np.issubdtype(X.dtype, np.floating):
# preserve float family dtype
X = sp.csr_matrix(X, copy=copy)
else:
# convert counts or binary occurrences to floats
X = sp.csr_matrix(X, dtype=np.float64, copy=copy)
n_samples, n_features = X.shape
if self.sublinear_tf:
np.log(X.data, X.data)
X.data += 1
if self.use_idf:
check_is_fitted(self, '_idf_diag', 'idf vector is not fitted')
expected_n_features = self._idf_diag.shape[0]
if n_features != expected_n_features:
raise ValueError("Input has n_features=%d while the model"
" has been trained with n_features=%d" % (
n_features, expected_n_features))
# *= doesn't work
X = X * self._idf_diag
if self.norm:
X = normalize(X, norm=self.norm, copy=False)
return X
@property
def idf_(self):
if hasattr(self, "_idf_diag"):
return np.ravel(self._idf_diag.sum(axis=0))
else:
return None
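# --- Illustrative sketch (not part of the original module) ---
# A hedged numeric illustration of the smoothed idf computed in
# TfidfTransformer.fit above: idf = log((1 + n_samples) / (1 + df)) + 1, so a
# term that occurs in every document gets idf == 1 and is kept, not zeroed out.
# The document-frequency vector below is made up for illustration.
def _smoothed_idf_sketch():
    n_samples = 4
    df = np.array([4, 2, 1])      # term seen in 4, 2 and 1 of the 4 documents
    idf = np.log((1.0 + n_samples) / (1.0 + df)) + 1.0
    return idf                    # approx. array([1.0, 1.51, 1.92])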
class TfidfVectorizer(CountVectorizer):
"""Convert a collection of raw documents to a matrix of TF-IDF features.
Equivalent to CountVectorizer followed by TfidfTransformer.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
        Otherwise the input is expected to be a sequence of items that can be
        of type string or bytes, which are analyzed directly.
encoding : string, 'utf-8' by default.
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
        a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char'} or callable
Whether the feature should be made of word or character n-grams.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
ngram_range : tuple (min_n, max_n)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If a string, it is passed to _check_stop_list and the appropriate stop
list is returned. 'english' is currently the only supported string
value.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
If None, no stop words will be used. max_df can be set to a value
in the range [0.7, 1.0) to automatically detect and filter stop
words based on intra corpus document frequency of terms.
lowercase : boolean, default True
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
if ``analyzer == 'word'``. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
max_df : float in range [0.0, 1.0] or int, default=1.0
When building the vocabulary ignore terms that have a document
frequency strictly higher than the given threshold (corpus-specific
stop words).
        If float, the parameter represents a proportion of documents; if
        integer, absolute counts.
This parameter is ignored if vocabulary is not None.
min_df : float in range [0.0, 1.0] or int, default=1
When building the vocabulary ignore terms that have a document
frequency strictly lower than the given threshold. This value is also
called cut-off in the literature.
        If float, the parameter represents a proportion of documents; if
        integer, absolute counts.
This parameter is ignored if vocabulary is not None.
max_features : int or None, default=None
If not None, build a vocabulary that only consider the top
max_features ordered by term frequency across the corpus.
This parameter is ignored if vocabulary is not None.
vocabulary : Mapping or iterable, optional
Either a Mapping (e.g., a dict) where keys are terms and values are
indices in the feature matrix, or an iterable over terms. If not
given, a vocabulary is determined from the input documents.
binary : boolean, default=False
If True, all non-zero term counts are set to 1. This does not mean
outputs will have only 0/1 values, only that the tf term in tf-idf
is binary. (Set idf and normalization to False to get 0/1 outputs.)
dtype : type, optional
Type of the matrix returned by fit_transform() or transform().
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
use_idf : boolean, default=True
Enable inverse-document-frequency reweighting.
smooth_idf : boolean, default=True
Smooth idf weights by adding one to document frequencies, as if an
extra document was seen containing every term in the collection
exactly once. Prevents zero divisions.
sublinear_tf : boolean, default=False
Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).
Attributes
----------
idf_ : array, shape = [n_features], or None
The learned idf vector (global term weights)
when ``use_idf`` is set to True, None otherwise.
stop_words_ : set
Terms that were ignored because they either:
- occurred in too many documents (`max_df`)
- occurred in too few documents (`min_df`)
- were cut off by feature selection (`max_features`).
This is only available if no vocabulary was given.
See also
--------
CountVectorizer
        Tokenize the documents, count the occurrences of each token and return
        them as a sparse matrix.
TfidfTransformer
Apply Term Frequency Inverse Document Frequency normalization to a
sparse matrix of occurrence counts.
Notes
-----
The ``stop_words_`` attribute can get large and increase the model size
when pickling. This attribute is provided only for introspection and can
be safely removed using delattr or set to None before pickling.
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None, lowercase=True,
preprocessor=None, tokenizer=None, analyzer='word',
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), max_df=1.0, min_df=1,
max_features=None, vocabulary=None, binary=False,
dtype=np.int64, norm='l2', use_idf=True, smooth_idf=True,
sublinear_tf=False):
super(TfidfVectorizer, self).__init__(
input=input, encoding=encoding, decode_error=decode_error,
strip_accents=strip_accents, lowercase=lowercase,
preprocessor=preprocessor, tokenizer=tokenizer, analyzer=analyzer,
stop_words=stop_words, token_pattern=token_pattern,
ngram_range=ngram_range, max_df=max_df, min_df=min_df,
max_features=max_features, vocabulary=vocabulary, binary=binary,
dtype=dtype)
self._tfidf = TfidfTransformer(norm=norm, use_idf=use_idf,
smooth_idf=smooth_idf,
sublinear_tf=sublinear_tf)
# Broadcast the TF-IDF parameters to the underlying transformer instance
# for easy grid search and repr
@property
def norm(self):
return self._tfidf.norm
@norm.setter
def norm(self, value):
self._tfidf.norm = value
@property
def use_idf(self):
return self._tfidf.use_idf
@use_idf.setter
def use_idf(self, value):
self._tfidf.use_idf = value
@property
def smooth_idf(self):
return self._tfidf.smooth_idf
@smooth_idf.setter
def smooth_idf(self, value):
self._tfidf.smooth_idf = value
@property
def sublinear_tf(self):
return self._tfidf.sublinear_tf
@sublinear_tf.setter
def sublinear_tf(self, value):
self._tfidf.sublinear_tf = value
@property
def idf_(self):
return self._tfidf.idf_
def fit(self, raw_documents, y=None):
"""Learn vocabulary and idf from training set.
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
Returns
-------
self : TfidfVectorizer
"""
X = super(TfidfVectorizer, self).fit_transform(raw_documents)
self._tfidf.fit(X)
return self
def fit_transform(self, raw_documents, y=None):
"""Learn vocabulary and idf, return term-document matrix.
This is equivalent to fit followed by transform, but more efficiently
implemented.
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
Returns
-------
X : sparse matrix, [n_samples, n_features]
Tf-idf-weighted document-term matrix.
"""
X = super(TfidfVectorizer, self).fit_transform(raw_documents)
self._tfidf.fit(X)
# X is already a transformed view of raw_documents so
# we set copy to False
return self._tfidf.transform(X, copy=False)
def transform(self, raw_documents, copy=True):
"""Transform documents to document-term matrix.
Uses the vocabulary and document frequencies (df) learned by fit (or
fit_transform).
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
copy : boolean, default True
Whether to copy X and operate on the copy or perform in-place
operations.
Returns
-------
X : sparse matrix, [n_samples, n_features]
Tf-idf-weighted document-term matrix.
"""
check_is_fitted(self, '_tfidf', 'The tfidf vector is not fitted')
X = super(TfidfVectorizer, self).transform(raw_documents)
return self._tfidf.transform(X, copy=False)
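# --- Illustrative usage sketch (not part of the original module) ---
# A hedged end-to-end example of the TfidfVectorizer defined above; the corpus
# and parameters are assumptions chosen only to show the fit/transform API.
def _tfidf_vectorizer_usage_sketch():
    corpus = ["the cat sat on the mat", "the dog sat on the log"]
    vectorizer = TfidfVectorizer(norm='l2', use_idf=True, smooth_idf=True)
    X_train = vectorizer.fit_transform(corpus)       # sparse tf-idf matrix
    X_new = vectorizer.transform(["the cat and the dog"])
    return X_train, X_new, vectorizer.get_feature_names()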
| bsd-3-clause |
ifuding/Kaggle | ADDC/Code/BarisKanber.py | 3 | 14070 | """
A non-blending lightGBM model that incorporates portions and ideas from various public kernels
This kernel gives LB 0.977 when the parameter 'debug' below is set to 0, but that full run requires a machine with ~32 GB of memory
"""
import pandas as pd
import time
import numpy as np
from sklearn.model_selection import train_test_split
import lightgbm as lgb
import gc
import matplotlib.pyplot as plt
import os
debug=1
if debug:
print('*** debug parameter set: this is a test run for debugging purposes ***')
def lgb_modelfit_nocv(params, dtrain, dvalid, predictors, target='target', objective='binary', metrics='auc',
feval=None, early_stopping_rounds=20, num_boost_round=3000, verbose_eval=10, categorical_features=None):
lgb_params = {
'boosting_type': 'gbdt',
'objective': objective,
'metric':metrics,
'learning_rate': 0.2,
        #'is_unbalance': 'true', # because training data is unbalanced (replaced with scale_pos_weight)
'num_leaves': 31, # we should let it be smaller than 2^(max_depth)
'max_depth': -1, # -1 means no limit
        'min_child_samples': 20,  # Minimum number of samples needed in a child (min_data_in_leaf)
'max_bin': 255, # Number of bucketed bin for feature values
'subsample': 0.6, # Subsample ratio of the training instance.
        'subsample_freq': 0,  # frequency of subsampling; <=0 disables it
'colsample_bytree': 0.3, # Subsample ratio of columns when constructing each tree.
'min_child_weight': 5, # Minimum sum of instance weight(hessian) needed in a child(leaf)
'subsample_for_bin': 200000, # Number of samples for constructing bin
        'min_split_gain': 0,  # lambda_l1, lambda_l2 and min_gain_to_split for regularization
'reg_alpha': 0, # L1 regularization term on weights
'reg_lambda': 0, # L2 regularization term on weights
'nthread': 4,
        'verbose': 0,
    }
lgb_params.update(params)
print("preparing validation datasets")
xgtrain = lgb.Dataset(dtrain[predictors].values, label=dtrain[target].values,
feature_name=predictors,
categorical_feature=categorical_features
)
xgvalid = lgb.Dataset(dvalid[predictors].values, label=dvalid[target].values,
feature_name=predictors,
categorical_feature=categorical_features
)
evals_results = {}
bst1 = lgb.train(lgb_params,
xgtrain,
valid_sets=[xgtrain, xgvalid],
valid_names=['train','valid'],
evals_result=evals_results,
num_boost_round=num_boost_round,
early_stopping_rounds=early_stopping_rounds,
verbose_eval=10,
feval=feval)
print("\nModel Report")
print("bst1.best_iteration: ", bst1.best_iteration)
print(metrics+":", evals_results['valid'][metrics][bst1.best_iteration-1])
return (bst1,bst1.best_iteration)
def DO(frm,to,fileno):
dtypes = {
'ip' : 'uint32',
'app' : 'uint16',
'device' : 'uint16',
'os' : 'uint16',
'channel' : 'uint16',
'is_attributed' : 'uint8',
'click_id' : 'uint32',
}
print('loading train data...',frm,to)
train_df = pd.read_csv("../input/train.csv", parse_dates=['click_time'], skiprows=range(1,frm), nrows=to-frm, dtype=dtypes, usecols=['ip','app','device','os', 'channel', 'click_time', 'is_attributed'])
print('loading test data...')
if debug:
test_df = pd.read_csv("../input/test.csv", nrows=100000, parse_dates=['click_time'], dtype=dtypes, usecols=['ip','app','device','os', 'channel', 'click_time', 'click_id'])
else:
test_df = pd.read_csv("../input/test.csv", parse_dates=['click_time'], dtype=dtypes, usecols=['ip','app','device','os', 'channel', 'click_time', 'click_id'])
len_train = len(train_df)
train_df=train_df.append(test_df)
del test_df
gc.collect()
print('Extracting new features...')
train_df['hour'] = pd.to_datetime(train_df.click_time).dt.hour.astype('uint8')
train_df['day'] = pd.to_datetime(train_df.click_time).dt.day.astype('uint8')
gc.collect()
naddfeat=9
for i in range(0,naddfeat):
if i==0: selcols=['ip', 'channel']; QQ=4;
if i==1: selcols=['ip', 'device', 'os', 'app']; QQ=5;
if i==2: selcols=['ip', 'day', 'hour']; QQ=4;
if i==3: selcols=['ip', 'app']; QQ=4;
if i==4: selcols=['ip', 'app', 'os']; QQ=4;
if i==5: selcols=['ip', 'device']; QQ=4;
if i==6: selcols=['app', 'channel']; QQ=4;
if i==7: selcols=['ip', 'os']; QQ=5;
if i==8: selcols=['ip', 'device', 'os', 'app']; QQ=4;
print('selcols',selcols,'QQ',QQ)
filename='X%d_%d_%d.csv'%(i,frm,to)
if os.path.exists(filename):
if QQ==5:
gp=pd.read_csv(filename,header=None)
train_df['X'+str(i)]=gp
else:
gp=pd.read_csv(filename)
train_df = train_df.merge(gp, on=selcols[0:len(selcols)-1], how='left')
else:
if QQ==0:
gp = train_df[selcols].groupby(by=selcols[0:len(selcols)-1])[selcols[len(selcols)-1]].count().reset_index().\
rename(index=str, columns={selcols[len(selcols)-1]: 'X'+str(i)})
train_df = train_df.merge(gp, on=selcols[0:len(selcols)-1], how='left')
if QQ==1:
gp = train_df[selcols].groupby(by=selcols[0:len(selcols)-1])[selcols[len(selcols)-1]].mean().reset_index().\
rename(index=str, columns={selcols[len(selcols)-1]: 'X'+str(i)})
train_df = train_df.merge(gp, on=selcols[0:len(selcols)-1], how='left')
if QQ==2:
gp = train_df[selcols].groupby(by=selcols[0:len(selcols)-1])[selcols[len(selcols)-1]].var().reset_index().\
rename(index=str, columns={selcols[len(selcols)-1]: 'X'+str(i)})
train_df = train_df.merge(gp, on=selcols[0:len(selcols)-1], how='left')
if QQ==3:
gp = train_df[selcols].groupby(by=selcols[0:len(selcols)-1])[selcols[len(selcols)-1]].skew().reset_index().\
rename(index=str, columns={selcols[len(selcols)-1]: 'X'+str(i)})
train_df = train_df.merge(gp, on=selcols[0:len(selcols)-1], how='left')
if QQ==4:
gp = train_df[selcols].groupby(by=selcols[0:len(selcols)-1])[selcols[len(selcols)-1]].nunique().reset_index().\
rename(index=str, columns={selcols[len(selcols)-1]: 'X'+str(i)})
train_df = train_df.merge(gp, on=selcols[0:len(selcols)-1], how='left')
if QQ==5:
gp = train_df[selcols].groupby(by=selcols[0:len(selcols)-1])[selcols[len(selcols)-1]].cumcount()
train_df['X'+str(i)]=gp.values
if not debug:
gp.to_csv(filename,index=False)
del gp
gc.collect()
print('doing nextClick')
predictors=[]
new_feature = 'nextClick'
filename='nextClick_%d_%d.csv'%(frm,to)
if os.path.exists(filename):
print('loading from save file')
QQ=pd.read_csv(filename).values
else:
D=2**26
train_df['category'] = (train_df['ip'].astype(str) + "_" + train_df['app'].astype(str) + "_" + train_df['device'].astype(str) \
+ "_" + train_df['os'].astype(str)).apply(hash) % D
click_buffer= np.full(D, 3000000000, dtype=np.uint32)
train_df['epochtime']= train_df['click_time'].astype(np.int64) // 10 ** 9
next_clicks= []
for category, t in zip(reversed(train_df['category'].values), reversed(train_df['epochtime'].values)):
next_clicks.append(click_buffer[category]-t)
click_buffer[category]= t
del(click_buffer)
QQ= list(reversed(next_clicks))
if not debug:
print('saving')
pd.DataFrame(QQ).to_csv(filename,index=False)
train_df[new_feature] = QQ
predictors.append(new_feature)
train_df[new_feature+'_shift'] = pd.DataFrame(QQ).shift(+1).values
predictors.append(new_feature+'_shift')
del QQ
gc.collect()
print('grouping by ip-day-hour combination...')
gp = train_df[['ip','day','hour','channel']].groupby(by=['ip','day','hour'])[['channel']].count().reset_index().rename(index=str, columns={'channel': 'ip_tcount'})
train_df = train_df.merge(gp, on=['ip','day','hour'], how='left')
del gp
gc.collect()
print('grouping by ip-app combination...')
gp = train_df[['ip', 'app', 'channel']].groupby(by=['ip', 'app'])[['channel']].count().reset_index().rename(index=str, columns={'channel': 'ip_app_count'})
train_df = train_df.merge(gp, on=['ip','app'], how='left')
del gp
gc.collect()
print('grouping by ip-app-os combination...')
gp = train_df[['ip','app', 'os', 'channel']].groupby(by=['ip', 'app', 'os'])[['channel']].count().reset_index().rename(index=str, columns={'channel': 'ip_app_os_count'})
train_df = train_df.merge(gp, on=['ip','app', 'os'], how='left')
del gp
gc.collect()
# Adding features with var and mean hour (inspired from nuhsikander's script)
print('grouping by : ip_day_chl_var_hour')
gp = train_df[['ip','day','hour','channel']].groupby(by=['ip','day','channel'])[['hour']].var().reset_index().rename(index=str, columns={'hour': 'ip_tchan_count'})
train_df = train_df.merge(gp, on=['ip','day','channel'], how='left')
del gp
gc.collect()
print('grouping by : ip_app_os_var_hour')
gp = train_df[['ip','app', 'os', 'hour']].groupby(by=['ip', 'app', 'os'])[['hour']].var().reset_index().rename(index=str, columns={'hour': 'ip_app_os_var'})
train_df = train_df.merge(gp, on=['ip','app', 'os'], how='left')
del gp
gc.collect()
print('grouping by : ip_app_channel_var_day')
gp = train_df[['ip','app', 'channel', 'day']].groupby(by=['ip', 'app', 'channel'])[['day']].var().reset_index().rename(index=str, columns={'day': 'ip_app_channel_var_day'})
train_df = train_df.merge(gp, on=['ip','app', 'channel'], how='left')
del gp
gc.collect()
print('grouping by : ip_app_chl_mean_hour')
gp = train_df[['ip','app', 'channel','hour']].groupby(by=['ip', 'app', 'channel'])[['hour']].mean().reset_index().rename(index=str, columns={'hour': 'ip_app_channel_mean_hour'})
print("merging...")
train_df = train_df.merge(gp, on=['ip','app', 'channel'], how='left')
del gp
gc.collect()
print("vars and data type: ")
train_df.info()
train_df['ip_tcount'] = train_df['ip_tcount'].astype('uint16')
train_df['ip_app_count'] = train_df['ip_app_count'].astype('uint16')
train_df['ip_app_os_count'] = train_df['ip_app_os_count'].astype('uint16')
target = 'is_attributed'
predictors.extend(['app','device','os', 'channel', 'hour', 'day',
'ip_tcount', 'ip_tchan_count', 'ip_app_count',
'ip_app_os_count', 'ip_app_os_var',
'ip_app_channel_var_day','ip_app_channel_mean_hour'])
categorical = ['app', 'device', 'os', 'channel', 'hour', 'day']
for i in range(0,naddfeat):
predictors.append('X'+str(i))
print('predictors',predictors)
test_df = train_df[len_train:]
val_df = train_df[(len_train-val_size):len_train]
train_df = train_df[:(len_train-val_size)]
print("train size: ", len(train_df))
print("valid size: ", len(val_df))
print("test size : ", len(test_df))
sub = pd.DataFrame()
sub['click_id'] = test_df['click_id'].astype('int')
gc.collect()
print("Training...")
start_time = time.time()
params = {
'learning_rate': 0.20,
#'is_unbalance': 'true', # replaced with scale_pos_weight argument
'num_leaves': 7, # 2^max_depth - 1
        'max_depth': 3,  # limit tree depth (-1 would mean no limit)
        'min_child_samples': 100,  # Minimum number of samples needed in a child (min_data_in_leaf)
'max_bin': 100, # Number of bucketed bin for feature values
'subsample': 0.7, # Subsample ratio of the training instance.
        'subsample_freq': 1,  # frequency of subsampling; <=0 disables it
'colsample_bytree': 0.9, # Subsample ratio of columns when constructing each tree.
'min_child_weight': 0, # Minimum sum of instance weight(hessian) needed in a child(leaf)
'scale_pos_weight':200 # because training data is extremely unbalanced
}
(bst,best_iteration) = lgb_modelfit_nocv(params,
train_df,
val_df,
predictors,
target,
objective='binary',
metrics='auc',
early_stopping_rounds=30,
verbose_eval=True,
num_boost_round=1000,
categorical_features=categorical)
print('[{}]: model training time'.format(time.time() - start_time))
del train_df
del val_df
gc.collect()
print('Plot feature importances...')
ax = lgb.plot_importance(bst, max_num_features=100)
plt.show()
print("Predicting...")
sub['is_attributed'] = bst.predict(test_df[predictors],num_iteration=best_iteration)
if not debug:
print("writing...")
sub.to_csv('sub_it%d.csv.gz'%(fileno),index=False,compression='gzip')
print("done...")
return sub
nrows=184903891-1
nchunk=40000000
val_size=2500000
frm=nrows-75000000
if debug:
frm=0
nchunk=100000
val_size=10000
to=frm+nchunk
sub=DO(frm,to,0)
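# --- Illustrative sketch (not part of the original kernel) ---
# Hedged, self-contained rerun of the 'nextClick' idea used inside DO():
# iterate over the clicks in reverse, remember per category when the *next*
# click happened, and store the time difference. The toy frame is made up.
def _next_click_sketch():
    toy = pd.DataFrame({'category': [1, 2, 1, 1, 2],
                        'epochtime': [10, 11, 15, 20, 30]})
    buffer = {}
    next_clicks = []
    for category, t in zip(reversed(toy['category'].values),
                           reversed(toy['epochtime'].values)):
        next_clicks.append(buffer.get(category, 3000000000) - t)
        buffer[category] = t
    toy['nextClick'] = list(reversed(next_clicks))
    return toy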
| apache-2.0 |
henriquegemignani/randovania | randovania/gui/tracker_window.py | 1 | 32424 | import collections
import functools
import json
import typing
from pathlib import Path
from random import Random
from typing import Optional, Dict, Set, List, Tuple, Iterator, Union
import matplotlib.pyplot as plt
import networkx
from PySide2 import QtWidgets
from PySide2.QtCore import Qt
from PySide2.QtWidgets import QMainWindow, QTreeWidgetItem, QCheckBox, QLabel, QGridLayout, QWidget, QMessageBox
from matplotlib.axes import Axes
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
from matplotlib.figure import Figure
from randovania.game_description.area_location import AreaLocation
from randovania.game_description.game_description import GameDescription
from randovania.game_description.item.item_category import ItemCategory
from randovania.game_description.node import Node, ResourceNode, TranslatorGateNode, TeleporterNode, DockNode
from randovania.game_description.resources.item_resource_info import ItemResourceInfo
from randovania.game_description.resources.pickup_entry import PickupEntry
from randovania.game_description.resources.resource_info import add_resource_gain_to_current_resources
from randovania.game_description.resources.translator_gate import TranslatorGate
from randovania.game_description.world import World
from randovania.games.game import RandovaniaGame
from randovania.games.prime import patcher_file
from randovania.generator import generator
from randovania.gui.generated.tracker_window_ui import Ui_TrackerWindow
from randovania.gui.lib.common_qt_lib import set_default_window_icon
from randovania.gui.lib.custom_spin_box import CustomSpinBox
from randovania.layout import translator_configuration
from randovania.layout.echoes_configuration import EchoesConfiguration
from randovania.layout.teleporters import TeleporterShuffleMode
from randovania.layout.translator_configuration import LayoutTranslatorRequirement
from randovania.resolver.bootstrap import logic_bootstrap
from randovania.resolver.logic import Logic
from randovania.resolver.resolver_reach import ResolverReach
from randovania.resolver.state import State, add_pickup_to_state
class InvalidLayoutForTracker(Exception):
pass
def _load_previous_state(persistence_path: Path,
layout_configuration: EchoesConfiguration,
) -> Optional[dict]:
previous_layout_path = persistence_path.joinpath("layout_configuration.json")
try:
with previous_layout_path.open() as previous_layout_file:
previous_layout = EchoesConfiguration.from_json(json.load(previous_layout_file))
except (FileNotFoundError, TypeError, KeyError, ValueError, json.JSONDecodeError):
return None
if previous_layout != layout_configuration:
return None
previous_state_path = persistence_path.joinpath("state.json")
try:
with previous_state_path.open() as previous_state_file:
return json.load(previous_state_file)
except (FileNotFoundError, json.JSONDecodeError):
return None
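# --- Illustrative sketch (not part of the original module) ---
# Hedged illustration of the on-disk layout _load_previous_state expects: a
# 'layout_configuration.json' that has to match the current configuration and
# a 'state.json' with the tracker progress. The payloads below are placeholder
# assumptions, not the real serialized formats.
def _persistence_layout_sketch(persistence_path: Path) -> None:
    persistence_path.mkdir(parents=True, exist_ok=True)
    with persistence_path.joinpath("layout_configuration.json").open("w") as f:
        json.dump({"game": "prime2"}, f)        # placeholder configuration
    with persistence_path.joinpath("state.json").open("w") as f:
        json.dump({"actions": [], "collected_pickups": {}, "elevators": {},
                   "translator_gates": {}, "starting_location": None}, f)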
class MatplotlibWidget(QtWidgets.QWidget):
ax: Axes
def __init__(self, parent=None):
super().__init__(parent)
fig = Figure(figsize=(7, 5), dpi=65, facecolor=(1, 1, 1), edgecolor=(0, 0, 0))
self.canvas = FigureCanvas(fig)
self.toolbar = NavigationToolbar(self.canvas, self)
lay = QtWidgets.QVBoxLayout(self)
lay.addWidget(self.toolbar)
lay.addWidget(self.canvas)
self.ax = fig.add_subplot(111)
self.line, *_ = self.ax.plot([])
class TrackerWindow(QMainWindow, Ui_TrackerWindow):
# Tracker state
_collected_pickups: Dict[PickupEntry, int]
_actions: List[Node]
# Tracker configuration
logic: Logic
game_description: GameDescription
layout_configuration: EchoesConfiguration
persistence_path: Path
_initial_state: State
_elevator_id_to_combo: Dict[int, QtWidgets.QComboBox]
_translator_gate_to_combo: Dict[TranslatorGate, QtWidgets.QComboBox]
_starting_nodes: Set[ResourceNode]
_undefined_item = ItemResourceInfo(-1, "Undefined", "Undefined", 0, None)
# UI tools
_asset_id_to_item: Dict[int, QTreeWidgetItem]
_node_to_item: Dict[Node, QTreeWidgetItem]
_widget_for_pickup: Dict[PickupEntry, Union[QCheckBox, CustomSpinBox]]
_during_setup = False
def __init__(self, persistence_path: Path, layout_configuration: EchoesConfiguration):
super().__init__()
self.setupUi(self)
set_default_window_icon(self)
self._collected_pickups = {}
self._widget_for_pickup = {}
self._actions = []
self._asset_id_to_item = {}
self._node_to_item = {}
self.layout_configuration = layout_configuration
self.persistence_path = persistence_path
player_pool = generator.create_player_pool(Random(0), self.layout_configuration, 0, 1)
pool_patches = player_pool.patches
self.game_description, self._initial_state = logic_bootstrap(layout_configuration,
player_pool.game,
pool_patches)
self.logic = Logic(self.game_description, layout_configuration)
self._initial_state.resources["add_self_as_requirement_to_resources"] = 1
self.menu_reset_action.triggered.connect(self._confirm_reset)
self.resource_filter_check.stateChanged.connect(self.update_locations_tree_for_reachable_nodes)
self.hide_collected_resources_check.stateChanged.connect(self.update_locations_tree_for_reachable_nodes)
self.undo_last_action_button.clicked.connect(self._undo_last_action)
self.configuration_label.setText("Trick Level: {}; Starts with:\n{}".format(
layout_configuration.trick_level.pretty_description,
", ".join(
resource.short_name
for resource in pool_patches.starting_items.keys()
)
))
self.setup_pickups_box(player_pool.pickups)
self.setup_possible_locations_tree()
self.setup_elevators()
self.setup_translator_gates()
self.matplot_widget = MatplotlibWidget(self.tab_graph_map)
self.tab_graph_map_layout.addWidget(self.matplot_widget)
self._world_to_node_positions = {}
self.map_tab_widget.currentChanged.connect(self._on_tab_changed)
for world in self.game_description.world_list.worlds:
self.graph_map_world_combo.addItem(world.name, world)
self.graph_map_world_combo.currentIndexChanged.connect(self.on_graph_map_world_combo)
persistence_path.mkdir(parents=True, exist_ok=True)
previous_state = _load_previous_state(persistence_path, layout_configuration)
if not self.apply_previous_state(previous_state):
self.setup_starting_location(None)
with persistence_path.joinpath("layout_configuration.json").open("w") as layout_file:
json.dump(layout_configuration.as_json, layout_file)
self._add_new_action(self._initial_state.node)
def apply_previous_state(self, previous_state: Optional[dict]) -> bool:
if previous_state is None:
return False
starting_location = None
needs_starting_location = len(self.layout_configuration.starting_location.locations) > 1
resource_db = self.game_description.resource_database
translator_gates = {}
try:
pickup_name_to_pickup = {pickup.name: pickup for pickup in self._collected_pickups.keys()}
quantity_to_change = {
pickup_name_to_pickup[pickup_name]: quantity
for pickup_name, quantity in previous_state["collected_pickups"].items()
}
previous_actions = [
self.game_description.world_list.all_nodes[index]
for index in previous_state["actions"]
]
if needs_starting_location:
starting_location = AreaLocation.from_json(previous_state["starting_location"])
elevators = {
int(elevator_id): AreaLocation.from_json(location) if location is not None else None
for elevator_id, location in previous_state["elevators"].items()
}
if self.layout_configuration.game == RandovaniaGame.PRIME2:
translator_gates = {
TranslatorGate(int(gate)): (resource_db.get_item(item)
if item is not None
else self._undefined_item)
for gate, item in previous_state["translator_gates"].items()
}
except KeyError:
return False
self.setup_starting_location(starting_location)
for elevator_id, area_location in elevators.items():
combo = self._elevator_id_to_combo[elevator_id]
if area_location is None:
combo.setCurrentIndex(0)
continue
for i in range(combo.count()):
if area_location == combo.itemData(i):
combo.setCurrentIndex(i)
break
for gate, item in translator_gates.items():
combo = self._translator_gate_to_combo[gate]
for i in range(combo.count()):
if item == combo.itemData(i):
combo.setCurrentIndex(i)
break
self.bulk_change_quantity(quantity_to_change)
self._add_new_actions(previous_actions)
return True
def reset(self):
self.bulk_change_quantity({
pickup: 0
for pickup in self._collected_pickups.keys()
})
while len(self._actions) > 1:
self._actions.pop()
self.actions_list.takeItem(len(self._actions))
for elevator in self._elevator_id_to_combo.values():
elevator.setCurrentIndex(0)
for elevator in self._translator_gate_to_combo.values():
elevator.setCurrentIndex(0)
self._refresh_for_new_action()
def _confirm_reset(self):
reply = QMessageBox.question(self, "Reset Tracker?", "Do you want to reset the tracker progression?",
QMessageBox.Yes | QMessageBox.No, QMessageBox.No)
if reply == QMessageBox.Yes:
self.reset()
@property
def _show_only_resource_nodes(self) -> bool:
return self.resource_filter_check.isChecked()
@property
def _hide_collected_resources(self) -> bool:
return self.hide_collected_resources_check.isChecked()
@property
def _collected_nodes(self) -> Set[ResourceNode]:
return self._starting_nodes | set(action for action in self._actions if action.is_resource_node)
def _pretty_node_name(self, node: Node) -> str:
world_list = self.game_description.world_list
return "{} / {}".format(world_list.area_name(world_list.nodes_to_area(node)), node.name)
def _refresh_for_new_action(self):
self.undo_last_action_button.setEnabled(len(self._actions) > 1)
self.current_location_label.setText("Current location: {}".format(self._pretty_node_name(self._actions[-1])))
self.update_locations_tree_for_reachable_nodes()
def _add_new_action(self, node: Node):
self._add_new_actions([node])
def _add_new_actions(self, nodes: Iterator[Node]):
for node in nodes:
self.actions_list.addItem(self._pretty_node_name(node))
self._actions.append(node)
self._refresh_for_new_action()
def _undo_last_action(self):
self._actions.pop()
self.actions_list.takeItem(len(self._actions))
self._refresh_for_new_action()
def _on_tree_node_double_clicked(self, item: QTreeWidgetItem, _):
node: Optional[Node] = getattr(item, "node", None)
if not item.isDisabled() and node is not None and node != self._actions[-1]:
self._add_new_action(node)
def _positions_for_world(self, world: World):
g = networkx.DiGraph()
world_list = self.game_description.world_list
state = self.state_for_current_configuration()
for area in world.areas:
g.add_node(area)
for area in world.areas:
nearby_areas = set()
for node in area.nodes:
if isinstance(node, DockNode):
try:
target_node = world_list.resolve_dock_node(node, state.patches)
nearby_areas.add(world_list.nodes_to_area(target_node))
except IndexError as e:
print(f"For {node.name} in {area.name}, received {e}")
continue
for other_area in nearby_areas:
g.add_edge(area, other_area)
return networkx.drawing.spring_layout(g)
def update_matplot_widget(self, nodes_in_reach: Set[Node]):
g = networkx.DiGraph()
world_list = self.game_description.world_list
state = self.state_for_current_configuration()
world = self.graph_map_world_combo.currentData()
for area in world.areas:
g.add_node(area)
for area in world.areas:
nearby_areas = set()
for node in area.nodes:
if node not in nodes_in_reach:
continue
if isinstance(node, DockNode):
# TODO: respect is_blast_shield: if already opened once, no requirement needed.
                    # Includes opening from behind with different criteria
try:
target_node = world_list.resolve_dock_node(node, state.patches)
dock_weakness = state.patches.dock_weakness.get((area.area_asset_id, node.dock_index),
node.default_dock_weakness)
if dock_weakness.requirement.satisfied(state.resources, state.energy):
nearby_areas.add(world_list.nodes_to_area(target_node))
except IndexError as e:
print(f"For {node.name} in {area.name}, received {e}")
continue
for other_area in nearby_areas:
g.add_edge(area, other_area)
self.matplot_widget.ax.clear()
cf = self.matplot_widget.ax.get_figure()
cf.set_facecolor("w")
if world.world_asset_id not in self._world_to_node_positions:
self._world_to_node_positions[world.world_asset_id] = self._positions_for_world(world)
pos = self._world_to_node_positions[world.world_asset_id]
networkx.draw_networkx_nodes(g, pos, ax=self.matplot_widget.ax)
networkx.draw_networkx_edges(g, pos, arrows=True, ax=self.matplot_widget.ax)
networkx.draw_networkx_labels(g, pos, ax=self.matplot_widget.ax,
labels={area: area.name for area in world.areas},
verticalalignment='top')
self.matplot_widget.ax.set_axis_off()
plt.draw_if_interactive()
self.matplot_widget.canvas.draw()
def on_graph_map_world_combo(self):
nodes_in_reach = self.current_nodes_in_reach(self.state_for_current_configuration())
self.update_matplot_widget(nodes_in_reach)
def current_nodes_in_reach(self, state):
if state is None:
nodes_in_reach = set()
else:
reach = ResolverReach.calculate_reach(self.logic, state)
nodes_in_reach = set(reach.nodes)
nodes_in_reach.add(state.node)
return nodes_in_reach
def _on_tab_changed(self):
if self.map_tab_widget.currentWidget() == self.tab_graph_map:
self.on_graph_map_world_combo()
def update_locations_tree_for_reachable_nodes(self):
state = self.state_for_current_configuration()
nodes_in_reach = self.current_nodes_in_reach(state)
if self.map_tab_widget.currentWidget() == self.tab_graph_map:
self.update_matplot_widget(nodes_in_reach)
all_nodes = self.game_description.world_list.all_nodes
for world in self.game_description.world_list.worlds:
for area in world.areas:
area_is_visible = False
for node in area.nodes:
is_collected = node in self._collected_nodes
is_visible = node in nodes_in_reach and not (self._hide_collected_resources
and is_collected)
if self._show_only_resource_nodes:
is_visible = is_visible and node.is_resource_node
node_item = self._node_to_item[node]
node_item.setHidden(not is_visible)
if node.is_resource_node:
resource_node = typing.cast(ResourceNode, node)
node_item.setDisabled(not resource_node.can_collect(state.patches, state.resources, all_nodes))
node_item.setCheckState(0, Qt.Checked if is_collected else Qt.Unchecked)
area_is_visible = area_is_visible or is_visible
self._asset_id_to_item[area.area_asset_id].setHidden(not area_is_visible)
# Persist the current state
self.persist_current_state()
def persist_current_state(self):
world_list = self.game_description.world_list
with self.persistence_path.joinpath("state.json").open("w") as state_file:
json.dump(
{
"actions": [
node.index
for node in self._actions
],
"collected_pickups": {
pickup.name: quantity
for pickup, quantity in self._collected_pickups.items()
},
"elevators": {
str(elevator_id): combo.currentData().as_json if combo.currentIndex() > 0 else None
for elevator_id, combo in self._elevator_id_to_combo.items()
},
"translator_gates": {
str(gate.index): combo.currentData().index if combo.currentIndex() > 0 else None
for gate, combo in self._translator_gate_to_combo.items()
},
"starting_location": world_list.node_to_area_location(self._initial_state.node).as_json,
},
state_file
)
def setup_possible_locations_tree(self):
"""
Creates the possible_locations_tree with all worlds, areas and nodes.
"""
self.possible_locations_tree.itemDoubleClicked.connect(self._on_tree_node_double_clicked)
# TODO: Dark World names
for world in self.game_description.world_list.worlds:
world_item = QTreeWidgetItem(self.possible_locations_tree)
world_item.setText(0, world.name)
world_item.setExpanded(True)
self._asset_id_to_item[world.world_asset_id] = world_item
for area in world.areas:
area_item = QTreeWidgetItem(world_item)
area_item.area = area
area_item.setText(0, area.name)
area_item.setHidden(True)
self._asset_id_to_item[area.area_asset_id] = area_item
for node in area.nodes:
node_item = QTreeWidgetItem(area_item)
if isinstance(node, TranslatorGateNode):
node_item.setText(0, "{} ({})".format(node.name, node.gate))
else:
node_item.setText(0, node.name)
node_item.node = node
if node.is_resource_node:
node_item.setFlags(node_item.flags() & ~Qt.ItemIsUserCheckable)
self._node_to_item[node] = node_item
def setup_elevators(self):
world_list = self.game_description.world_list
nodes_by_world: Dict[str, List[TeleporterNode]] = collections.defaultdict(list)
self._elevator_id_to_combo = {}
areas_to_not_change = {
2278776548, # Sky Temple Gateway
2068511343, # Sky Temple Energy Controller
3136899603, # Aerie Transport Station
1564082177, # Aerie
}
targets = {}
for world, area, node in world_list.all_worlds_areas_nodes:
if isinstance(node, TeleporterNode) and node.editable and area.area_asset_id not in areas_to_not_change:
name = world.correct_name(area.in_dark_aether)
nodes_by_world[name].append(node)
location = AreaLocation(world.world_asset_id, area.area_asset_id)
targets[patcher_file.elevator_area_name(world_list, location, True)] = location
if self.layout_configuration.elevators.mode == TeleporterShuffleMode.ONE_WAY_ANYTHING:
targets = {}
for world in world_list.worlds:
for area in world.areas:
name = world.correct_name(area.in_dark_aether)
targets[f"{name} - {area.name}"] = AreaLocation(world.world_asset_id, area.area_asset_id)
combo_targets = sorted(targets.items(), key=lambda it: it[0])
for world_name in sorted(nodes_by_world.keys()):
nodes = nodes_by_world[world_name]
nodes_locations = [AreaLocation(world_list.nodes_to_world(node).world_asset_id,
world_list.nodes_to_area(node).area_asset_id)
for node in nodes]
nodes_names = [patcher_file.elevator_area_name(world_list, location, True)
for location in nodes_locations]
nodes = sorted(nodes_by_world[world_name], key=lambda it: world_list.nodes_to_area(it).name)
group = QtWidgets.QGroupBox(self.elevators_scroll_contents)
group.setTitle(world_name)
self.elevators_scroll_layout.addWidget(group)
layout = QtWidgets.QGridLayout(group)
for i, (node, location, name) in enumerate(sorted(zip(nodes, nodes_locations, nodes_names),
key=lambda it: it[2])):
node_name = QtWidgets.QLabel(group)
node_name.setText(name)
layout.addWidget(node_name, i, 0)
combo = QtWidgets.QComboBox(group)
if self.layout_configuration.elevators.is_vanilla:
combo.addItem("Vanilla", node.default_connection)
combo.setEnabled(False)
else:
combo.addItem("Undefined", location)
for target_name, connection in combo_targets:
combo.addItem(target_name, connection)
combo.currentIndexChanged.connect(self.update_locations_tree_for_reachable_nodes)
self._elevator_id_to_combo[node.teleporter_instance_id] = combo
layout.addWidget(combo, i, 1)
def setup_translator_gates(self):
world_list = self.game_description.world_list
resource_db = self.game_description.resource_database
self._translator_gate_to_combo = {}
if self.layout_configuration.game != RandovaniaGame.PRIME2:
return
gates = {
f"{area.name} ({node.gate.index})": node.gate
for world, area, node in world_list.all_worlds_areas_nodes
if isinstance(node, TranslatorGateNode)
}
translator_requirement = self.layout_configuration.translator_configuration.translator_requirement
for i, (gate_name, gate) in enumerate(sorted(gates.items(), key=lambda it: it[0])):
node_name = QtWidgets.QLabel(self.translator_gate_scroll_contents)
node_name.setText(gate_name)
self.translator_gate_scroll_layout.addWidget(node_name, i, 0)
combo = QtWidgets.QComboBox(self.translator_gate_scroll_contents)
gate_requirement = translator_requirement[gate]
if gate_requirement in (LayoutTranslatorRequirement.RANDOM,
LayoutTranslatorRequirement.RANDOM_WITH_REMOVED):
combo.addItem("Undefined", self._undefined_item)
for translator, index in translator_configuration.ITEM_INDICES.items():
combo.addItem(translator.long_name, resource_db.get_item(index))
else:
combo.addItem(gate_requirement.long_name, resource_db.get_item(gate_requirement.item_index))
combo.setEnabled(False)
combo.currentIndexChanged.connect(self.update_locations_tree_for_reachable_nodes)
self._translator_gate_to_combo[gate] = combo
self.translator_gate_scroll_layout.addWidget(combo, i, 1)
def setup_starting_location(self, area_location: Optional[AreaLocation]):
world_list = self.game_description.world_list
if len(self.layout_configuration.starting_location.locations) > 1:
if area_location is None:
area_locations = sorted(self.layout_configuration.starting_location.locations,
key=lambda it: world_list.area_name(world_list.area_by_area_location(it)))
location_names = [world_list.area_name(world_list.area_by_area_location(it))
for it in area_locations]
selected_name = QtWidgets.QInputDialog.getItem(self, "Starting Location", "Select starting location",
location_names, 0, False)
area_location = area_locations[location_names.index(selected_name[0])]
self._initial_state.node = world_list.resolve_teleporter_connection(area_location)
self._starting_nodes = {
node
for node in world_list.all_nodes
if node.is_resource_node and node.resource() in self._initial_state.resources
}
def _change_item_quantity(self, pickup: PickupEntry, use_quantity_as_bool: bool, quantity: int):
if use_quantity_as_bool:
if bool(quantity):
quantity = 1
else:
quantity = 0
self._collected_pickups[pickup] = quantity
if not self._during_setup:
self.update_locations_tree_for_reachable_nodes()
def bulk_change_quantity(self, new_quantity: Dict[PickupEntry, int]):
self._during_setup = True
for pickup, quantity in new_quantity.items():
widget = self._widget_for_pickup[pickup]
if isinstance(widget, QCheckBox):
widget.setChecked(quantity > 0)
else:
widget.setValue(quantity)
self._during_setup = False
def _create_widgets_with_quantity(self,
pickup: PickupEntry,
parent_widget: QWidget,
parent_layout: QGridLayout,
row: int,
quantity: int,
):
label = QLabel(parent_widget)
label.setText(pickup.name)
parent_layout.addWidget(label, row, 0)
spin_bix = CustomSpinBox(parent_widget)
spin_bix.setMaximumWidth(50)
spin_bix.setMaximum(quantity)
spin_bix.valueChanged.connect(functools.partial(self._change_item_quantity, pickup, False))
self._widget_for_pickup[pickup] = spin_bix
parent_layout.addWidget(spin_bix, row, 1)
def setup_pickups_box(self, item_pool: List[PickupEntry]):
parent_widgets: Dict[ItemCategory, Tuple[QWidget, QGridLayout]] = {
ItemCategory.EXPANSION: (self.expansions_box, self.expansions_layout),
ItemCategory.ENERGY_TANK: (self.expansions_box, self.expansions_layout),
ItemCategory.TRANSLATOR: (self.translators_box, self.translators_layout),
ItemCategory.TEMPLE_KEY: (self.keys_box, self.keys_layout),
ItemCategory.SKY_TEMPLE_KEY: (self.keys_box, self.keys_layout),
}
major_pickup_parent_widgets = (self.upgrades_box, self.upgrades_layout)
row_for_parent = {
self.expansions_box: 0,
self.translators_box: 0,
self.upgrades_box: 0,
self.keys_box: 0,
}
column_for_parent = {
self.translators_box: 0,
self.upgrades_box: 0,
self.keys_box: 0,
}
k_column_count = 2
pickup_by_name = {}
pickup_with_quantity = {}
for pickup in item_pool:
if pickup.name in pickup_by_name:
pickup_with_quantity[pickup_by_name[pickup.name]] += 1
else:
pickup_by_name[pickup.name] = pickup
pickup_with_quantity[pickup] = 1
non_expansions_with_quantity = []
for pickup, quantity in pickup_with_quantity.items():
self._collected_pickups[pickup] = 0
parent_widget, parent_layout = parent_widgets.get(pickup.item_category, major_pickup_parent_widgets)
row = row_for_parent[parent_widget]
if parent_widget is self.expansions_box:
self._create_widgets_with_quantity(pickup, parent_widget, parent_layout, row, quantity)
row_for_parent[parent_widget] += 1
else:
if quantity > 1:
non_expansions_with_quantity.append((parent_widget, parent_layout, pickup, quantity))
else:
check_box = QCheckBox(parent_widget)
check_box.setText(pickup.name)
check_box.stateChanged.connect(functools.partial(self._change_item_quantity, pickup, True))
self._widget_for_pickup[pickup] = check_box
column = column_for_parent[parent_widget]
parent_layout.addWidget(check_box, row, column)
column += 1
if column >= k_column_count:
column = 0
row += 1
row_for_parent[parent_widget] = row
column_for_parent[parent_widget] = column
for parent_widget, parent_layout, pickup, quantity in non_expansions_with_quantity:
if column_for_parent[parent_widget] != 0:
column_for_parent[parent_widget] = 0
row_for_parent[parent_widget] += 1
self._create_widgets_with_quantity(pickup, parent_widget, parent_layout,
row_for_parent[parent_widget],
quantity)
row_for_parent[parent_widget] += 1
def state_for_current_configuration(self) -> Optional[State]:
all_nodes = self.game_description.world_list.all_nodes
state = self._initial_state.copy()
if self._actions:
state.node = self._actions[-1]
for teleporter, combo in self._elevator_id_to_combo.items():
assert combo.currentData() is not None
state.patches.elevator_connection[teleporter] = combo.currentData()
for gate, item in self._translator_gate_to_combo.items():
state.patches.translator_gates[gate] = item.currentData()
for pickup, quantity in self._collected_pickups.items():
for _ in range(quantity):
add_pickup_to_state(state, pickup)
for node in self._collected_nodes:
add_resource_gain_to_current_resources(node.resource_gain_on_collect(state.patches, state.resources,
all_nodes),
state.resources)
return state
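# --- Illustrative sketch (not part of the original module) ---
# Hedged, standalone illustration of the networkx pattern used by
# TrackerWindow.update_matplot_widget above: areas become graph nodes,
# reachable docks become edges, and a spring layout positions them before the
# draw_networkx_* calls. The area names are made up.
def _area_graph_sketch():
    g = networkx.DiGraph()
    g.add_edge("Landing Site", "Temple Transport")
    g.add_edge("Temple Transport", "Main Energy Controller")
    pos = networkx.drawing.spring_layout(g)
    fig, ax = plt.subplots()
    networkx.draw_networkx_nodes(g, pos, ax=ax)
    networkx.draw_networkx_edges(g, pos, arrows=True, ax=ax)
    networkx.draw_networkx_labels(g, pos, ax=ax, verticalalignment='top')
    ax.set_axis_off()
    return fig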
| gpl-3.0 |
gplssm/europepstrans | europepstrans/results/__init__.py | 1 | 13654 | """
TimeFrameResults steals methods from oemof.outputlib adapted to the structure
applied here. Most relevant difference is results data stored in self.data
"""
from oemof.outputlib import DataFramePlot, ResultsDataFrame
import pickle
from matplotlib import pyplot as plt
import logging
import pandas as pd
class TimeFrameResults:
"""
Container for results of one time frame (i.e. one year)
Attributes
----------
data : DataFrame
Structure multi-indexed result data
"""
def __init__(self, **kwargs):
"""
Initializes data object based on oemof results class
"""
results_file = kwargs.get('results_file', None)
self.subset = kwargs.get('subset', None)
self.ax = kwargs.get('ax')
if results_file is None:
# self.data = DataFramePlot(energy_system=kwargs.get('energy_system'))
self.data = ResultsDataFrame(energy_system=kwargs.get('energy_system'))
else:
self.data = pickle.load(open(results_file, 'rb'))
self.reformat_data()
def preview(self):
"""
Print short preview of data
"""
return self.data.head()
def reformat_data(self):
"""
        Extract region information from the bus label and put it into a separate index label
"""
# TODO: get regions list from elsewhere
regions = ['deu', 'xfra', 'xbnl']
regions_leading_underscore = ['_' + x for x in regions]
# put bus_label to column (required to work on)
self.data.reset_index(level='bus_label', inplace=True)
self.data.reset_index(level='obj_label', inplace=True)
        # extract region from bus label and write to new column
self.data['region'] = self.data['bus_label'].str.extract(
r"(?=(" + '|'.join(regions) + r"))", expand=True)
self.data['region'].fillna('global', inplace=True)
# remove region from bus_label and obj_label
self.data['bus_label'] = self.data['bus_label'].str.replace(
r"(" + '|'.join(regions_leading_underscore) + r")", '')
self.data['obj_label'] = self.data['obj_label'].str.replace(
r"(" + '|'.join(regions_leading_underscore) + r")", '')
# put bus_label back to index
self.data = self.data.set_index(['bus_label', 'region', 'obj_label'],
append=True)
# reorder and resort levels
level_order = ['bus_label', 'type', 'obj_label', 'region', 'datetime']
self.data = self.data.reorder_levels(level_order)
def slice_by(self, **kwargs):
r""" Method for slicing the ResultsDataFrame. A subset is returned.
Parameters
----------
bus_label : string
type : string (to_bus/from_bus/other)
obj_label: string
date_from : string
Start date selection e.g. "2016-01-01 00:00:00". If not set, the
whole time range will be plotted.
date_to : string
End date selection e.g. "2016-03-01 00:00:00". If not set, the
whole time range will be plotted.
"""
kwargs.setdefault('bus_label', slice(None))
kwargs.setdefault('type', slice(None))
kwargs.setdefault('obj_label', slice(None))
kwargs.setdefault(
'date_from', self.data.index.get_level_values('datetime')[0])
kwargs.setdefault(
'date_to', self.data.index.get_level_values('datetime')[-1])
# slicing
idx = pd.IndexSlice
subset = self.data.loc[idx[
kwargs['bus_label'],
kwargs['type'],
kwargs['obj_label'],
slice(pd.Timestamp(kwargs['date_from']),
pd.Timestamp(kwargs['date_to']))], :]
return subset
def slice_unstacked(self, unstacklevel='obj_label',
formatted=False, **kwargs):
r"""Method for slicing the ResultsDataFrame. An unstacked
subset is returned.
Parameters
----------
unstacklevel : string (default: 'obj_label')
Level to unstack the subset of the DataFrame.
formatted : boolean
missing...
"""
subset = self.slice_by(**kwargs)
subset = subset.unstack(level=unstacklevel)
if formatted is True:
subset.reset_index(level=['bus_label', 'type'], drop=True,
inplace=True)
            # use standard instead of multi-indexed columns
subset.columns = subset.columns.get_level_values(1).unique()
# return subset
self.subset = subset
def plot(self, **kwargs):
r""" Passing the data attribute to the pandas plotting method. All
parameters will be directly passed to pandas.DataFrame.plot(). See
http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.plot.html
for more information.
Returns
-------
self
"""
self.ax = self.subset.plot(**kwargs)
return self
def io_plot(self, bus_label, cdict, line_kwa=None, lineorder=None,
bar_kwa=None, barorder=None, **kwargs):
r""" Plotting a combined bar and line plot to see the fitting of in-
        and outgoing flows of a bus balance.
Parameters
----------
bus_label : string
Uid of the bus to plot the balance.
cdict : dictionary
            A dictionary that has all possible components as keys and their
            colors as values.
line_kwa : dictionary
Keyword arguments to be passed to the pandas line plot.
bar_kwa : dictionary
Keyword arguments to be passed to the pandas bar plot.
lineorder : list
Order of columns to plot the line plot
barorder : list
Order of columns to plot the bar plot
Note
----
Further keyword arguments will be passed to the
:class:`slice_unstacked method <DataFramePlot.slice_unstacked>`.
Returns
-------
handles, labels
            Manipulated labels to correct the unusual construction of the
            stacked line plot. You can use them for further manipulations.
"""
self.ax = kwargs.get('ax', self.ax)
if bar_kwa is None:
bar_kwa = dict()
if line_kwa is None:
line_kwa = dict()
if self.ax is None:
fig = plt.figure()
self.ax = fig.add_subplot(1, 1, 1)
# Create a bar plot for all input flows
self.slice_unstacked(bus_label=bus_label, type='to_bus', **kwargs)
if barorder is not None:
self.rearrange_subset(barorder)
self.subset.plot(kind='bar', linewidth=0, stacked=True, width=1,
ax=self.ax, color=self.color_from_dict(cdict),
**bar_kwa)
# Create a line plot for all output flows
self.slice_unstacked(bus_label=bus_label, type='from_bus', **kwargs)
if lineorder is not None:
self.rearrange_subset(lineorder)
        # The following changes are made to draw the bottom line on the top
        # layer of all lines. Normally the bottom line is plotted first and
        # ends up on the lowest layer, which is difficult to read.
new_df = pd.DataFrame(index=self.subset.index)
n = 0
tmp = 0
for col in self.subset.columns:
if n < 1:
new_df[col] = self.subset[col]
else:
new_df[col] = self.subset[col] + tmp
tmp = new_df[col]
n += 1
if lineorder is None:
new_df.sort_index(axis=1, ascending=False, inplace=True)
else:
lineorder.reverse()
new_df = new_df[lineorder]
colorlist = self.color_from_dict(cdict)
if isinstance(colorlist, list):
colorlist.reverse()
separator = len(colorlist)
new_df.plot(kind='line', ax=self.ax, color=colorlist,
drawstyle='steps-mid', **line_kwa)
        # Adapt the legend to the new order
handles, labels = self.ax.get_legend_handles_labels()
tmp_lab = [x for x in reversed(labels[0:separator])]
tmp_hand = [x for x in reversed(handles[0:separator])]
handles = tmp_hand + handles[separator:]
labels = tmp_lab + labels[separator:]
labels.reverse()
handles.reverse()
self.ax.legend(handles, labels)
return handles, labels
def rearrange_subset(self, order):
r"""
Change the order of the subset DataFrame
Parameters
----------
order : list
New order of columns
Returns
-------
self
"""
cols = list(self.subset.columns.values)
neworder = [x for x in list(order) if x in set(cols)]
missing = [x for x in list(cols) if x not in set(order)]
if len(missing) > 0:
logging.warning(
"Columns that are not part of the order list are removed: " +
str(missing))
self.subset = self.subset[neworder]
def color_from_dict(self, colordict):
r""" Method to convert a dictionary containing the components and its
colors to a color list that can be directly useed with the color
parameter of the pandas plotting method.
Parameters
----------
        colordict : dictionary
            A dictionary that has all possible components as keys and their
            colors as values.
Returns
-------
        list
            The colors of all components of the subset attribute.
"""
tmplist = list(
map(colordict.get, list(self.subset.columns)))
tmplist = ['#00FFFF' if v is None else v for v in tmplist]
if len(tmplist) == 1:
colorlist = tmplist[0]
else:
colorlist = tmplist
return colorlist
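    # Example of color_from_dict (colors are made up): with subset columns
    # ['wind', 'pp_gas'] and colordict {'wind': '#3355aa'}, the result is
    # ['#3355aa', '#00FFFF'], because missing components fall back to '#00FFFF'.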
def set_datetime_ticks(self, tick_distance=None, number_autoticks=3,
date_format='%d-%m-%Y %H:%M'):
r""" Set configurable ticks for the time axis. One can choose the
number of ticks or the distance between ticks and the format.
Parameters
----------
        tick_distance : real
            The distance between two ticks in hours. If not set, autoticks are
            used (see number_autoticks).
number_autoticks : int (default: 3)
The number of ticks on the time axis, independent of the time
            range. The more ticks there are, the shorter the date_format
            string should be.
date_format : string (default: '%d-%m-%Y %H:%M')
The string to define the format of the date and time. See
https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior
for more information.
"""
dates = self.subset.index.get_level_values('datetime').unique()
if tick_distance is None:
tick_distance = int(len(dates) / number_autoticks) - 1
self.ax.set_xticks(range(0, len(dates), tick_distance),
minor=False)
self.ax.set_xticklabels(
[item.strftime(date_format)
for item in dates.tolist()[0::tick_distance]],
rotation=0, minor=False)
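    # Example (assuming an hourly datetime index):
    #     dfplot.set_datetime_ticks(tick_distance=24, date_format='%d-%m-%Y')
    # places one tick per day and labels it with the date only.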
def outside_legend(self, reverse=False, plotshare=0.9, **kwargs):
r""" Move the legend outside the plot. Bases on the ideas of Joe
Kington. See
http://stackoverflow.com/questions/4700614/how-to-put-the-legend-out-of-the-plot
for more information.
Parameters
----------
        reverse : boolean (default: False)
            Print the legend in reverse order. This is useful for stack plots,
            so the legend appears in the same order as the stacks.
plotshare : real (default: 0.9)
Share of the plot area to create space for the legend (0 to 1).
        loc : string (default: 'center left')
            Location of the legend.
bbox_to_anchor : tuple (default: (1, 0.5))
Set the anchor for the legend.
ncol : integer (default: 1)
Number of columns of the legend.
        handles : list of handles
            A list of handles if they have already been modified by another
            function or method. Normally these handles will be taken
            automatically from the artist object.
        labels : list of labels
            A list of labels if they have already been modified by another
            function or method. Normally these labels will be taken
            automatically from the artist object.
Note
----
All keyword arguments (kwargs) will be directly passed to the
matplotlib legend class. See
http://matplotlib.org/api/legend_api.html#matplotlib.legend.Legend
for more parameters.
"""
kwargs.setdefault('loc', 'center left')
kwargs.setdefault('bbox_to_anchor', (1, 0.5))
kwargs.setdefault('ncol', 1)
handles = kwargs.pop('handles', self.ax.get_legend_handles_labels()[0])
labels = kwargs.pop('labels', self.ax.get_legend_handles_labels()[1])
if reverse:
handles.reverse()
labels.reverse()
box = self.ax.get_position()
self.ax.set_position([box.x0, box.y0, box.width * plotshare,
box.height])
self.ax.legend(handles, labels, **kwargs)
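    # Minimal sketch (with a hypothetical DataFramePlot instance `dfplot`):
    #     dfplot.outside_legend(reverse=True, ncol=2)
    # shrinks the axes to 90 % of their width and places a two-column legend
    # to the right of the plot.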
if __name__ == '__main__':
pass | gpl-3.0 |
caisq/tensorflow | tensorflow/python/estimator/canned/dnn_test.py | 25 | 16780 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for dnn.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import tempfile
import numpy as np
import six
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.python.estimator.canned import dnn
from tensorflow.python.estimator.canned import dnn_testing_utils
from tensorflow.python.estimator.canned import prediction_keys
from tensorflow.python.estimator.export import export
from tensorflow.python.estimator.inputs import numpy_io
from tensorflow.python.estimator.inputs import pandas_io
from tensorflow.python.feature_column import feature_column
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.summary.writer import writer_cache
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import queue_runner
try:
# pylint: disable=g-import-not-at-top
import pandas as pd
HAS_PANDAS = True
except IOError:
# Pandas writes a temporary file during import. If it fails, don't use pandas.
HAS_PANDAS = False
except ImportError:
HAS_PANDAS = False
def _dnn_classifier_fn(*args, **kwargs):
return dnn.DNNClassifier(*args, **kwargs)
class DNNModelFnTest(dnn_testing_utils.BaseDNNModelFnTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNModelFnTest.__init__(self, dnn._dnn_model_fn)
class DNNLogitFnTest(dnn_testing_utils.BaseDNNLogitFnTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNLogitFnTest.__init__(self,
dnn._dnn_logit_fn_builder)
class DNNWarmStartingTest(dnn_testing_utils.BaseDNNWarmStartingTest,
test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNWarmStartingTest.__init__(self, _dnn_classifier_fn,
_dnn_regressor_fn)
class DNNClassifierEvaluateTest(
dnn_testing_utils.BaseDNNClassifierEvaluateTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNClassifierEvaluateTest.__init__(
self, _dnn_classifier_fn)
class DNNClassifierPredictTest(
dnn_testing_utils.BaseDNNClassifierPredictTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNClassifierPredictTest.__init__(
self, _dnn_classifier_fn)
class DNNClassifierTrainTest(
dnn_testing_utils.BaseDNNClassifierTrainTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNClassifierTrainTest.__init__(
self, _dnn_classifier_fn)
def _dnn_regressor_fn(*args, **kwargs):
return dnn.DNNRegressor(*args, **kwargs)
class DNNRegressorEvaluateTest(
dnn_testing_utils.BaseDNNRegressorEvaluateTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNRegressorEvaluateTest.__init__(
self, _dnn_regressor_fn)
class DNNRegressorPredictTest(
dnn_testing_utils.BaseDNNRegressorPredictTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNRegressorPredictTest.__init__(
self, _dnn_regressor_fn)
class DNNRegressorTrainTest(
dnn_testing_utils.BaseDNNRegressorTrainTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNRegressorTrainTest.__init__(
self, _dnn_regressor_fn)
def _queue_parsed_features(feature_map):
tensors_to_enqueue = []
keys = []
for key, tensor in six.iteritems(feature_map):
keys.append(key)
tensors_to_enqueue.append(tensor)
queue_dtypes = [x.dtype for x in tensors_to_enqueue]
input_queue = data_flow_ops.FIFOQueue(capacity=100, dtypes=queue_dtypes)
queue_runner.add_queue_runner(
queue_runner.QueueRunner(
input_queue,
[input_queue.enqueue(tensors_to_enqueue)]))
dequeued_tensors = input_queue.dequeue()
return {keys[i]: dequeued_tensors[i] for i in range(len(dequeued_tensors))}
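# _queue_parsed_features (above) pushes the parsed feature tensors through a
# FIFOQueue with an attached QueueRunner, so each session run of the
# *_input_fn helpers defined in the integration tests below dequeues one
# parsed feature dict.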
class DNNRegressorIntegrationTest(test.TestCase):
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
writer_cache.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def _test_complete_flow(
self, train_input_fn, eval_input_fn, predict_input_fn, input_dimension,
label_dimension, batch_size):
feature_columns = [
feature_column.numeric_column('x', shape=(input_dimension,))]
est = dnn.DNNRegressor(
hidden_units=(2, 2),
feature_columns=feature_columns,
label_dimension=label_dimension,
model_dir=self._model_dir)
# TRAIN
num_steps = 10
est.train(train_input_fn, steps=num_steps)
    # EVALUATE
scores = est.evaluate(eval_input_fn)
self.assertEqual(num_steps, scores[ops.GraphKeys.GLOBAL_STEP])
self.assertIn('loss', six.iterkeys(scores))
# PREDICT
predictions = np.array([
x[prediction_keys.PredictionKeys.PREDICTIONS]
for x in est.predict(predict_input_fn)
])
self.assertAllEqual((batch_size, label_dimension), predictions.shape)
# EXPORT
feature_spec = feature_column.make_parse_example_spec(feature_columns)
serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
feature_spec)
export_dir = est.export_savedmodel(tempfile.mkdtemp(),
serving_input_receiver_fn)
self.assertTrue(gfile.Exists(export_dir))
def test_numpy_input_fn(self):
"""Tests complete flow with numpy_input_fn."""
label_dimension = 2
batch_size = 10
data = np.linspace(0., 2., batch_size * label_dimension, dtype=np.float32)
data = data.reshape(batch_size, label_dimension)
# learn y = x
train_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=data,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=data,
batch_size=batch_size,
shuffle=False)
predict_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
batch_size=batch_size,
shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=label_dimension,
label_dimension=label_dimension,
batch_size=batch_size)
def test_pandas_input_fn(self):
"""Tests complete flow with pandas_input_fn."""
if not HAS_PANDAS:
return
label_dimension = 1
batch_size = 10
data = np.linspace(0., 2., batch_size, dtype=np.float32)
x = pd.DataFrame({'x': data})
y = pd.Series(data)
train_input_fn = pandas_io.pandas_input_fn(
x=x,
y=y,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = pandas_io.pandas_input_fn(
x=x,
y=y,
batch_size=batch_size,
shuffle=False)
predict_input_fn = pandas_io.pandas_input_fn(
x=x,
batch_size=batch_size,
shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=label_dimension,
label_dimension=label_dimension,
batch_size=batch_size)
def test_input_fn_from_parse_example(self):
"""Tests complete flow with input_fn constructed from parse_example."""
label_dimension = 2
batch_size = 10
data = np.linspace(0., 2., batch_size * label_dimension, dtype=np.float32)
data = data.reshape(batch_size, label_dimension)
serialized_examples = []
for datum in data:
example = example_pb2.Example(features=feature_pb2.Features(
feature={
'x': feature_pb2.Feature(
float_list=feature_pb2.FloatList(value=datum)),
'y': feature_pb2.Feature(
float_list=feature_pb2.FloatList(value=datum)),
}))
serialized_examples.append(example.SerializeToString())
feature_spec = {
'x': parsing_ops.FixedLenFeature([label_dimension], dtypes.float32),
'y': parsing_ops.FixedLenFeature([label_dimension], dtypes.float32),
}
def _train_input_fn():
feature_map = parsing_ops.parse_example(serialized_examples, feature_spec)
features = _queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _eval_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = _queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _predict_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = _queue_parsed_features(feature_map)
features.pop('y')
return features, None
self._test_complete_flow(
train_input_fn=_train_input_fn,
eval_input_fn=_eval_input_fn,
predict_input_fn=_predict_input_fn,
input_dimension=label_dimension,
label_dimension=label_dimension,
batch_size=batch_size)
class DNNClassifierIntegrationTest(test.TestCase):
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
writer_cache.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def _as_label(self, data_in_float):
return np.rint(data_in_float).astype(np.int64)
def _test_complete_flow(
self, train_input_fn, eval_input_fn, predict_input_fn, input_dimension,
n_classes, batch_size):
feature_columns = [
feature_column.numeric_column('x', shape=(input_dimension,))]
est = dnn.DNNClassifier(
hidden_units=(2, 2),
feature_columns=feature_columns,
n_classes=n_classes,
model_dir=self._model_dir)
# TRAIN
num_steps = 10
est.train(train_input_fn, steps=num_steps)
    # EVALUATE
scores = est.evaluate(eval_input_fn)
self.assertEqual(num_steps, scores[ops.GraphKeys.GLOBAL_STEP])
self.assertIn('loss', six.iterkeys(scores))
# PREDICT
predicted_proba = np.array([
x[prediction_keys.PredictionKeys.PROBABILITIES]
for x in est.predict(predict_input_fn)
])
self.assertAllEqual((batch_size, n_classes), predicted_proba.shape)
# EXPORT
feature_spec = feature_column.make_parse_example_spec(feature_columns)
serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
feature_spec)
export_dir = est.export_savedmodel(tempfile.mkdtemp(),
serving_input_receiver_fn)
self.assertTrue(gfile.Exists(export_dir))
def test_numpy_input_fn(self):
"""Tests complete flow with numpy_input_fn."""
n_classes = 3
input_dimension = 2
batch_size = 10
data = np.linspace(
0., n_classes - 1., batch_size * input_dimension, dtype=np.float32)
x_data = data.reshape(batch_size, input_dimension)
y_data = np.reshape(self._as_label(data[:batch_size]), (batch_size, 1))
# learn y = x
train_input_fn = numpy_io.numpy_input_fn(
x={'x': x_data},
y=y_data,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = numpy_io.numpy_input_fn(
x={'x': x_data},
y=y_data,
batch_size=batch_size,
shuffle=False)
predict_input_fn = numpy_io.numpy_input_fn(
x={'x': x_data},
batch_size=batch_size,
shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=input_dimension,
n_classes=n_classes,
batch_size=batch_size)
def test_pandas_input_fn(self):
"""Tests complete flow with pandas_input_fn."""
if not HAS_PANDAS:
return
input_dimension = 1
n_classes = 3
batch_size = 10
data = np.linspace(0., n_classes - 1., batch_size, dtype=np.float32)
x = pd.DataFrame({'x': data})
y = pd.Series(self._as_label(data))
train_input_fn = pandas_io.pandas_input_fn(
x=x,
y=y,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = pandas_io.pandas_input_fn(
x=x,
y=y,
batch_size=batch_size,
shuffle=False)
predict_input_fn = pandas_io.pandas_input_fn(
x=x,
batch_size=batch_size,
shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=input_dimension,
n_classes=n_classes,
batch_size=batch_size)
def test_input_fn_from_parse_example(self):
"""Tests complete flow with input_fn constructed from parse_example."""
input_dimension = 2
n_classes = 3
batch_size = 10
data = np.linspace(
0., n_classes - 1., batch_size * input_dimension, dtype=np.float32)
data = data.reshape(batch_size, input_dimension)
serialized_examples = []
for datum in data:
example = example_pb2.Example(features=feature_pb2.Features(
feature={
'x':
feature_pb2.Feature(float_list=feature_pb2.FloatList(
value=datum)),
'y':
feature_pb2.Feature(int64_list=feature_pb2.Int64List(
value=self._as_label(datum[:1]))),
}))
serialized_examples.append(example.SerializeToString())
feature_spec = {
'x': parsing_ops.FixedLenFeature([input_dimension], dtypes.float32),
'y': parsing_ops.FixedLenFeature([1], dtypes.int64),
}
def _train_input_fn():
feature_map = parsing_ops.parse_example(serialized_examples, feature_spec)
features = _queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _eval_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = _queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _predict_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = _queue_parsed_features(feature_map)
features.pop('y')
return features, None
self._test_complete_flow(
train_input_fn=_train_input_fn,
eval_input_fn=_eval_input_fn,
predict_input_fn=_predict_input_fn,
input_dimension=input_dimension,
n_classes=n_classes,
batch_size=batch_size)
if __name__ == '__main__':
test.main()
| apache-2.0 |
samzhang111/scikit-learn | examples/calibration/plot_calibration_curve.py | 225 | 5903 | """
==============================
Probability Calibration curves
==============================
When performing classification one often wants to predict not only the class
label, but also the associated probability. This probability gives some
kind of confidence on the prediction. This example demonstrates how to display
how well calibrated the predicted probabilities are and how to calibrate an
uncalibrated classifier.
The experiment is performed on an artificial dataset for binary classification
with 100,000 samples (1,000 of them are used for model fitting) with 20
features. Of the 20 features, only 2 are informative and 10 are redundant. The
first figure shows the estimated probabilities obtained with logistic
regression, Gaussian naive Bayes, and Gaussian naive Bayes with both isotonic
calibration and sigmoid calibration. The calibration performance is evaluated
with Brier score, reported in the legend (the smaller the better). One can
observe here that logistic regression is well calibrated while raw Gaussian
naive Bayes performs very badly. This is because of the redundant features
which violate the assumption of feature-independence and result in an overly
confident classifier, which is indicated by the typical transposed-sigmoid
curve.
Calibration of the probabilities of Gaussian naive Bayes with isotonic
regression can fix this issue as can be seen from the nearly diagonal
calibration curve. Sigmoid calibration also improves the Brier score slightly,
albeit not as strongly as the non-parametric isotonic regression. This can be
attributed to the fact that we have plenty of calibration data such that the
greater flexibility of the non-parametric model can be exploited.
The second figure shows the calibration curve of a linear support-vector
classifier (LinearSVC). LinearSVC shows the opposite behavior as Gaussian
naive Bayes: the calibration curve has a sigmoid curve, which is typical for
an under-confident classifier. In the case of LinearSVC, this is caused by the
margin property of the hinge loss, which lets the model focus on hard samples
that are close to the decision boundary (the support vectors).
Both kinds of calibration can fix this issue and yield nearly identical
results. This shows that sigmoid calibration can deal with situations where
the calibration curve of the base classifier is sigmoid (e.g., for LinearSVC)
but not where it is transposed-sigmoid (e.g., Gaussian naive Bayes).
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# Jan Hendrik Metzen <[email protected]>
# License: BSD Style.
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import LinearSVC
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import (brier_score_loss, precision_score, recall_score,
f1_score)
from sklearn.calibration import CalibratedClassifierCV, calibration_curve
from sklearn.cross_validation import train_test_split
# Create dataset of classification task with many redundant and few
# informative features
X, y = datasets.make_classification(n_samples=100000, n_features=20,
n_informative=2, n_redundant=10,
random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.99,
random_state=42)
def plot_calibration_curve(est, name, fig_index):
"""Plot calibration curve for est w/o and with calibration. """
# Calibrated with isotonic calibration
isotonic = CalibratedClassifierCV(est, cv=2, method='isotonic')
# Calibrated with sigmoid calibration
sigmoid = CalibratedClassifierCV(est, cv=2, method='sigmoid')
# Logistic regression with no calibration as baseline
lr = LogisticRegression(C=1., solver='lbfgs')
fig = plt.figure(fig_index, figsize=(10, 10))
ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2)
ax2 = plt.subplot2grid((3, 1), (2, 0))
ax1.plot([0, 1], [0, 1], "k:", label="Perfectly calibrated")
for clf, name in [(lr, 'Logistic'),
(est, name),
(isotonic, name + ' + Isotonic'),
(sigmoid, name + ' + Sigmoid')]:
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
if hasattr(clf, "predict_proba"):
prob_pos = clf.predict_proba(X_test)[:, 1]
else: # use decision function
prob_pos = clf.decision_function(X_test)
prob_pos = \
(prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min())
clf_score = brier_score_loss(y_test, prob_pos, pos_label=y.max())
print("%s:" % name)
print("\tBrier: %1.3f" % (clf_score))
print("\tPrecision: %1.3f" % precision_score(y_test, y_pred))
print("\tRecall: %1.3f" % recall_score(y_test, y_pred))
print("\tF1: %1.3f\n" % f1_score(y_test, y_pred))
fraction_of_positives, mean_predicted_value = \
calibration_curve(y_test, prob_pos, n_bins=10)
ax1.plot(mean_predicted_value, fraction_of_positives, "s-",
label="%s (%1.3f)" % (name, clf_score))
ax2.hist(prob_pos, range=(0, 1), bins=10, label=name,
histtype="step", lw=2)
ax1.set_ylabel("Fraction of positives")
ax1.set_ylim([-0.05, 1.05])
ax1.legend(loc="lower right")
ax1.set_title('Calibration plots (reliability curve)')
ax2.set_xlabel("Mean predicted value")
ax2.set_ylabel("Count")
ax2.legend(loc="upper center", ncol=2)
plt.tight_layout()
# Plot calibration curve for Gaussian Naive Bayes
plot_calibration_curve(GaussianNB(), "Naive Bayes", 1)
# Plot calibration curve for Linear SVC
plot_calibration_curve(LinearSVC(), "SVC", 2)
plt.show()
| bsd-3-clause |
allenxwang/kafka | system_test/utils/metrics.py | 28 | 13903 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#!/usr/bin/env python
# ===================================
# file: metrics.py
# ===================================
import inspect
import json
import logging
import os
import signal
import subprocess
import sys
import traceback
import csv
import time
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
from collections import namedtuple
import numpy
from pyh import *
import kafka_system_test_utils
import system_test_utils
logger = logging.getLogger("namedLogger")
thisClassName = '(metrics)'
d = {'name_of_class': thisClassName}
attributeNameToNameInReportedFileMap = {
'Min': 'min',
'Max': 'max',
'Mean': 'mean',
'50thPercentile': 'median',
'StdDev': 'stddev',
'95thPercentile': '95%',
'99thPercentile': '99%',
'999thPercentile': '99.9%',
'Count': 'count',
'OneMinuteRate': '1 min rate',
'MeanRate': 'mean rate',
'FiveMinuteRate': '5 min rate',
'FifteenMinuteRate': '15 min rate',
'Value': 'value'
}
def getCSVFileNameFromMetricsMbeanName(mbeanName):
return mbeanName.replace(":type=", ".").replace(",name=", ".") + ".csv"
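# For example, the mbean name
# "kafka.server:type=BrokerTopicMetrics,name=MessagesInPerSec" becomes the
# file name "kafka.server.BrokerTopicMetrics.MessagesInPerSec.csv".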
def read_metrics_definition(metricsFile):
metricsFileData = open(metricsFile, "r").read()
metricsJsonData = json.loads(metricsFileData)
allDashboards = metricsJsonData['dashboards']
allGraphs = []
for dashboard in allDashboards:
dashboardName = dashboard['name']
graphs = dashboard['graphs']
for graph in graphs:
bean = graph['bean_name']
allGraphs.append(graph)
attributes = graph['attributes']
#print "Filtering on attributes " + attributes
return allGraphs
def get_dashboard_definition(metricsFile, role):
metricsFileData = open(metricsFile, "r").read()
metricsJsonData = json.loads(metricsFileData)
allDashboards = metricsJsonData['dashboards']
dashboardsForRole = []
for dashboard in allDashboards:
if dashboard['role'] == role:
dashboardsForRole.append(dashboard)
return dashboardsForRole
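# Illustrative shape of the metrics JSON consumed above (field names are taken
# from the lookups in this module; the concrete values are made up):
# {"dashboards": [
#     {"name": "broker metrics", "role": "broker",
#      "graphs": [{"graph_name": "MessagesIn", "y_label": "count",
#                  "bean_name": "kafka.server:type=BrokerTopicMetrics,name=MessagesInPerSec",
#                  "attributes": "Count,OneMinuteRate"}]}]}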
def ensure_valid_headers(headers, attributes):
if headers[0] != "# time":
raise Exception("First column should be time")
for header in headers:
logger.debug(header, extra=d)
# there should be exactly one column with a name that matches attributes
try:
attributeColumnIndex = headers.index(attributes)
return attributeColumnIndex
except ValueError as ve:
#print "#### attributes : ", attributes
#print "#### headers : ", headers
raise Exception("There should be exactly one column that matches attribute: {0} in".format(attributes) +
" headers: {0}".format(",".join(headers)))
def plot_graphs(inputCsvFiles, labels, title, xLabel, yLabel, attribute, outputGraphFile):
# create empty plot
fig=plt.figure()
fig.subplots_adjust(bottom=0.2)
ax=fig.add_subplot(111)
labelx = -0.3 # axes coords
ax.set_xlabel(xLabel)
ax.set_ylabel(yLabel)
ax.grid()
#ax.yaxis.set_label_coords(labelx, 0.5)
Coordinates = namedtuple("Coordinates", 'x y')
plots = []
coordinates = []
# read data for all files, organize by label in a dict
for fileAndLabel in zip(inputCsvFiles, labels):
inputCsvFile = fileAndLabel[0]
label = fileAndLabel[1]
csv_reader = list(csv.reader(open(inputCsvFile, "rb")))
x,y = [],[]
xticks_labels = []
try:
# read first line as the headers
headers = csv_reader.pop(0)
attributeColumnIndex = ensure_valid_headers(headers, attributeNameToNameInReportedFileMap[attribute])
logger.debug("Column index for attribute {0} is {1}".format(attribute, attributeColumnIndex), extra=d)
start_time = (int)(os.path.getctime(inputCsvFile) * 1000)
int(csv_reader[0][0])
for line in csv_reader:
if(len(line) == 0):
continue
yVal = float(line[attributeColumnIndex])
xVal = int(line[0])
y.append(yVal)
epoch= start_time + int(line[0])
x.append(xVal)
xticks_labels.append(time.strftime("%H:%M:%S", time.localtime(epoch)))
coordinates.append(Coordinates(xVal, yVal))
p1 = ax.plot(x,y)
plots.append(p1)
except Exception as e:
logger.error("ERROR while plotting data for {0}: {1}".format(inputCsvFile, e), extra=d)
traceback.print_exc()
# find xmin, xmax, ymin, ymax from all csv files
xmin = min(map(lambda coord: coord.x, coordinates))
xmax = max(map(lambda coord: coord.x, coordinates))
ymin = min(map(lambda coord: coord.y, coordinates))
ymax = max(map(lambda coord: coord.y, coordinates))
# set x and y axes limits
plt.xlim(xmin, xmax)
plt.ylim(ymin, ymax)
# set ticks accordingly
xticks = numpy.arange(xmin, xmax, 0.2*xmax)
# yticks = numpy.arange(ymin, ymax)
plt.xticks(xticks,xticks_labels,rotation=17)
# plt.yticks(yticks)
plt.legend(plots,labels, loc=2)
plt.title(title)
plt.savefig(outputGraphFile)
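# Hedged example call (all file and label names are hypothetical):
# plot_graphs(["metrics/broker-1.csv", "metrics/broker-2.csv"],
#             ["broker-1", "broker-2"], "MessagesIn-Count", "time", "count",
#             "Count", "messages_in_count.svg")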
def draw_all_graphs(metricsDescriptionFile, testcaseEnv, clusterConfig):
# go through each role and plot graphs for the role's metrics
roles = set(map(lambda config: config['role'], clusterConfig))
for role in roles:
dashboards = get_dashboard_definition(metricsDescriptionFile, role)
entities = kafka_system_test_utils.get_entities_for_role(clusterConfig, role)
for dashboard in dashboards:
graphs = dashboard['graphs']
# draw each graph for all entities
draw_graph_for_role(graphs, entities, role, testcaseEnv)
def draw_graph_for_role(graphs, entities, role, testcaseEnv):
for graph in graphs:
graphName = graph['graph_name']
yLabel = graph['y_label']
inputCsvFiles = []
graphLegendLabels = []
for entity in entities:
entityMetricsDir = kafka_system_test_utils.get_testcase_config_log_dir_pathname(testcaseEnv, role, entity['entity_id'], "metrics")
entityMetricCsvFile = entityMetricsDir + "/" + getCSVFileNameFromMetricsMbeanName(graph['bean_name'])
if(not os.path.exists(entityMetricCsvFile)):
logger.warn("The file {0} does not exist for plotting".format(entityMetricCsvFile), extra=d)
else:
inputCsvFiles.append(entityMetricCsvFile)
graphLegendLabels.append(role + "-" + entity['entity_id'])
# print "Plotting graph for metric {0} on entity {1}".format(graph['graph_name'], entity['entity_id'])
try:
# plot one graph per mbean attribute
labels = graph['y_label'].split(',')
fullyQualifiedAttributeNames = map(lambda attribute: graph['bean_name'] + ':' + attribute,
graph['attributes'].split(','))
attributes = graph['attributes'].split(',')
for labelAndAttribute in zip(labels, fullyQualifiedAttributeNames, attributes):
outputGraphFile = testcaseEnv.testCaseDashboardsDir + "/" + role + "/" + labelAndAttribute[1] + ".svg"
plot_graphs(inputCsvFiles, graphLegendLabels, graph['graph_name'] + '-' + labelAndAttribute[2],
"time", labelAndAttribute[0], labelAndAttribute[2], outputGraphFile)
# print "Finished plotting graph for metric {0} on entity {1}".format(graph['graph_name'], entity['entity_id'])
except Exception as e:
logger.error("ERROR while plotting graph {0}: {1}".format(outputGraphFile, e), extra=d)
traceback.print_exc()
def build_all_dashboards(metricsDefinitionFile, testcaseDashboardsDir, clusterConfig):
metricsHtmlFile = testcaseDashboardsDir + "/metrics.html"
centralDashboard = PyH('Kafka Metrics Dashboard')
centralDashboard << h1('Kafka Metrics Dashboard', cl='center')
roles = set(map(lambda config: config['role'], clusterConfig))
for role in roles:
entities = kafka_system_test_utils.get_entities_for_role(clusterConfig, role)
dashboardPagePath = build_dashboard_for_role(metricsDefinitionFile, role,
entities, testcaseDashboardsDir)
centralDashboard << a(role, href = dashboardPagePath)
centralDashboard << br()
centralDashboard.printOut(metricsHtmlFile)
def build_dashboard_for_role(metricsDefinitionFile, role, entities, testcaseDashboardsDir):
    # build all dashboards for the input entities based on their role. It can be one of kafka, zookeeper, producer
    # or consumer
dashboards = get_dashboard_definition(metricsDefinitionFile, role)
entityDashboard = PyH('Kafka Metrics Dashboard for ' + role)
entityDashboard << h1('Kafka Metrics Dashboard for ' + role, cl='center')
entityDashboardHtml = testcaseDashboardsDir + "/" + role + "-dashboards.html"
for dashboard in dashboards:
# place the graph svg files in this dashboard
allGraphs = dashboard['graphs']
for graph in allGraphs:
attributes = map(lambda attribute: graph['bean_name'] + ':' + attribute,
graph['attributes'].split(','))
for attribute in attributes:
graphFileLocation = testcaseDashboardsDir + "/" + role + "/" + attribute + ".svg"
entityDashboard << embed(src = graphFileLocation, type = "image/svg+xml")
entityDashboard.printOut(entityDashboardHtml)
return entityDashboardHtml
def start_metrics_collection(jmxHost, jmxPort, role, entityId, systemTestEnv, testcaseEnv):
logger.info("starting metrics collection on jmx port : " + jmxPort, extra=d)
jmxUrl = "service:jmx:rmi:///jndi/rmi://" + jmxHost + ":" + jmxPort + "/jmxrmi"
clusterConfig = systemTestEnv.clusterEntityConfigDictList
metricsDefinitionFile = systemTestEnv.METRICS_PATHNAME
entityMetricsDir = kafka_system_test_utils.get_testcase_config_log_dir_pathname(testcaseEnv, role, entityId, "metrics")
dashboardsForRole = get_dashboard_definition(metricsDefinitionFile, role)
mbeansForRole = get_mbeans_for_role(dashboardsForRole)
kafkaHome = system_test_utils.get_data_by_lookup_keyval(clusterConfig, "entity_id", entityId, "kafka_home")
javaHome = system_test_utils.get_data_by_lookup_keyval(clusterConfig, "entity_id", entityId, "java_home")
for mbean in mbeansForRole:
outputCsvFile = entityMetricsDir + "/" + mbean + ".csv"
startMetricsCmdList = ["ssh " + jmxHost,
"'JAVA_HOME=" + javaHome,
"JMX_PORT= " + kafkaHome + "/bin/kafka-run-class.sh kafka.tools.JmxTool",
"--jmx-url " + jmxUrl,
"--object-name " + mbean + " 1> ",
outputCsvFile + " & echo pid:$! > ",
entityMetricsDir + "/entity_pid'"]
startMetricsCommand = " ".join(startMetricsCmdList)
logger.debug("executing command: [" + startMetricsCommand + "]", extra=d)
system_test_utils.async_sys_call(startMetricsCommand)
time.sleep(1)
pidCmdStr = "ssh " + jmxHost + " 'cat " + entityMetricsDir + "/entity_pid' 2> /dev/null"
logger.debug("executing command: [" + pidCmdStr + "]", extra=d)
subproc = system_test_utils.sys_call_return_subproc(pidCmdStr)
# keep track of JMX ppid in a dictionary of entity_id to list of JMX ppid
# testcaseEnv.entityJmxParentPidDict:
# key: entity_id
# val: list of JMX ppid associated to that entity_id
# { 1: [1234, 1235, 1236], 2: [2234, 2235, 2236], ... }
for line in subproc.stdout.readlines():
line = line.rstrip('\n')
logger.debug("line: [" + line + "]", extra=d)
if line.startswith("pid"):
logger.debug("found pid line: [" + line + "]", extra=d)
tokens = line.split(':')
thisPid = tokens[1]
if entityId not in testcaseEnv.entityJmxParentPidDict:
testcaseEnv.entityJmxParentPidDict[entityId] = []
testcaseEnv.entityJmxParentPidDict[entityId].append(thisPid)
#print "\n#### testcaseEnv.entityJmxParentPidDict ", testcaseEnv.entityJmxParentPidDict, "\n"
def stop_metrics_collection(jmxHost, jmxPort):
logger.info("stopping metrics collection on " + jmxHost + ":" + jmxPort, extra=d)
system_test_utils.sys_call("ps -ef | grep JmxTool | grep -v grep | grep " + jmxPort + " | awk '{print $2}' | xargs kill -9")
def get_mbeans_for_role(dashboardsForRole):
graphs = reduce(lambda x,y: x+y, map(lambda dashboard: dashboard['graphs'], dashboardsForRole))
return set(map(lambda metric: metric['bean_name'], graphs))
| apache-2.0 |
simon-pepin/scikit-learn | sklearn/tree/tree.py | 113 | 34767 | """
This module gathers tree-based methods, including decision, regression and
randomized trees. Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <[email protected]>
# Peter Prettenhofer <[email protected]>
# Brian Holt <[email protected]>
# Noel Dawe <[email protected]>
# Satrajit Gosh <[email protected]>
# Joly Arnaud <[email protected]>
# Fares Hedayati <[email protected]>
#
# Licence: BSD 3 clause
from __future__ import division
import numbers
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from ..base import BaseEstimator, ClassifierMixin, RegressorMixin
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import check_array, check_random_state, compute_sample_weight
from ..utils.validation import NotFittedError
from ._tree import Criterion
from ._tree import Splitter
from ._tree import DepthFirstTreeBuilder, BestFirstTreeBuilder
from ._tree import Tree
from . import _tree
__all__ = ["DecisionTreeClassifier",
"DecisionTreeRegressor",
"ExtraTreeClassifier",
"ExtraTreeRegressor"]
# =============================================================================
# Types and constants
# =============================================================================
DTYPE = _tree.DTYPE
DOUBLE = _tree.DOUBLE
CRITERIA_CLF = {"gini": _tree.Gini, "entropy": _tree.Entropy}
CRITERIA_REG = {"mse": _tree.MSE, "friedman_mse": _tree.FriedmanMSE}
DENSE_SPLITTERS = {"best": _tree.BestSplitter,
"presort-best": _tree.PresortBestSplitter,
"random": _tree.RandomSplitter}
SPARSE_SPLITTERS = {"best": _tree.BestSparseSplitter,
"random": _tree.RandomSparseSplitter}
# =============================================================================
# Base decision tree
# =============================================================================
class BaseDecisionTree(six.with_metaclass(ABCMeta, BaseEstimator,
_LearntSelectorMixin)):
"""Base class for decision trees.
Warning: This class should not be used directly.
Use derived classes instead.
"""
@abstractmethod
def __init__(self,
criterion,
splitter,
max_depth,
min_samples_split,
min_samples_leaf,
min_weight_fraction_leaf,
max_features,
max_leaf_nodes,
random_state,
class_weight=None):
self.criterion = criterion
self.splitter = splitter
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.random_state = random_state
self.max_leaf_nodes = max_leaf_nodes
self.class_weight = class_weight
self.n_features_ = None
self.n_outputs_ = None
self.classes_ = None
self.n_classes_ = None
self.tree_ = None
self.max_features_ = None
def fit(self, X, y, sample_weight=None, check_input=True):
"""Build a decision tree from the training set (X, y).
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The training input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csc_matrix``.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target values (class labels in classification, real numbers in
regression). In the regression case, use ``dtype=np.float64`` and
``order='C'`` for maximum efficiency.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Returns
-------
self : object
Returns self.
"""
random_state = check_random_state(self.random_state)
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csc")
if issparse(X):
X.sort_indices()
if X.indices.dtype != np.intc or X.indptr.dtype != np.intc:
raise ValueError("No support for np.int64 index based "
"sparse matrices")
# Determine output settings
n_samples, self.n_features_ = X.shape
is_classification = isinstance(self, ClassifierMixin)
y = np.atleast_1d(y)
expanded_class_weight = None
if y.ndim == 1:
            # reshape is necessary to preserve the data contiguity, which
            # [:, np.newaxis] does not.
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
if is_classification:
y = np.copy(y)
self.classes_ = []
self.n_classes_ = []
if self.class_weight is not None:
y_original = np.copy(y)
y_store_unique_indices = np.zeros(y.shape, dtype=np.int)
for k in range(self.n_outputs_):
classes_k, y_store_unique_indices[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
y = y_store_unique_indices
if self.class_weight is not None:
expanded_class_weight = compute_sample_weight(
self.class_weight, y_original)
else:
self.classes_ = [None] * self.n_outputs_
self.n_classes_ = [1] * self.n_outputs_
self.n_classes_ = np.array(self.n_classes_, dtype=np.intp)
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
# Check parameters
max_depth = ((2 ** 31) - 1 if self.max_depth is None
else self.max_depth)
max_leaf_nodes = (-1 if self.max_leaf_nodes is None
else self.max_leaf_nodes)
if isinstance(self.max_features, six.string_types):
if self.max_features == "auto":
if is_classification:
max_features = max(1, int(np.sqrt(self.n_features_)))
else:
max_features = self.n_features_
elif self.max_features == "sqrt":
max_features = max(1, int(np.sqrt(self.n_features_)))
elif self.max_features == "log2":
max_features = max(1, int(np.log2(self.n_features_)))
else:
raise ValueError(
'Invalid value for max_features. Allowed string '
'values are "auto", "sqrt" or "log2".')
elif self.max_features is None:
max_features = self.n_features_
elif isinstance(self.max_features, (numbers.Integral, np.integer)):
max_features = self.max_features
else: # float
if self.max_features > 0.0:
max_features = max(1, int(self.max_features * self.n_features_))
else:
max_features = 0
self.max_features_ = max_features
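        # For example, with n_features_ == 9 a classifier resolves "auto",
        # "sqrt" and "log2" to max_features == 3, while None keeps all 9.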
if len(y) != n_samples:
raise ValueError("Number of labels=%d does not match "
"number of samples=%d" % (len(y), n_samples))
if self.min_samples_split <= 0:
raise ValueError("min_samples_split must be greater than zero.")
if self.min_samples_leaf <= 0:
raise ValueError("min_samples_leaf must be greater than zero.")
if not 0 <= self.min_weight_fraction_leaf <= 0.5:
raise ValueError("min_weight_fraction_leaf must in [0, 0.5]")
if max_depth <= 0:
raise ValueError("max_depth must be greater than zero. ")
if not (0 < max_features <= self.n_features_):
raise ValueError("max_features must be in (0, n_features]")
if not isinstance(max_leaf_nodes, (numbers.Integral, np.integer)):
raise ValueError("max_leaf_nodes must be integral number but was "
"%r" % max_leaf_nodes)
if -1 < max_leaf_nodes < 2:
raise ValueError(("max_leaf_nodes {0} must be either smaller than "
"0 or larger than 1").format(max_leaf_nodes))
if sample_weight is not None:
if (getattr(sample_weight, "dtype", None) != DOUBLE or
not sample_weight.flags.contiguous):
sample_weight = np.ascontiguousarray(
sample_weight, dtype=DOUBLE)
if len(sample_weight.shape) > 1:
raise ValueError("Sample weights array has more "
"than one dimension: %d" %
len(sample_weight.shape))
if len(sample_weight) != n_samples:
raise ValueError("Number of weights=%d does not match "
"number of samples=%d" %
(len(sample_weight), n_samples))
if expanded_class_weight is not None:
if sample_weight is not None:
sample_weight = sample_weight * expanded_class_weight
else:
sample_weight = expanded_class_weight
# Set min_weight_leaf from min_weight_fraction_leaf
if self.min_weight_fraction_leaf != 0. and sample_weight is not None:
min_weight_leaf = (self.min_weight_fraction_leaf *
np.sum(sample_weight))
else:
min_weight_leaf = 0.
# Set min_samples_split sensibly
min_samples_split = max(self.min_samples_split,
2 * self.min_samples_leaf)
# Build tree
criterion = self.criterion
if not isinstance(criterion, Criterion):
if is_classification:
criterion = CRITERIA_CLF[self.criterion](self.n_outputs_,
self.n_classes_)
else:
criterion = CRITERIA_REG[self.criterion](self.n_outputs_)
SPLITTERS = SPARSE_SPLITTERS if issparse(X) else DENSE_SPLITTERS
splitter = self.splitter
if not isinstance(self.splitter, Splitter):
splitter = SPLITTERS[self.splitter](criterion,
self.max_features_,
self.min_samples_leaf,
min_weight_leaf,
random_state)
self.tree_ = Tree(self.n_features_, self.n_classes_, self.n_outputs_)
# Use BestFirst if max_leaf_nodes given; use DepthFirst otherwise
if max_leaf_nodes < 0:
builder = DepthFirstTreeBuilder(splitter, min_samples_split,
self.min_samples_leaf,
min_weight_leaf,
max_depth)
else:
builder = BestFirstTreeBuilder(splitter, min_samples_split,
self.min_samples_leaf,
min_weight_leaf,
max_depth,
max_leaf_nodes)
builder.build(self.tree_, X, y, sample_weight)
if self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
def _validate_X_predict(self, X, check_input):
"""Validate X whenever one tries to predict, apply, predict_proba"""
if self.tree_ is None:
raise NotFittedError("Estimator not fitted, "
"call `fit` before exploiting the model.")
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csr")
if issparse(X) and (X.indices.dtype != np.intc or
X.indptr.dtype != np.intc):
raise ValueError("No support for np.int64 index based "
"sparse matrices")
n_features = X.shape[1]
if self.n_features_ != n_features:
raise ValueError("Number of features of the model must "
" match the input. Model n_features is %s and "
" input n_features is %s "
% (self.n_features_, n_features))
return X
def predict(self, X, check_input=True):
"""Predict class or regression value for X.
For a classification model, the predicted class for each sample in X is
returned. For a regression model, the predicted value based on X is
returned.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted classes, or the predict values.
"""
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
n_samples = X.shape[0]
# Classification
if isinstance(self, ClassifierMixin):
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
predictions = np.zeros((n_samples, self.n_outputs_))
for k in range(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(
np.argmax(proba[:, k], axis=1),
axis=0)
return predictions
# Regression
else:
if self.n_outputs_ == 1:
return proba[:, 0]
else:
return proba[:, :, 0]
def apply(self, X, check_input=True):
"""
Returns the index of the leaf that each sample is predicted as.
Parameters
----------
X : array_like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Returns
-------
X_leaves : array_like, shape = [n_samples,]
For each datapoint x in X, return the index of the leaf x
ends up in. Leaves are numbered within
``[0; self.tree_.node_count)``, possibly with gaps in the
numbering.
"""
X = self._validate_X_predict(X, check_input)
return self.tree_.apply(X)
@property
def feature_importances_(self):
"""Return the feature importances.
The importance of a feature is computed as the (normalized) total
reduction of the criterion brought by that feature.
It is also known as the Gini importance.
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.tree_ is None:
raise NotFittedError("Estimator not fitted, call `fit` before"
" `feature_importances_`.")
return self.tree_.compute_feature_importances()
# =============================================================================
# Public estimators
# =============================================================================
class DecisionTreeClassifier(BaseDecisionTree, ClassifierMixin):
"""A decision tree classifier.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : int, optional (default=1)
The minimum number of samples required to be at a leaf node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
class_weight : dict, list of dicts, "balanced" or None, optional
(default=None)
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem),
or a list of arrays of class labels (multi-output problem).
feature_importances_ : array of shape = [n_features]
The feature importances. The higher, the more important the
feature. The importance of a feature is computed as the (normalized)
total reduction of the criterion brought by that feature. It is also
known as the Gini importance [4]_.
max_features_ : int,
The inferred value of max_features.
n_classes_ : int or list
The number of classes (for single output problems),
or a list containing the number of classes for each
output (for multi-output problems).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree object
The underlying Tree object.
See also
--------
DecisionTreeRegressor
References
----------
.. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.cross_validation import cross_val_score
>>> from sklearn.tree import DecisionTreeClassifier
>>> clf = DecisionTreeClassifier(random_state=0)
>>> iris = load_iris()
>>> cross_val_score(clf, iris.data, iris.target, cv=10)
... # doctest: +SKIP
...
array([ 1. , 0.93..., 0.86..., 0.93..., 0.93...,
0.93..., 0.93..., 1. , 0.93..., 1. ])
"""
def __init__(self,
criterion="gini",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None,
class_weight=None):
super(DecisionTreeClassifier, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
random_state=random_state)
def predict_proba(self, X, check_input=True):
"""Predict class probabilities of the input samples X.
The predicted class probability is the fraction of samples of the same
class in a leaf.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
if self.n_outputs_ == 1:
proba = proba[:, :self.n_classes_]
normalizer = proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba /= normalizer
return proba
else:
all_proba = []
for k in range(self.n_outputs_):
proba_k = proba[:, k, :self.n_classes_[k]]
normalizer = proba_k.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba_k /= normalizer
all_proba.append(proba_k)
return all_proba
def predict_log_proba(self, X):
"""Predict class log-probabilities of the input samples X.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class log-probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in range(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
class DecisionTreeRegressor(BaseDecisionTree, RegressorMixin):
"""A decision tree regressor.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : string, optional (default="mse")
The function to measure the quality of a split. The only supported
criterion is "mse" for the mean squared error, which is equal to
variance reduction as feature selection criterion.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : int, optional (default=1)
The minimum number of samples required to be at a leaf node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
feature_importances_ : array of shape = [n_features]
The feature importances.
The higher, the more important the feature.
The importance of a feature is computed as the
(normalized) total reduction of the criterion brought
by that feature. It is also known as the Gini importance [4]_.
max_features_ : int,
The inferred value of max_features.
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree object
The underlying Tree object.
See also
--------
DecisionTreeClassifier
References
----------
.. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_boston
>>> from sklearn.cross_validation import cross_val_score
>>> from sklearn.tree import DecisionTreeRegressor
>>> boston = load_boston()
>>> regressor = DecisionTreeRegressor(random_state=0)
>>> cross_val_score(regressor, boston.data, boston.target, cv=10)
... # doctest: +SKIP
...
array([ 0.61..., 0.57..., -0.34..., 0.41..., 0.75...,
0.07..., 0.29..., 0.33..., -1.42..., -1.77...])
"""
def __init__(self,
criterion="mse",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None):
super(DecisionTreeRegressor, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state)
class ExtraTreeClassifier(DecisionTreeClassifier):
"""An extremely randomized tree classifier.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
    `max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
Read more in the :ref:`User Guide <tree>`.
See also
--------
ExtraTreeRegressor, ExtraTreesClassifier, ExtraTreesRegressor
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
"""
def __init__(self,
criterion="gini",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
random_state=None,
max_leaf_nodes=None,
class_weight=None):
super(ExtraTreeClassifier, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
random_state=random_state)
class ExtraTreeRegressor(DecisionTreeRegressor):
"""An extremely randomized tree regressor.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
    `max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
Read more in the :ref:`User Guide <tree>`.
See also
--------
ExtraTreeClassifier, ExtraTreesClassifier, ExtraTreesRegressor
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
"""
def __init__(self,
criterion="mse",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
random_state=None,
max_leaf_nodes=None):
super(ExtraTreeRegressor, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state)
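# --- Editorial usage sketch (not part of the original module). The docstrings above
# note that extra-trees are intended for use inside ensembles; this sketch wraps
# ExtraTreeRegressor in a BaggingRegressor. The boston data and the parameter values
# are illustrative assumptions only.
def _extra_tree_ensemble_sketch():
    from sklearn.datasets import load_boston
    from sklearn.ensemble import BaggingRegressor
    boston = load_boston()
    ensemble = BaggingRegressor(base_estimator=ExtraTreeRegressor(random_state=0),
                                n_estimators=10, random_state=0)
    ensemble.fit(boston.data, boston.target)
    return ensemble.score(boston.data, boston.target)  # R^2 on the training data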
| bsd-3-clause |
pyspeckit/pyspeckit | pyspeckit/cubes/mapplot.py | 4 | 16759 | """
MapPlot
-------
Make plots of the cube and interactively connect them to spectrum plotting.
This is really an interactive component of the package; nothing in here is
meant for publication-quality plots, but more for user interactive analysis.
That said, the plotter makes use of `APLpy <https://github.com/aplpy/aplpy>`_,
so it is possible to make publication-quality plots.
:author: Adam Ginsburg
:date: 03/17/2011
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
from __future__ import print_function
import matplotlib
import matplotlib.figure
import numpy as np
import copy
import itertools
import six
try:
import astropy.wcs as pywcs
import astropy.io.fits as pyfits
pywcsOK = True
except ImportError:
try:
import pyfits
import pywcs
pywcsOK = True
except ImportError:
pywcsOK = False
try:
import aplpy
icanhasaplpy = True
except: # aplpy fails with generic exceptions instead of ImportError
icanhasaplpy = False
from . import cubes
class MapPlotter(object):
"""
    Class to plot a map view of a spectral cube
See `mapplot` for use documentation; this docstring is only for
initialization.
"""
def __init__(self, Cube=None, figure=None, doplot=False, **kwargs):
"""
Create a map figure for future plotting
"""
import matplotlib.pyplot
self._pyplot = matplotlib.pyplot
# figure out where to put the plot
if isinstance(figure,matplotlib.figure.Figure):
self.figure = figure
elif type(figure) is int:
self.figure = self._pyplot.figure(figure)
else:
self.figure = None
self.axis = None
self.FITSFigure = None
self._click_marks = []
self._circles = []
self._clickX = None
self._clickY = None
self.overplot_colorcycle = itertools.cycle(['b', 'g', 'r', 'c', 'm', 'y'])
self.overplot_linestyle = '-'
self.Cube = Cube
if self.Cube is not None:
self.header = cubes.flatten_header(self.Cube.header, delete=True)
if pywcsOK:
self.wcs = pywcs.WCS(self.header)
if doplot: self.mapplot(**kwargs)
def __call__(self, **kwargs):
""" see mapplot """
return self.mapplot(**kwargs)
def mapplot(self, convention='calabretta', colorbar=True, useaplpy=True,
vmin=None, vmax=None, cmap=None, plotkwargs={}, **kwargs):
"""
Plot up a map based on an input data cube.
The map to be plotted is selected using `makeplane`.
The `estimator` keyword argument is passed to that function.
The plotted map, once shown, is interactive. You can click on it with any
of the three mouse buttons.
Button 1 or keyboard '1':
Plot the selected pixel's spectrum in another window. Mark the
clicked pixel with an 'x'
Button 2 or keyboard 'o':
Overplot a second (or third, fourth, fifth...) spectrum in the
external plot window
Button 3:
Disconnect the interactive viewer
You can also click-and-drag with button 1 to average over a circular
region. This same effect can be achieved by using the 'c' key to
set the /c/enter of a circle and the 'r' key to set its /r/adius (i.e.,
hover over the center and press 'c', then hover some distance away and
press 'r').
Parameters
----------
convention : 'calabretta' or 'griesen'
The default projection to assume for Galactic data when plotting
with aplpy.
colorbar : bool
Whether to show a colorbar
plotkwargs : dict, optional
A dictionary of keyword arguments to pass to aplpy.show_colorscale
or matplotlib.pyplot.imshow
useaplpy : bool
Use aplpy if a FITS header is available
vmin, vmax: float or None
Override values for the vmin/vmax values. Will be automatically
determined if left as None
.. todo:
Allow mapplot in subfigure
"""
if (self.figure is None):
self.figure = self._pyplot.figure()
elif (not self._pyplot.fignum_exists(self.figure.number)):
self.figure = self._pyplot.figure()
else:
self._disconnect()
self.figure.clf()
# this is where the map is created; everything below this is just plotting
self.makeplane(**kwargs)
        # have to pop out estimator so that kwargs can be passed to imshow
if 'estimator' in kwargs:
kwargs.pop('estimator')
# Below here is all plotting stuff
if vmin is None: vmin = self.plane[self.plane==self.plane].min()
if vmax is None: vmax = self.plane[self.plane==self.plane].max()
if icanhasaplpy and useaplpy:
self.fitsfile = pyfits.PrimaryHDU(data=self.plane,header=self.header)
self.FITSFigure = aplpy.FITSFigure(self.fitsfile,figure=self.figure,convention=convention)
self.FITSFigure.show_colorscale(vmin=vmin, vmax=vmax, cmap=cmap, **plotkwargs)
if hasattr(self.FITSFigure, '_ax1'):
self.axis = self.FITSFigure._ax1
else:
self.axis = self.FITSFigure.ax
if colorbar:
try:
self.FITSFigure.add_colorbar()
except Exception as ex:
print("ERROR: Could not create colorbar! Error was %s" % str(ex))
self._origin = 0 # FITS convention
# TODO: set _origin to 1 if using PIXEL units, not real wcs
else:
self.axis = self.figure.add_subplot(111)
if hasattr(self,'colorbar') and self.colorbar is not None:
if self.colorbar.ax in self.axis.figure.axes:
self.axis.figure.delaxes(self.colorbar.ax)
self.axis.imshow(self.plane, vmin=vmin, vmax=vmax, cmap=cmap, **plotkwargs)
if colorbar:
try:
self.colorbar = self._pyplot.colorbar(self.axis.images[0])
except Exception as ex:
print("ERROR: Could not create colorbar! Error was %s" % str(ex))
self._origin = 0 # normal convention
self.canvas = self.axis.figure.canvas
self._connect()
def _connect(self):
""" Connect click, click up (release click), and key press to events """
self.clickid = self.canvas.callbacks.connect('button_press_event',self.click)
self.clickupid = self.canvas.callbacks.connect('button_release_event',self.plot_spectrum)
self.keyid = self.canvas.callbacks.connect('key_press_event',self.plot_spectrum)
def _disconnect(self):
""" Disconnect click, click up (release click), and key press from events """
if hasattr(self,'canvas'):
self.canvas.mpl_disconnect(self.clickid)
self.canvas.mpl_disconnect(self.clickupid)
self.canvas.mpl_disconnect(self.keyid)
def makeplane(self, estimator=np.nanmean):
"""
Create a "plane" view of the cube, either by slicing or projecting it
or by showing a slice from the best-fit model parameter cube.
Parameters
----------
estimator : [ function | 'max' | 'int' | FITS filename | integer | slice ]
A non-pythonic, non-duck-typed variable. If it's a function, apply that function
along the cube's spectral axis to obtain an estimate (e.g., mean, min, max, etc.).
'max' will do the same thing as passing np.max
'int' will attempt to integrate the image (which is why I didn't duck-type)
(integrate means sum and multiply by dx)
a .fits filename will be read using pyfits (so you can make your own cover figure)
an integer will get the n'th slice in the parcube if it exists
If it's a slice, slice the input data cube along the Z-axis with this slice
"""
# THIS IS A HACK!!! isinstance(a function, function) must be a thing...
FUNCTION = type(np.max)
# estimator is NOT duck-typed
if type(estimator) is FUNCTION:
self.plane = estimator(self.Cube.cube,axis=0)
elif isinstance(estimator, six.string_types):
if estimator == 'max':
self.plane = self.Cube.cube.max(axis=0)
elif estimator == 'int':
dx = np.abs(self.Cube.xarr[1:] - self.Cube.xarr[:-1])
dx = np.concatenate([dx,[dx[-1]]])
self.plane = (self.Cube.cube * dx[:,np.newaxis,np.newaxis]).sum(axis=0)
elif estimator[-5:] == ".fits":
self.plane = pyfits.getdata(estimator)
elif type(estimator) is slice:
self.plane = self.Cube.cube[estimator,:,:]
elif type(estimator) is int:
if hasattr(self.Cube,'parcube'):
self.plane = self.Cube.parcube[estimator,:,:]
if self.plane is None:
raise ValueError("Invalid estimator %s" % (str(estimator)))
if np.sum(np.isfinite(self.plane)) == 0:
raise ValueError("Map is all NaNs or infs. Check your estimator or your input cube.")
def click(self,event):
"""
Record location of downclick
"""
if event.inaxes:
self._clickX = np.round(event.xdata) - self._origin
self._clickY = np.round(event.ydata) - self._origin
def plot_spectrum(self, event, plot_fit=True):
"""
Connects map cube to Spectrum...
"""
self.event = event
if event.inaxes:
clickX = np.round(event.xdata) - self._origin
clickY = np.round(event.ydata) - self._origin
# grab toolbar info so that we don't do anything if a tool is selected
tb = self.canvas.toolbar
if tb.mode != '':
return
elif event.key is not None:
if event.key == 'c':
self._center = (clickX-1,clickY-1)
self._remove_circle()
self._add_click_mark(clickX,clickY,clear=True)
elif event.key == 'r':
x,y = self._center
self._add_circle(x,y,clickX,clickY)
self.circle(x,y,clickX-1,clickY-1)
elif event.key == 'o':
clickX,clickY = round(clickX),round(clickY)
print("OverPlotting spectrum from point %i,%i" % (clickX-1,clickY-1))
color = next(self.overplot_colorcycle)
self._add_click_mark(clickX,clickY,clear=False, color=color)
self.Cube.plot_spectrum(clickX-1,clickY-1,clear=False, color=color, linestyle=self.overplot_linestyle)
elif event.key in ('1','2'):
event.button = int(event.key)
event.key = None
self.plot_spectrum(event)
elif (hasattr(event,'button') and event.button in (1,2)
and not (self._clickX == clickX and self._clickY == clickY)):
if event.button == 1:
self._remove_circle()
clear=True
color = 'k'
linestyle = 'steps-mid'
else:
color = next(self.overplot_colorcycle)
linestyle = self.overplot_linestyle
clear=False
rad = ( (self._clickX-clickX)**2 + (self._clickY-clickY)**2 )**0.5
print("Plotting circle from point %i,%i to %i,%i (r=%f)" % (self._clickX,self._clickY,clickX,clickY,rad))
self._add_circle(self._clickX,self._clickY,clickX,clickY)
self.circle(self._clickX,self._clickY,clickX,clickY,clear=clear,linestyle=linestyle,color=color)
elif hasattr(event,'button') and event.button is not None:
if event.button==1:
clickX,clickY = round(clickX),round(clickY)
print("Plotting spectrum from point %i,%i" % (clickX,clickY))
self._remove_circle()
self._add_click_mark(clickX,clickY,clear=True)
self.Cube.plot_spectrum(clickX,clickY,clear=True)
if plot_fit: self.Cube.plot_fit(clickX, clickY, silent=True)
elif event.button==2:
clickX,clickY = round(clickX),round(clickY)
print("OverPlotting spectrum from point %i,%i" % (clickX,clickY))
color = next(self.overplot_colorcycle)
self._add_click_mark(clickX,clickY,clear=False, color=color)
self.Cube.plot_spectrum(clickX,clickY,clear=False, color=color, linestyle=self.overplot_linestyle)
elif event.button==3:
print("Disconnecting GAIA-like tool")
self._disconnect()
else:
print("Call failed for some reason: ")
print("event: ",event)
else:
pass
# never really needed... warn("Click outside of axes")
def _add_click_mark(self,x,y,clear=False,color='k'):
"""
Add an X at some position
"""
if clear:
self._clear_click_marks()
if self.FITSFigure is not None:
label = 'xmark%i' % (len(self._click_marks)+1)
x,y = self.FITSFigure.pixel2world(x,y)
self.FITSFigure.show_markers(x,y,marker='x',c=color,layer=label)
self._click_marks.append( label )
else:
self._click_marks.append( self.axis.plot(x,y,'kx') )
self.refresh()
def _clear_click_marks(self):
"""
Remove all marks added by previous clicks
"""
if self.FITSFigure is not None:
for mark in self._click_marks:
if mark in self.FITSFigure._layers:
self.FITSFigure.remove_layer(mark)
else:
for mark in self._click_marks:
self._click_marks.remove(mark)
if mark in self.axis.lines:
self.axis.lines.remove(mark)
self.refresh()
def _add_circle(self,x,y,x2,y2,**kwargs):
"""
"""
if self.FITSFigure is not None:
x,y = self.FITSFigure.pixel2world(x,y)
x2,y2 = self.FITSFigure.pixel2world(x2,y2)
r = (np.linalg.norm(np.array([x,y])-np.array([x2,y2])))
#self.FITSFigure.show_markers(x,y,s=r,marker='o',facecolor='none',edgecolor='black',layer='circle')
layername = "circle%02i" % len(self._circles)
self.FITSFigure.show_circles(x,y,r,edgecolor='black',facecolor='none',layer=layername,**kwargs)
self._circles.append(layername)
else:
r = np.linalg.norm(np.array([x,y])-np.array([x2,y2]))
circle = matplotlib.patches.Circle([x,y],radius=r,**kwargs)
self._circles.append( circle )
self.axis.patches.append(circle)
self.refresh()
def _remove_circle(self):
"""
"""
if self.FITSFigure is not None:
for layername in self._circles:
if layername in self.FITSFigure._layers:
self.FITSFigure.remove_layer(layername)
else:
for circle in self._circles:
if circle in self.axis.patches:
self.axis.patches.remove(circle)
self._circles.remove(circle)
self.refresh()
def refresh(self):
if self.axis is not None:
self.axis.figure.canvas.draw()
def circle(self,x1,y1,x2,y2,**kwargs):
"""
Plot the spectrum of a circular aperture
"""
r = (np.linalg.norm(np.array([x1,y1])-np.array([x2,y2])))
self.Cube.plot_apspec([x1,y1,r],**kwargs)
#self.Cube.data = cubes.extract_aperture( self.Cube.cube, [x1,y1,r] , coordsys=None )
#self.Cube.plotter()
def copy(self, parent=None):
"""
Create a copy of the map plotter with blank (uninitialized) axis & figure
[ parent ]
A spectroscopic axis instance that is the parent of the specfit
instance. This needs to be specified at some point, but defaults
to None to prevent overwriting a previous plot.
"""
newmapplot = copy.copy(self)
newmapplot.Cube = parent
newmapplot.axis = None
newmapplot.figure = None
return newmapplot
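# --- Editorial usage sketch (not part of the original module). It outlines the
# interactive workflow described in the `mapplot` docstring above; the FITS filename
# is an illustrative assumption, and `pyspeckit.Cube` is assumed to expose a
# `mapplot` attribute of the MapPlotter type defined here.
def _mapplot_usage_sketch(fits_filename='my_cube.fits'):
    import pyspeckit
    cube = pyspeckit.Cube(fits_filename)
    # Show an integrated map; clicking a pixel then plots its spectrum, and the
    # 'c' + 'r' keys select a circular aperture as described in the docstring.
    cube.mapplot(estimator='int', colorbar=True)
    return cube.mapplot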
| mit |
code-sauce/tensorflow | tensorflow/contrib/learn/python/learn/learn_io/data_feeder.py | 88 | 31139 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementations of different data feeders to provide data for TF trainer."""
# TODO(ipolosukhin): Replace this module with feed-dict queue runners & queues.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import math
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import tf_logging as logging
# pylint: disable=g-multiple-import,g-bad-import-order
from .pandas_io import HAS_PANDAS, extract_pandas_data, extract_pandas_matrix, extract_pandas_labels
from .dask_io import HAS_DASK, extract_dask_data, extract_dask_labels
# pylint: enable=g-multiple-import,g-bad-import-order
def _get_in_out_shape(x_shape, y_shape, n_classes, batch_size=None):
"""Returns shape for input and output of the data feeder."""
x_is_dict, y_is_dict = isinstance(
x_shape, dict), y_shape is not None and isinstance(y_shape, dict)
if y_is_dict and n_classes is not None:
assert (isinstance(n_classes, dict))
if batch_size is None:
batch_size = list(x_shape.values())[0][0] if x_is_dict else x_shape[0]
elif batch_size <= 0:
raise ValueError('Invalid batch_size %d.' % batch_size)
if x_is_dict:
input_shape = {}
for k, v in list(x_shape.items()):
input_shape[k] = [batch_size] + (list(v[1:]) if len(v) > 1 else [1])
else:
x_shape = list(x_shape[1:]) if len(x_shape) > 1 else [1]
input_shape = [batch_size] + x_shape
if y_shape is None:
return input_shape, None, batch_size
def out_el_shape(out_shape, num_classes):
out_shape = list(out_shape[1:]) if len(out_shape) > 1 else []
# Skip first dimension if it is 1.
if out_shape and out_shape[0] == 1:
out_shape = out_shape[1:]
if num_classes is not None and num_classes > 1:
return [batch_size] + out_shape + [num_classes]
else:
return [batch_size] + out_shape
if not y_is_dict:
output_shape = out_el_shape(y_shape, n_classes)
else:
output_shape = dict([
(k, out_el_shape(v, n_classes[k]
if n_classes is not None and k in n_classes else None))
for k, v in list(y_shape.items())
])
return input_shape, output_shape, batch_size
def _data_type_filter(x, y):
"""Filter data types into acceptable format."""
if HAS_DASK:
x = extract_dask_data(x)
if y is not None:
y = extract_dask_labels(y)
if HAS_PANDAS:
x = extract_pandas_data(x)
if y is not None:
y = extract_pandas_labels(y)
return x, y
def _is_iterable(x):
return hasattr(x, 'next') or hasattr(x, '__next__')
def setup_train_data_feeder(x,
y,
n_classes,
batch_size=None,
shuffle=True,
epochs=None):
"""Create data feeder, to sample inputs from dataset.
If `x` and `y` are iterators, use `StreamingDataFeeder`.
Args:
x: numpy, pandas or Dask matrix or dictionary of aforementioned. Also
supports iterables.
y: numpy, pandas or Dask array or dictionary of aforementioned. Also
supports
iterables.
    n_classes: number of classes. Must be None or same type as y. In case `y`
      is a `dict` (or an iterable which returns a dict), `n_classes` must be a
      `dict` such that `n_classes[key] = n_classes for y[key]`.
batch_size: size to split data into parts. Must be >= 1.
shuffle: Whether to shuffle the inputs.
epochs: Number of epochs to run.
Returns:
DataFeeder object that returns training data.
Raises:
ValueError: if one of `x` and `y` is iterable and the other is not.
"""
x, y = _data_type_filter(x, y)
if HAS_DASK:
# pylint: disable=g-import-not-at-top
import dask.dataframe as dd
if (isinstance(x, (dd.Series, dd.DataFrame)) and
(y is None or isinstance(y, (dd.Series, dd.DataFrame)))):
data_feeder_cls = DaskDataFeeder
else:
data_feeder_cls = DataFeeder
else:
data_feeder_cls = DataFeeder
if _is_iterable(x):
if y is not None and not _is_iterable(y):
raise ValueError('Both x and y should be iterators for '
'streaming learning to work.')
return StreamingDataFeeder(x, y, n_classes, batch_size)
return data_feeder_cls(
x, y, n_classes, batch_size, shuffle=shuffle, epochs=epochs)
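# --- Editorial usage sketch (not part of the original module). It feeds plain numpy
# arrays through setup_train_data_feeder; the shapes, the 3-class labels and the
# batch size are illustrative assumptions only.
def _setup_train_data_feeder_sketch():
  x = np.random.rand(100, 10).astype(np.float32)  # 100 samples, 10 features
  y = np.random.randint(0, 3, size=100)           # integer labels for 3 classes
  feeder = setup_train_data_feeder(x, y, n_classes=3, batch_size=32, epochs=1)
  input_ph, output_ph = feeder.input_builder()    # placeholders of shape [None, 10] / [None, 3]
  feed_dict = feeder.get_feed_dict_fn()()         # one mini-batch, keyed by placeholder names
  return input_ph, output_ph, feed_dict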
def _batch_data(x, batch_size=None):
if (batch_size is not None) and (batch_size <= 0):
raise ValueError('Invalid batch_size %d.' % batch_size)
x_first_el = six.next(x)
x = itertools.chain([x_first_el], x)
chunk = dict([(k, []) for k in list(x_first_el.keys())]) if isinstance(
x_first_el, dict) else []
chunk_filled = False
for data in x:
if isinstance(data, dict):
for k, v in list(data.items()):
chunk[k].append(v)
if (batch_size is not None) and (len(chunk[k]) >= batch_size):
chunk[k] = np.matrix(chunk[k])
chunk_filled = True
if chunk_filled:
yield chunk
chunk = dict([(k, []) for k in list(x_first_el.keys())]) if isinstance(
x_first_el, dict) else []
chunk_filled = False
else:
chunk.append(data)
if (batch_size is not None) and (len(chunk) >= batch_size):
yield np.matrix(chunk)
chunk = []
if isinstance(x_first_el, dict):
for k, v in list(data.items()):
chunk[k] = np.matrix(chunk[k])
yield chunk
else:
yield np.matrix(chunk)
def setup_predict_data_feeder(x, batch_size=None):
"""Returns an iterable for feeding into predict step.
Args:
x: numpy, pandas, Dask array or dictionary of aforementioned. Also supports
iterable.
batch_size: Size of batches to split data into. If `None`, returns one
batch of full size.
Returns:
List or iterator (or dictionary thereof) of parts of data to predict on.
Raises:
ValueError: if `batch_size` <= 0.
"""
if HAS_DASK:
x = extract_dask_data(x)
if HAS_PANDAS:
x = extract_pandas_data(x)
if _is_iterable(x):
return _batch_data(x, batch_size)
if len(x.shape) == 1:
x = np.reshape(x, (-1, 1))
if batch_size is not None:
if batch_size <= 0:
raise ValueError('Invalid batch_size %d.' % batch_size)
n_batches = int(math.ceil(float(len(x)) / batch_size))
return [x[i * batch_size:(i + 1) * batch_size] for i in xrange(n_batches)]
return [x]
def setup_processor_data_feeder(x):
"""Sets up processor iterable.
Args:
x: numpy, pandas or iterable.
Returns:
Iterable of data to process.
"""
if HAS_PANDAS:
x = extract_pandas_matrix(x)
return x
def check_array(array, dtype):
"""Checks array on dtype and converts it if different.
Args:
array: Input array.
dtype: Expected dtype.
Returns:
Original array or converted.
"""
# skip check if array is instance of other classes, e.g. h5py.Dataset
# to avoid copying array and loading whole data into memory
if isinstance(array, (np.ndarray, list)):
array = np.array(array, dtype=dtype, order=None, copy=False)
return array
def _access(data, iloc):
"""Accesses an element from collection, using integer location based indexing.
Args:
data: array-like. The collection to access
    iloc: `int` or `list` of `int`s. Location(s) to access in `data`.
  Returns:
    The element of `data` found at location(s) `iloc`.
"""
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
if isinstance(data, pd.Series) or isinstance(data, pd.DataFrame):
return data.iloc[iloc]
return data[iloc]
def _check_dtype(dtype):
if dtypes.as_dtype(dtype) == dtypes.float64:
logging.warn(
'float64 is not supported by many models, consider casting to float32.')
return dtype
class DataFeeder(object):
"""Data feeder is an example class to sample data for TF trainer."""
def __init__(self,
x,
y,
n_classes,
batch_size=None,
shuffle=True,
random_state=None,
epochs=None):
"""Initializes a DataFeeder instance.
Args:
      x: One feature sample which can be either an Nd numpy matrix of shape
        `[n_samples, n_features, ...]` or a dictionary of Nd numpy matrices.
y: label vector, either floats for regression or class id for
classification. If matrix, will consider as a sequence of labels.
Can be `None` for unsupervised setting. Also supports dictionary of
labels.
n_classes: Number of classes, 0 and 1 are considered regression, `None`
will pass through the input labels without one-hot conversion. Also, if
`y` is `dict`, then `n_classes` must be `dict` such that
`n_classes[key] = n_classes for label y[key]`, `None` otherwise.
batch_size: Mini-batch size to accumulate samples in one mini batch.
shuffle: Whether to shuffle `x`.
random_state: Numpy `RandomState` object to reproduce sampling.
epochs: Number of times to iterate over input data before raising
`StopIteration` exception.
Attributes:
x: Input features (ndarray or dictionary of ndarrays).
y: Input label (ndarray or dictionary of ndarrays).
n_classes: Number of classes (if `None`, pass through indices without
one-hot conversion).
batch_size: Mini-batch size to accumulate.
input_shape: Shape of the input (or dictionary of shapes).
output_shape: Shape of the output (or dictionary of shapes).
      input_dtype: DType of input (or dictionary of dtypes).
      output_dtype: DType of output (or dictionary of dtypes).
"""
x_is_dict, y_is_dict = isinstance(x, dict), y is not None and isinstance(
y, dict)
if isinstance(y, list):
y = np.array(y)
self._x = dict([(k, check_array(v, v.dtype)) for k, v in list(x.items())
]) if x_is_dict else check_array(x, x.dtype)
self._y = None if y is None else \
        dict([(k, check_array(v, v.dtype)) for k, v in list(y.items())]) if y_is_dict else check_array(y, y.dtype)
# self.n_classes is not None means we're converting raw target indices to one-hot.
if n_classes is not None:
if not y_is_dict:
y_dtype = (np.int64
if n_classes is not None and n_classes > 1 else np.float32)
self._y = (None if y is None else check_array(y, dtype=y_dtype))
self.n_classes = n_classes
self.max_epochs = epochs
x_shape = dict([(k, v.shape) for k, v in list(self._x.items())
]) if x_is_dict else self._x.shape
y_shape = dict([(k, v.shape) for k, v in list(self._y.items())
]) if y_is_dict else None if y is None else self._y.shape
self.input_shape, self.output_shape, self._batch_size = _get_in_out_shape(
x_shape, y_shape, n_classes, batch_size)
# Input dtype matches dtype of x.
self._input_dtype = dict([(k, _check_dtype(v.dtype)) for k, v in list(self._x.items())]) if x_is_dict \
else _check_dtype(self._x.dtype)
# note: self._output_dtype = np.float32 when y is None
self._output_dtype = dict([(k, _check_dtype(v.dtype)) for k, v in list(self._y.items())]) if y_is_dict \
else _check_dtype(self._y.dtype) if y is not None else np.float32
# self.n_classes is None means we're passing in raw target indices
if n_classes is not None and y_is_dict:
for key in list(n_classes.keys()):
if key in self._output_dtype:
self._output_dtype[key] = np.float32
self._shuffle = shuffle
self.random_state = np.random.RandomState(
42) if random_state is None else random_state
num_samples = list(self._x.values())[0].shape[
0] if x_is_dict else self._x.shape[0]
if self._shuffle:
self.indices = self.random_state.permutation(num_samples)
else:
self.indices = np.array(range(num_samples))
self.offset = 0
self.epoch = 0
self._epoch_placeholder = None
@property
def x(self):
return self._x
@property
def y(self):
return self._y
@property
def shuffle(self):
return self._shuffle
@property
def input_dtype(self):
return self._input_dtype
@property
def output_dtype(self):
return self._output_dtype
@property
def batch_size(self):
return self._batch_size
def make_epoch_variable(self):
"""Adds a placeholder variable for the epoch to the graph.
Returns:
The epoch placeholder.
"""
self._epoch_placeholder = array_ops.placeholder(
dtypes.int32, [1], name='epoch')
return self._epoch_placeholder
def input_builder(self):
"""Builds inputs in the graph.
Returns:
Two placeholders for inputs and outputs.
"""
def get_placeholder(shape, dtype, name_prepend):
if shape is None:
return None
if isinstance(shape, dict):
placeholder = {}
for key in list(shape.keys()):
placeholder[key] = array_ops.placeholder(
dtypes.as_dtype(dtype[key]), [None] + shape[key][1:],
name=name_prepend + '_' + key)
else:
placeholder = array_ops.placeholder(
dtypes.as_dtype(dtype), [None] + shape[1:], name=name_prepend)
return placeholder
self._input_placeholder = get_placeholder(self.input_shape,
self._input_dtype, 'input')
self._output_placeholder = get_placeholder(self.output_shape,
self._output_dtype, 'output')
return self._input_placeholder, self._output_placeholder
def set_placeholders(self, input_placeholder, output_placeholder):
"""Sets placeholders for this data feeder.
Args:
input_placeholder: Placeholder for `x` variable. Should match shape
of the examples in the x dataset.
output_placeholder: Placeholder for `y` variable. Should match
shape of the examples in the y dataset. Can be `None`.
"""
self._input_placeholder = input_placeholder
self._output_placeholder = output_placeholder
def get_feed_params(self):
"""Function returns a `dict` with data feed params while training.
Returns:
A `dict` with data feed params while training.
"""
return {
'epoch': self.epoch,
'offset': self.offset,
'batch_size': self._batch_size
}
def get_feed_dict_fn(self):
"""Returns a function that samples data into given placeholders.
Returns:
A function that when called samples a random subset of batch size
from `x` and `y`.
"""
x_is_dict, y_is_dict = isinstance(
self._x, dict), self._y is not None and isinstance(self._y, dict)
# Assign input features from random indices.
def extract(data, indices):
return (np.array(_access(data, indices)).reshape((indices.shape[0], 1)) if
len(data.shape) == 1 else _access(data, indices))
# assign labels from random indices
def assign_label(data, shape, dtype, n_classes, indices):
shape[0] = indices.shape[0]
out = np.zeros(shape, dtype=dtype)
for i in xrange(out.shape[0]):
sample = indices[i]
# self.n_classes is None means we're passing in raw target indices
if n_classes is None:
out[i] = _access(data, sample)
else:
if n_classes > 1:
if len(shape) == 2:
out.itemset((i, int(_access(data, sample))), 1.0)
else:
for idx, value in enumerate(_access(data, sample)):
out.itemset(tuple([i, idx, value]), 1.0)
else:
out[i] = _access(data, sample)
return out
def _feed_dict_fn():
"""Function that samples data into given placeholders."""
if self.max_epochs is not None and self.epoch + 1 > self.max_epochs:
raise StopIteration
assert self._input_placeholder is not None
feed_dict = {}
if self._epoch_placeholder is not None:
feed_dict[self._epoch_placeholder.name] = [self.epoch]
# Take next batch of indices.
x_len = list(self._x.values())[0].shape[
0] if x_is_dict else self._x.shape[0]
end = min(x_len, self.offset + self._batch_size)
batch_indices = self.indices[self.offset:end]
# adding input placeholder
feed_dict.update(
dict([(self._input_placeholder[k].name, extract(v, batch_indices))
for k, v in list(self._x.items())]) if x_is_dict else
{self._input_placeholder.name: extract(self._x, batch_indices)})
# move offset and reset it if necessary
self.offset += self._batch_size
if self.offset >= x_len:
self.indices = self.random_state.permutation(
x_len) if self._shuffle else np.array(range(x_len))
self.offset = 0
self.epoch += 1
# return early if there are no labels
if self._output_placeholder is None:
return feed_dict
# adding output placeholders
if y_is_dict:
for k, v in list(self._y.items()):
n_classes = (self.n_classes[k] if k in self.n_classes else
None) if self.n_classes is not None else None
shape, dtype = self.output_shape[k], self._output_dtype[k]
feed_dict.update({
self._output_placeholder[k].name:
assign_label(v, shape, dtype, n_classes, batch_indices)
})
else:
shape, dtype, n_classes = self.output_shape, self._output_dtype, self.n_classes
feed_dict.update({
self._output_placeholder.name:
assign_label(self._y, shape, dtype, n_classes, batch_indices)
})
return feed_dict
return _feed_dict_fn
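# --- Editorial usage sketch (not part of the original module). It exercises the
# dictionary-of-features path documented in DataFeeder above; the feature names,
# the shapes and the regression target are illustrative assumptions only.
def _dict_feature_feeder_sketch():
  x = {'age': np.random.rand(50, 1).astype(np.float32),
       'income': np.random.rand(50, 4).astype(np.float32)}
  y = {'target': np.random.rand(50).astype(np.float32)}  # regression, so n_classes=None
  feeder = DataFeeder(x, y, n_classes=None, batch_size=10)
  input_ph, output_ph = feeder.input_builder()  # both are dicts keyed like x and y
  feed_dict = feeder.get_feed_dict_fn()()
  return input_ph, output_ph, feed_dict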
class StreamingDataFeeder(DataFeeder):
"""Data feeder for TF trainer that reads data from iterator.
  Streaming data feeder allows reading data as it comes in from disk or
  somewhere else. It is customary to have these iterators rotate infinitely over
  the dataset, to allow control of how much to learn on the trainer side.
"""
def __init__(self, x, y, n_classes, batch_size):
"""Initializes a StreamingDataFeeder instance.
Args:
x: iterator each element of which returns one feature sample. Sample can
be a Nd numpy matrix or dictionary of Nd numpy matrices.
y: iterator each element of which returns one label sample. Sample can be
a Nd numpy matrix or dictionary of Nd numpy matrices with 1 or many
classes regression values.
n_classes: indicator of how many classes the corresponding label sample
has for the purposes of one-hot conversion of label. In case where `y`
is a dictionary, `n_classes` must be dictionary (with same keys as `y`)
of how many classes there are in each label in `y`. If key is
present in `y` and missing in `n_classes`, the value is assumed `None`
and no one-hot conversion will be applied to the label with that key.
      batch_size: Mini batch size to accumulate samples in one batch. If set to
        `None`, then it is assumed that the iterator returns already batched
        elements.
Attributes:
x: input features (or dictionary of input features).
y: input label (or dictionary of output features).
n_classes: number of classes.
batch_size: mini batch size to accumulate.
input_shape: shape of the input (can be dictionary depending on `x`).
output_shape: shape of the output (can be dictionary depending on `y`).
input_dtype: dtype of input (can be dictionary depending on `x`).
output_dtype: dtype of output (can be dictionary depending on `y`).
"""
# pylint: disable=invalid-name,super-init-not-called
x_first_el = six.next(x)
self._x = itertools.chain([x_first_el], x)
if y is not None:
y_first_el = six.next(y)
self._y = itertools.chain([y_first_el], y)
else:
y_first_el = None
self._y = None
self.n_classes = n_classes
x_is_dict = isinstance(x_first_el, dict)
y_is_dict = y is not None and isinstance(y_first_el, dict)
if y_is_dict and n_classes is not None:
assert isinstance(n_classes, dict)
# extract shapes for first_elements
if x_is_dict:
x_first_el_shape = dict(
[(k, [1] + list(v.shape)) for k, v in list(x_first_el.items())])
else:
x_first_el_shape = [1] + list(x_first_el.shape)
if y_is_dict:
y_first_el_shape = dict(
[(k, [1] + list(v.shape)) for k, v in list(y_first_el.items())])
elif y is None:
y_first_el_shape = None
else:
y_first_el_shape = ([1] + list(y_first_el[0].shape if isinstance(
y_first_el, list) else y_first_el.shape))
self.input_shape, self.output_shape, self._batch_size = _get_in_out_shape(
x_first_el_shape, y_first_el_shape, n_classes, batch_size)
# Input dtype of x_first_el.
if x_is_dict:
self._input_dtype = dict(
[(k, _check_dtype(v.dtype)) for k, v in list(x_first_el.items())])
else:
self._input_dtype = _check_dtype(x_first_el.dtype)
# Output dtype of y_first_el.
def check_y_dtype(el):
if isinstance(el, np.ndarray):
return el.dtype
elif isinstance(el, list):
return check_y_dtype(el[0])
else:
return _check_dtype(np.dtype(type(el)))
# Output types are floats, due to both softmaxes and regression req.
if n_classes is not None and (y is None or not y_is_dict) and n_classes > 0:
self._output_dtype = np.float32
elif y_is_dict:
self._output_dtype = dict(
[(k, check_y_dtype(v)) for k, v in list(y_first_el.items())])
elif y is None:
self._output_dtype = None
else:
self._output_dtype = check_y_dtype(y_first_el)
def get_feed_params(self):
"""Function returns a `dict` with data feed params while training.
Returns:
A `dict` with data feed params while training.
"""
return {'batch_size': self._batch_size}
def get_feed_dict_fn(self):
"""Returns a function, that will sample data and provide it to placeholders.
Returns:
A function that when called samples a random subset of batch size
from x and y.
"""
self.stopped = False
def _feed_dict_fn():
"""Samples data and provides it to placeholders.
Returns:
`dict` of input and output tensors.
"""
def init_array(shape, dtype):
"""Initialize array of given shape or dict of shapes and dtype."""
if shape is None:
return None
elif isinstance(shape, dict):
return dict([(k, np.zeros(shape[k], dtype[k]))
for k in list(shape.keys())])
else:
return np.zeros(shape, dtype=dtype)
def put_data_array(dest, index, source=None, n_classes=None):
"""Puts data array into container."""
if source is None:
dest = dest[:index]
elif n_classes is not None and n_classes > 1:
if len(self.output_shape) == 2:
dest.itemset((index, source), 1.0)
else:
for idx, value in enumerate(source):
dest.itemset(tuple([index, idx, value]), 1.0)
else:
if len(dest.shape) > 1:
dest[index, :] = source
else:
dest[index] = source[0] if isinstance(source, list) else source
return dest
def put_data_array_or_dict(holder, index, data=None, n_classes=None):
"""Puts data array or data dictionary into container."""
if holder is None:
return None
if isinstance(holder, dict):
if data is None:
data = {k: None for k in holder.keys()}
assert isinstance(data, dict)
for k in holder.keys():
num_classes = n_classes[k] if (n_classes is not None and
k in n_classes) else None
holder[k] = put_data_array(holder[k], index, data[k], num_classes)
else:
holder = put_data_array(holder, index, data, n_classes)
return holder
if self.stopped:
raise StopIteration
inp = init_array(self.input_shape, self._input_dtype)
out = init_array(self.output_shape, self._output_dtype)
for i in xrange(self._batch_size):
# Add handling when queue ends.
try:
next_inp = six.next(self._x)
inp = put_data_array_or_dict(inp, i, next_inp, None)
except StopIteration:
self.stopped = True
if i == 0:
raise
inp = put_data_array_or_dict(inp, i, None, None)
out = put_data_array_or_dict(out, i, None, None)
break
if self._y is not None:
next_out = six.next(self._y)
out = put_data_array_or_dict(out, i, next_out, self.n_classes)
# creating feed_dict
if isinstance(inp, dict):
feed_dict = dict([(self._input_placeholder[k].name, inp[k])
for k in list(self._input_placeholder.keys())])
else:
feed_dict = {self._input_placeholder.name: inp}
if self._y is not None:
if isinstance(out, dict):
feed_dict.update(
dict([(self._output_placeholder[k].name, out[k])
for k in list(self._output_placeholder.keys())]))
else:
feed_dict.update({self._output_placeholder.name: out})
return feed_dict
return _feed_dict_fn
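# --- Editorial usage sketch (not part of the original module). It builds the
# infinite generators that StreamingDataFeeder expects; the feature width, the
# class count and the batch size are illustrative assumptions only.
def _streaming_feeder_sketch():
  def x_iter():
    while True:                                   # rotate forever, as the class docstring suggests
      yield np.random.rand(8).astype(np.float32)  # one feature sample per next()
  def y_iter():
    while True:
      yield np.random.randint(0, 3)               # one integer class label per next()
  feeder = StreamingDataFeeder(x_iter(), y_iter(), n_classes=3, batch_size=16)
  input_ph, output_ph = feeder.input_builder()
  feed_dict = feeder.get_feed_dict_fn()()         # pulls 16 samples from each iterator
  return input_ph, output_ph, feed_dict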
class DaskDataFeeder(object):
"""Data feeder for that reads data from dask.Series and dask.DataFrame.
Numpy arrays can be serialized to disk and it's possible to do random seeks
  into them. DaskDataFeeder removes the requirement to have the full dataset in
  memory and still allows random seeks for sampling of batches.
"""
def __init__(self,
x,
y,
n_classes,
batch_size,
shuffle=True,
random_state=None,
epochs=None):
"""Initializes a DaskDataFeeder instance.
Args:
      x: iterator that, for each element, returns features.
      y: iterator that, for each element, returns 1 or many classes /
        regression values.
n_classes: indicator of how many classes the label has.
batch_size: Mini batch size to accumulate.
shuffle: Whether to shuffle the inputs.
      random_state: random state for RNG. Note that it will mutate, so use an
        int value for this if you want consistent sized batches.
epochs: Number of epochs to run.
Attributes:
x: input features.
y: input label.
n_classes: number of classes.
batch_size: mini batch size to accumulate.
input_shape: shape of the input.
output_shape: shape of the output.
input_dtype: dtype of input.
output_dtype: dtype of output.
Raises:
ValueError: if `x` or `y` are `dict`, as they are not supported currently.
"""
if isinstance(x, dict) or isinstance(y, dict):
raise ValueError(
'DaskDataFeeder does not support dictionaries at the moment.')
# pylint: disable=invalid-name,super-init-not-called
import dask.dataframe as dd # pylint: disable=g-import-not-at-top
# TODO(terrytangyuan): check x and y dtypes in dask_io like pandas
self._x = x
self._y = y
# save column names
self._x_columns = list(x.columns)
if isinstance(y.columns[0], str):
self._y_columns = list(y.columns)
else:
# deal with cases where two DFs have overlapped default numeric colnames
self._y_columns = len(self._x_columns) + 1
self._y = self._y.rename(columns={y.columns[0]: self._y_columns})
# TODO(terrytangyuan): deal with unsupervised cases
# combine into a data frame
self.df = dd.multi.concat([self._x, self._y], axis=1)
self.n_classes = n_classes
x_count = x.count().compute()[0]
x_shape = (x_count, len(self._x.columns))
y_shape = (x_count, len(self._y.columns))
# TODO(terrytangyuan): Add support for shuffle and epochs.
self._shuffle = shuffle
self.epochs = epochs
self.input_shape, self.output_shape, self._batch_size = _get_in_out_shape(
x_shape, y_shape, n_classes, batch_size)
self.sample_fraction = self._batch_size / float(x_count)
self._input_dtype = _check_dtype(self._x.dtypes[0])
self._output_dtype = _check_dtype(self._y.dtypes[self._y_columns])
if random_state is None:
self.random_state = 66
else:
self.random_state = random_state
def get_feed_params(self):
"""Function returns a `dict` with data feed params while training.
Returns:
A `dict` with data feed params while training.
"""
return {'batch_size': self._batch_size}
def get_feed_dict_fn(self, input_placeholder, output_placeholder):
"""Returns a function, that will sample data and provide it to placeholders.
Args:
input_placeholder: tf.Placeholder for input features mini batch.
output_placeholder: tf.Placeholder for output labels.
Returns:
A function that when called samples a random subset of batch size
from x and y.
"""
def _feed_dict_fn():
"""Samples data and provides it to placeholders."""
# TODO(ipolosukhin): option for with/without replacement (dev version of
# dask)
sample = self.df.random_split(
[self.sample_fraction, 1 - self.sample_fraction],
random_state=self.random_state)
inp = extract_pandas_matrix(sample[0][self._x_columns].compute()).tolist()
out = extract_pandas_matrix(sample[0][self._y_columns].compute())
# convert to correct dtype
inp = np.array(inp, dtype=self._input_dtype)
# one-hot encode out for each class for cross entropy loss
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
if not isinstance(out, pd.Series):
out = out.flatten()
out_max = self._y.max().compute().values[0]
encoded_out = np.zeros((out.size, out_max + 1), dtype=self._output_dtype)
encoded_out[np.arange(out.size), out] = 1
return {input_placeholder.name: inp, output_placeholder.name: encoded_out}
return _feed_dict_fn
| apache-2.0 |
paulgradie/SeqPyPlot | main_app/seqpyplot/parsers/htseq_parser.py | 1 | 2244 | """
Read a directory of expression counts in ht-seq format. Each sample
should be an individual file in the directory. File names and
sample order are specified in the config file (order is determined
by order IN the config.)
This class is intended to return the raw dataframe of samples with
missing sample columns as NaN.
"""
import pandas as pd
from pathos.multiprocessing import ProcessPool
import pathlib
try:
from functools import reduce # for py3 compatibility
except ImportError:
pass
class HtSeqParser(object):
def __init__(self, nodes=2):
self.nodes = nodes
def parse_data(self, data_paths, sample_names):
"""
        Read the input files from the config file and load them into a
pandas dataframe.
params
            data_paths: list of file paths specified in the config. Returned
                from config parse.
            sample_names: list of sample names specified in the config.
                Returned from config parse.
"""
output = self.load_data(data_paths, sample_names)
data, ercc_df = (self.merge_dfs(output)
.pipe(self.df_cleanup)
.pipe(self.split_on_ercc))
return data, ercc_df
def load_data(self, data_paths, sample_names):
" Multiprocess load of files in to a list of dfs "
pool = ProcessPool(nodes=self.nodes)
dfs = pool.map(self.load_func, zip(data_paths, sample_names))
return dfs
@staticmethod
def load_func(data_tuple):
path, sample_name = data_tuple
return pd.read_csv(path, sep='\t', names=['gene', sample_name])
def merge_dfs(self, dfs):
return reduce(lambda x, y: pd.merge(x, y, on='gene', how='outer'), dfs)
def df_cleanup(self, df_old):
" Clean away unwanted columns, reset index, and fillna "
df = df_old.copy()
df = df[df['gene'].str.startswith('__') == False]
df.set_index('gene', inplace=True)
df.fillna(value='Nan', inplace=True)
return df
def split_on_ercc(self, df):
" Extract the ERCC data "
ercc_cols = df.index.str.startswith('ERCC-')
ercc_df = df[ercc_cols]
data = df[~ercc_cols]
return data, ercc_df
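# --- Editorial usage sketch (not part of the original module). The file names and
# sample labels are illustrative assumptions; each path should point to a two-column,
# tab-separated ht-seq count file as described in the module docstring.
if __name__ == '__main__':
    demo_paths = ['counts/ctrl_day1.txt', 'counts/treat_day1.txt']
    demo_names = ['ctrl_day1', 'treat_day1']
    parser = HtSeqParser(nodes=2)
    counts_df, ercc_df = parser.parse_data(demo_paths, demo_names)
    print(counts_df.head())
    print(ercc_df.head())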
| gpl-3.0 |
dmlc/xgboost | tests/python/test_with_pandas.py | 1 | 10402 | # -*- coding: utf-8 -*-
import numpy as np
import xgboost as xgb
import testing as tm
import pytest
try:
import pandas as pd
except ImportError:
pass
pytestmark = pytest.mark.skipif(**tm.no_pandas())
dpath = 'demo/data/'
rng = np.random.RandomState(1994)
class TestPandas:
def test_pandas(self):
df = pd.DataFrame([[1, 2., True], [2, 3., False]],
columns=['a', 'b', 'c'])
dm = xgb.DMatrix(df, label=pd.Series([1, 2]))
assert dm.feature_names == ['a', 'b', 'c']
assert dm.feature_types == ['int', 'float', 'i']
assert dm.num_row() == 2
assert dm.num_col() == 3
np.testing.assert_array_equal(dm.get_label(), np.array([1, 2]))
# overwrite feature_names and feature_types
dm = xgb.DMatrix(df, label=pd.Series([1, 2]),
feature_names=['x', 'y', 'z'],
feature_types=['q', 'q', 'q'])
assert dm.feature_names == ['x', 'y', 'z']
assert dm.feature_types == ['q', 'q', 'q']
assert dm.num_row() == 2
assert dm.num_col() == 3
# incorrect dtypes
df = pd.DataFrame([[1, 2., 'x'], [2, 3., 'y']],
columns=['a', 'b', 'c'])
with pytest.raises(ValueError):
xgb.DMatrix(df)
# numeric columns
df = pd.DataFrame([[1, 2., True], [2, 3., False]])
dm = xgb.DMatrix(df, label=pd.Series([1, 2]))
assert dm.feature_names == ['0', '1', '2']
assert dm.feature_types == ['int', 'float', 'i']
assert dm.num_row() == 2
assert dm.num_col() == 3
np.testing.assert_array_equal(dm.get_label(), np.array([1, 2]))
df = pd.DataFrame([[1, 2., 1], [2, 3., 1]], columns=[4, 5, 6])
dm = xgb.DMatrix(df, label=pd.Series([1, 2]))
assert dm.feature_names == ['4', '5', '6']
assert dm.feature_types == ['int', 'float', 'int']
assert dm.num_row() == 2
assert dm.num_col() == 3
df = pd.DataFrame({'A': ['X', 'Y', 'Z'], 'B': [1, 2, 3]})
dummies = pd.get_dummies(df)
# B A_X A_Y A_Z
# 0 1 1 0 0
# 1 2 0 1 0
# 2 3 0 0 1
result, _, _ = xgb.data._transform_pandas_df(dummies,
enable_categorical=False)
exp = np.array([[1., 1., 0., 0.],
[2., 0., 1., 0.],
[3., 0., 0., 1.]])
np.testing.assert_array_equal(result, exp)
dm = xgb.DMatrix(dummies)
assert dm.feature_names == ['B', 'A_X', 'A_Y', 'A_Z']
assert dm.feature_types == ['int', 'int', 'int', 'int']
assert dm.num_row() == 3
assert dm.num_col() == 4
df = pd.DataFrame({'A=1': [1, 2, 3], 'A=2': [4, 5, 6]})
dm = xgb.DMatrix(df)
assert dm.feature_names == ['A=1', 'A=2']
assert dm.feature_types == ['int', 'int']
assert dm.num_row() == 3
assert dm.num_col() == 2
df_int = pd.DataFrame([[1, 1.1], [2, 2.2]], columns=[9, 10])
dm_int = xgb.DMatrix(df_int)
df_range = pd.DataFrame([[1, 1.1], [2, 2.2]], columns=range(9, 11, 1))
dm_range = xgb.DMatrix(df_range)
assert dm_int.feature_names == ['9', '10'] # assert not "9 "
assert dm_int.feature_names == dm_range.feature_names
# test MultiIndex as columns
df = pd.DataFrame(
[
(1, 2, 3, 4, 5, 6),
(6, 5, 4, 3, 2, 1)
],
columns=pd.MultiIndex.from_tuples((
('a', 1), ('a', 2), ('a', 3),
('b', 1), ('b', 2), ('b', 3),
))
)
dm = xgb.DMatrix(df)
assert dm.feature_names == ['a 1', 'a 2', 'a 3', 'b 1', 'b 2', 'b 3']
assert dm.feature_types == ['int', 'int', 'int', 'int', 'int', 'int']
assert dm.num_row() == 2
assert dm.num_col() == 6
def test_slice(self):
rng = np.random.RandomState(1994)
rows = 100
X = rng.randint(3, 7, size=rows)
X = pd.DataFrame({'f0': X})
y = rng.randn(rows)
ridxs = [1, 2, 3, 4, 5, 6]
m = xgb.DMatrix(X, y)
sliced = m.slice(ridxs)
assert m.feature_types == sliced.feature_types
def test_pandas_categorical(self):
rng = np.random.RandomState(1994)
rows = 100
X = rng.randint(3, 7, size=rows)
X = pd.Series(X, dtype="category")
X = pd.DataFrame({'f0': X})
y = rng.randn(rows)
m = xgb.DMatrix(X, y, enable_categorical=True)
assert m.feature_types[0] == 'categorical'
def test_pandas_sparse(self):
import pandas as pd
rows = 100
X = pd.DataFrame(
{"A": pd.arrays.SparseArray(np.random.randint(0, 10, size=rows)),
"B": pd.arrays.SparseArray(np.random.randn(rows)),
"C": pd.arrays.SparseArray(np.random.permutation(
[True, False] * (rows // 2)))}
)
y = pd.Series(pd.arrays.SparseArray(np.random.randn(rows)))
dtrain = xgb.DMatrix(X, y)
booster = xgb.train({}, dtrain, num_boost_round=4)
predt_sparse = booster.predict(xgb.DMatrix(X))
predt_dense = booster.predict(xgb.DMatrix(X.sparse.to_dense()))
np.testing.assert_allclose(predt_sparse, predt_dense)
def test_pandas_label(self):
# label must be a single column
df = pd.DataFrame({'A': ['X', 'Y', 'Z'], 'B': [1, 2, 3]})
with pytest.raises(ValueError):
xgb.data._transform_pandas_df(df, False, None, None, 'label', 'float')
# label must be supported dtype
df = pd.DataFrame({'A': np.array(['a', 'b', 'c'], dtype=object)})
with pytest.raises(ValueError):
xgb.data._transform_pandas_df(df, False, None, None, 'label', 'float')
df = pd.DataFrame({'A': np.array([1, 2, 3], dtype=int)})
result, _, _ = xgb.data._transform_pandas_df(df, False, None, None,
'label', 'float')
np.testing.assert_array_equal(result, np.array([[1.], [2.], [3.]],
dtype=float))
dm = xgb.DMatrix(np.random.randn(3, 2), label=df)
assert dm.num_row() == 3
assert dm.num_col() == 2
def test_pandas_weight(self):
kRows = 32
kCols = 8
X = np.random.randn(kRows, kCols)
y = np.random.randn(kRows)
w = np.random.uniform(size=kRows).astype(np.float32)
w_pd = pd.DataFrame(w)
data = xgb.DMatrix(X, y, w_pd)
assert data.num_row() == kRows
assert data.num_col() == kCols
np.testing.assert_array_equal(data.get_weight(), w)
def test_cv_as_pandas(self):
dm = xgb.DMatrix(dpath + 'agaricus.txt.train')
params = {'max_depth': 2, 'eta': 1, 'verbosity': 0,
'objective': 'binary:logistic', 'eval_metric': 'error'}
cv = xgb.cv(params, dm, num_boost_round=10, nfold=10)
assert isinstance(cv, pd.DataFrame)
exp = pd.Index([u'test-error-mean', u'test-error-std',
u'train-error-mean', u'train-error-std'])
assert len(cv.columns.intersection(exp)) == 4
# show progress log (result is the same as above)
cv = xgb.cv(params, dm, num_boost_round=10, nfold=10,
verbose_eval=True)
assert isinstance(cv, pd.DataFrame)
exp = pd.Index([u'test-error-mean', u'test-error-std',
u'train-error-mean', u'train-error-std'])
assert len(cv.columns.intersection(exp)) == 4
cv = xgb.cv(params, dm, num_boost_round=10, nfold=10,
verbose_eval=True, show_stdv=False)
assert isinstance(cv, pd.DataFrame)
exp = pd.Index([u'test-error-mean', u'test-error-std',
u'train-error-mean', u'train-error-std'])
assert len(cv.columns.intersection(exp)) == 4
params = {'max_depth': 2, 'eta': 1, 'verbosity': 0,
'objective': 'binary:logistic', 'eval_metric': 'auc'}
cv = xgb.cv(params, dm, num_boost_round=10, nfold=10, as_pandas=True)
assert 'eval_metric' in params
assert 'auc' in cv.columns[0]
params = {'max_depth': 2, 'eta': 1, 'verbosity': 0,
'objective': 'binary:logistic', 'eval_metric': ['auc']}
cv = xgb.cv(params, dm, num_boost_round=10, nfold=10, as_pandas=True)
assert 'eval_metric' in params
assert 'auc' in cv.columns[0]
params = {'max_depth': 2, 'eta': 1, 'verbosity': 0,
'objective': 'binary:logistic', 'eval_metric': ['auc']}
cv = xgb.cv(params, dm, num_boost_round=10, nfold=10,
as_pandas=True, early_stopping_rounds=1)
assert 'eval_metric' in params
assert 'auc' in cv.columns[0]
assert cv.shape[0] < 10
params = {'max_depth': 2, 'eta': 1, 'verbosity': 0,
'objective': 'binary:logistic'}
cv = xgb.cv(params, dm, num_boost_round=10, nfold=10,
as_pandas=True, metrics='auc')
assert 'auc' in cv.columns[0]
params = {'max_depth': 2, 'eta': 1, 'verbosity': 0,
'objective': 'binary:logistic'}
cv = xgb.cv(params, dm, num_boost_round=10, nfold=10,
as_pandas=True, metrics=['auc'])
assert 'auc' in cv.columns[0]
params = {'max_depth': 2, 'eta': 1, 'verbosity': 0,
'objective': 'binary:logistic', 'eval_metric': ['auc']}
cv = xgb.cv(params, dm, num_boost_round=10, nfold=10,
as_pandas=True, metrics='error')
assert 'eval_metric' in params
assert 'auc' not in cv.columns[0]
assert 'error' in cv.columns[0]
cv = xgb.cv(params, dm, num_boost_round=10, nfold=10,
as_pandas=True, metrics=['error'])
assert 'eval_metric' in params
assert 'auc' not in cv.columns[0]
assert 'error' in cv.columns[0]
params = list(params.items())
cv = xgb.cv(params, dm, num_boost_round=10, nfold=10,
as_pandas=True, metrics=['error'])
assert isinstance(params, list)
assert 'auc' not in cv.columns[0]
assert 'error' in cv.columns[0]
| apache-2.0 |
AlfredNeverKog/BrainCarya | src/my/kadenze/lesson3/mnist_autoencoder.py | 1 | 2610 | from mnist import MNIST
import numpy as np
import tensorflow as tf
from src.my.lib.utils import montage
import matplotlib.pyplot as plt
from PIL import Image
src = '../../../../data/mnist/'
output='./content/1/%s.jpg'
mndata = MNIST(src)
data = np.array(mndata.load_testing())
X = data[0]
Y = data[1]
items = 100
imgs = np.array([i for i in np.array(X[:items])]).reshape(items,28,28)
n_features = 784
n_input = n_features
Y = imgs.reshape(items,n_features).astype(float)
current_input = imgs.reshape(items,n_features).astype(float)
Ws = []
Bs = []
dimensions = [512,256,128,64]
for layer_i,n_ouputs in enumerate(dimensions):
with tf.variable_scope("encoder/variable/%s" % layer_i):
W = tf.get_variable(name="weight%s" % layer_i, dtype=tf.float64,
initializer=tf.contrib.layers.xavier_initializer(),
shape=[n_input, n_ouputs])
#B = tf.get_variable(name='bias%s' % layer_i, dtype=tf.float64,
# initializer=tf.random_normal_initializer(mean=0.0, stddev=1.1),
# shape=[n_ouputs])
#h = tf.nn.bias_add(value=tf.matmul(current_input, W),
# bias=B)
h = tf.matmul(current_input, W)
current_input = h
current_input = tf.nn.relu(current_input)
n_input = n_ouputs
Ws.append(W)
#Bs.append()
Ws = Ws[::-1]#reverse
Bs = Bs[::-1]#reverse
#dimensions = dimensions[::1][1:].append(n_features)
dimensions = dimensions[::-1][1:] +[n_features]
#Build DECODER
for layer_i,n_ouputs in enumerate(dimensions):
with tf.variable_scope("encoder/variable/%s" % layer_i):
##128x64 -> 64x128
        h = tf.matmul(current_input, tf.transpose(Ws[layer_i]))
if layer_i + 1 < len(Bs):
h = tf.nn.bias_add(h,bias=Bs[layer_i + 1])
current_input = h
current_input = tf.nn.relu(current_input)
n_input = n_ouputs
loss_func = tf.reduce_mean(tf.squared_difference(current_input, Y), 1)
optimizer = tf.train.AdamOptimizer(learning_rate=0.00001)
train = optimizer.minimize(loss_func)
counter = 0
with tf.Session() as sess:
sess.run(tf.initialize_all_variables())
for i in range(50000):
sess.run(train)
if i % 15 == 0:
Image.fromarray(montage(sess.run(current_input).reshape(items,28,28)).astype(np.uint8)) \
.save(output % ("0"*(5 - len(str(counter))) + str(counter)))
print(sess.run(tf.reduce_mean(loss_func)))
counter += 1
| mit |
f3r/scikit-learn | examples/covariance/plot_lw_vs_oas.py | 159 | 2951 | """
=============================
Ledoit-Wolf vs OAS estimation
=============================
The usual covariance maximum likelihood estimate can be regularized
using shrinkage. Ledoit and Wolf proposed a close formula to compute
the asymptotically optimal shrinkage parameter (minimizing a MSE
criterion), yielding the Ledoit-Wolf covariance estimate.
Chen et al. proposed an improvement of the Ledoit-Wolf shrinkage
parameter, the OAS coefficient, whose convergence is significantly
better under the assumption that the data are Gaussian.
This example, inspired from Chen's publication [1], shows a comparison
of the estimated MSE of the LW and OAS methods, using Gaussian
distributed data.
[1] "Shrinkage Algorithms for MMSE Covariance Estimation"
Chen et al., IEEE Trans. on Sign. Proc., Volume 58, Issue 10, October 2010.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy.linalg import toeplitz, cholesky
from sklearn.covariance import LedoitWolf, OAS
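# Minimal usage sketch (added for illustration, not part of the original benchmark):
# both estimators expose the standard scikit-learn fit() API; after fitting,
# .covariance_ holds the shrunk covariance estimate and .shrinkage_ the estimated
# shrinkage coefficient. _X_demo is a throwaway sample used only for this sketch.
_X_demo = np.random.RandomState(1).randn(20, 5)
_lw_demo = LedoitWolf().fit(_X_demo)
_oa_demo = OAS().fit(_X_demo)
print("demo shrinkage (LW, OAS): %.3f, %.3f" % (_lw_demo.shrinkage_, _oa_demo.shrinkage_))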
np.random.seed(0)
###############################################################################
n_features = 100
# simulation covariance matrix (AR(1) process)
r = 0.1
real_cov = toeplitz(r ** np.arange(n_features))
coloring_matrix = cholesky(real_cov)
n_samples_range = np.arange(6, 31, 1)
repeat = 100
lw_mse = np.zeros((n_samples_range.size, repeat))
oa_mse = np.zeros((n_samples_range.size, repeat))
lw_shrinkage = np.zeros((n_samples_range.size, repeat))
oa_shrinkage = np.zeros((n_samples_range.size, repeat))
for i, n_samples in enumerate(n_samples_range):
for j in range(repeat):
X = np.dot(
np.random.normal(size=(n_samples, n_features)), coloring_matrix.T)
lw = LedoitWolf(store_precision=False, assume_centered=True)
lw.fit(X)
lw_mse[i, j] = lw.error_norm(real_cov, scaling=False)
lw_shrinkage[i, j] = lw.shrinkage_
oa = OAS(store_precision=False, assume_centered=True)
oa.fit(X)
oa_mse[i, j] = oa.error_norm(real_cov, scaling=False)
oa_shrinkage[i, j] = oa.shrinkage_
# plot MSE
plt.subplot(2, 1, 1)
plt.errorbar(n_samples_range, lw_mse.mean(1), yerr=lw_mse.std(1),
label='Ledoit-Wolf', color='navy', lw=2)
plt.errorbar(n_samples_range, oa_mse.mean(1), yerr=oa_mse.std(1),
label='OAS', color='darkorange', lw=2)
plt.ylabel("Squared error")
plt.legend(loc="upper right")
plt.title("Comparison of covariance estimators")
plt.xlim(5, 31)
# plot shrinkage coefficient
plt.subplot(2, 1, 2)
plt.errorbar(n_samples_range, lw_shrinkage.mean(1), yerr=lw_shrinkage.std(1),
label='Ledoit-Wolf', color='navy', lw=2)
plt.errorbar(n_samples_range, oa_shrinkage.mean(1), yerr=oa_shrinkage.std(1),
label='OAS', color='darkorange', lw=2)
plt.xlabel("n_samples")
plt.ylabel("Shrinkage")
plt.legend(loc="lower right")
plt.ylim(plt.ylim()[0], 1. + (plt.ylim()[1] - plt.ylim()[0]) / 10.)
plt.xlim(5, 31)
plt.show()
| bsd-3-clause |
phobson/statsmodels | statsmodels/sandbox/examples/example_crossval.py | 33 | 2232 |
import numpy as np
from statsmodels.sandbox.tools import cross_val
if __name__ == '__main__':
#A: josef-pktd
import statsmodels.api as sm
from statsmodels.api import OLS
#from statsmodels.datasets.longley import load
from statsmodels.datasets.stackloss import load
from statsmodels.iolib.table import (SimpleTable, default_txt_fmt,
default_latex_fmt, default_html_fmt)
import numpy as np
data = load()
data.exog = sm.tools.add_constant(data.exog, prepend=False)
resols = sm.OLS(data.endog, data.exog).fit()
print('\n OLS leave 1 out')
for inidx, outidx in cross_val.LeaveOneOut(len(data.endog)):
res = sm.OLS(data.endog[inidx], data.exog[inidx,:]).fit()
        print(data.endog[outidx], res.model.predict(res.params, data.exog[outidx,:]), end=' ')
print(data.endog[outidx] - res.model.predict(res.params, data.exog[outidx,:]))
print('\n OLS leave 2 out')
resparams = []
for inidx, outidx in cross_val.LeavePOut(len(data.endog), 2):
res = sm.OLS(data.endog[inidx], data.exog[inidx,:]).fit()
#print data.endog[outidx], res.model.predict(data.exog[outidx,:]),
#print ((data.endog[outidx] - res.model.predict(data.exog[outidx,:]))**2).sum()
resparams.append(res.params)
resparams = np.array(resparams)
print(resparams)
doplots = 1
if doplots:
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties
plt.figure()
figtitle = 'Leave2out parameter estimates'
t = plt.gcf().text(0.5,
0.95, figtitle,
horizontalalignment='center',
fontproperties=FontProperties(size=16))
for i in range(resparams.shape[1]):
plt.subplot(4, 2, i+1)
plt.hist(resparams[:,i], bins = 10)
#plt.title("Leave2out parameter estimates")
plt.show()
for inidx, outidx in cross_val.KStepAhead(20,2):
#note the following were broken because KStepAhead returns now a slice by default
print(inidx)
print(np.ones(20)[inidx].sum(), np.arange(20)[inidx][-4:])
print(outidx)
print(np.nonzero(np.ones(20)[outidx])[0][()])
| bsd-3-clause |
UltronAI/Deep-Learning | Pattern-Recognition/hw2-Feature-Selection/skfeature/example/test_JMI.py | 1 | 1528 | import scipy.io
from sklearn.metrics import accuracy_score
from sklearn import cross_validation
from sklearn import svm
from skfeature.function.information_theoretical_based import JMI
def main():
# load data
mat = scipy.io.loadmat('../data/colon.mat')
X = mat['X'] # data
X = X.astype(float)
y = mat['Y'] # label
y = y[:, 0]
n_samples, n_features = X.shape # number of samples and number of features
# split data into 10 folds
ss = cross_validation.KFold(n_samples, n_folds=10, shuffle=True)
# perform evaluation on classification task
num_fea = 10 # number of selected features
clf = svm.LinearSVC() # linear SVM
correct = 0
for train, test in ss:
# obtain the index of each feature on the training set
idx,_,_ = JMI.jmi(X[train], y[train], n_selected_features=num_fea)
# obtain the dataset on the selected features
features = X[:, idx[0:num_fea]]
# train a classification model with the selected features on the training dataset
clf.fit(features[train], y[train])
# predict the class labels of test data
y_predict = clf.predict(features[test])
# obtain the classification accuracy on the test data
acc = accuracy_score(y[test], y_predict)
correct = correct + acc
# output the average classification accuracy over all 10 folds
print 'Accuracy:', float(correct)/10
if __name__ == '__main__':
main()
| mit |
ligovirgo/seismon | RfPrediction/BLRMS_Prediction/condor_seismic_peaks.py | 1 | 1969 |
import os, sys
import glob
import optparse
import tables
import pandas as pd
import numpy as np
import h5py
def parse_commandline():
"""
Parse the options given on the command-line.
"""
parser = optparse.OptionParser()
parser.add_option('-i','--ifos', type=str, default='LHO,LLO', help='GW Observatories: LLO,LHO...')
opts, args = parser.parse_args()
return opts
# Parse command line
opts = parse_commandline()
condorDir = './'
logDir = os.path.join(condorDir,'logs')
if not os.path.isdir(logDir):
os.makedirs(logDir)
condordag = os.path.join(condorDir,'condor.dag')
fid = open(condordag,'w')
condorsh = os.path.join(condorDir,'condor.sh')
fid1 = open(condorsh,'w')
job_number = 0
ifos = opts.ifos.split(",")
for ifo in ifos:
x = np.genfromtxt('./masterlists/{}.dat'.format(ifo))
for ii,row in enumerate(x):
fid1.write('python fetch_seismic_peaks.py -i %s -ID %d -blrmsBand 30M_100M -saveResult 1 -saveImage 0\n'%(ifo,ii))
fid.write('JOB %d condor.sub\n'%(job_number))
fid.write('RETRY %d 3\n'%(job_number))
fid.write('VARS %d jobNumber="%d" ifo="%s" id="%d"\n'%(job_number,job_number, ifo, ii))
fid.write('\n\n')
job_number = job_number + 1
fid1.close()
fid.close()
fid = open(os.path.join(condorDir,'condor.sub'),'w')
fid.write('executable = ./fetch_seismic_peaks.py\n')
fid.write('output = logs/out.$(jobNumber)\n');
fid.write('error = logs/err.$(jobNumber)\n');
fid.write('arguments = -IFO $(ifo) -ID $(id) -blrmsBand 30M_100M -saveResult 1 -saveImage 0\n')
fid.write('requirements = OpSys == "LINUX"\n');
fid.write('request_memory = 8192\n');
fid.write('request_cpus = 1\n');
fid.write('accounting_group = ligo.dev.o2.burst.allsky.stamp\n');
fid.write('notification = never\n');
fid.write('getenv = true\n');
fid.write('log = /usr1/mcoughlin/seismon.log\n')
fid.write('+MaxHours = 24\n');
fid.write('universe = vanilla\n');
fid.write('queue 1\n');
fid.close()
| gpl-3.0 |
raincoatrun/basemap | examples/testgdal.py | 4 | 2655 | """
example showing how to plot data from a DEM file and an ESRI shape file using
gdal (http://pypi.python.org/pypi/GDAL).
"""
from osgeo import gdal, ogr
from mpl_toolkits.basemap import Basemap, cm
import numpy as np
import matplotlib.pyplot as plt
from numpy import ma
# read 2.5 minute U.S. DEM file using gdal.
# (http://www.prism.oregonstate.edu/docs/meta/dem_25m.htm)
gd = gdal.Open('us_25m.dem')
array = gd.ReadAsArray()
# get lat/lon coordinates from DEM file.
coords = gd.GetGeoTransform()
nlons = array.shape[1]; nlats = array.shape[0]
delon = coords[1]
delat = coords[5]
lons = coords[0] + delon*np.arange(nlons)
lats = coords[3] + delat*np.arange(nlats)[::-1] # reverse lats
# setup figure.
fig = plt.figure(figsize=(11,6))
# setup basemap instance.
m = Basemap(llcrnrlon=-119,llcrnrlat=22,urcrnrlon=-64,urcrnrlat=49,
projection='lcc',lat_1=33,lat_2=45,lon_0=-95)
# create masked array, reversing data in latitude direction
# (so that data is oriented in increasing latitude, as transform_scalar requires).
topoin = ma.masked_values(array[::-1,:],-999.)
# transform DEM data to a 4 km native projection grid
nx = int((m.xmax-m.xmin)/4000.)+1; ny = int((m.ymax-m.ymin)/4000.)+1
topodat = m.transform_scalar(topoin,lons,lats,nx,ny,masked=True)
# plot DEM image on map.
im = m.imshow(topodat,cmap=cm.GMT_haxby_r)
# draw meridians and parallels.
m.drawparallels(np.arange(20,71,10),labels=[1,0,0,0])
m.drawmeridians(np.arange(-120,-40,10),labels=[0,0,0,1])
# plot state boundaries from shapefile using ogr.
g = ogr.Open ("st99_d00.shp")
L = g.GetLayer(0) # data is in 1st layer.
for feat in L: # iterate over features in layer
geo = feat.GetGeometryRef()
# iterate over geometries.
for count in range(geo.GetGeometryCount()):
geom = geo.GetGeometryRef(count)
if not geom.GetGeometryCount(): # just one geometry.
# get lon,lat points
lons = [geom.GetX(i) for i in range(geom.GetPointCount())]
lats = [geom.GetY(i) for i in range(geom.GetPointCount())]
# convert to map projection coords.
x, y = m(lons,lats)
# plot on map.
m.plot(x,y,'k')
else: # iterate over nested geometries.
for cnt in range( geom.GetGeometryCount()):
g = geom.GetGeometryRef( cnt )
lons = [g.GetX(i) for i in range(g.GetPointCount())]
lats = [g.GetY(i) for i in range(g.GetPointCount())]
x, y = m(lons,lats)
m.plot(x,y,'k')
# draw colorbar.
m.colorbar(im)
plt.title(gd.GetDescription()+' with state boundaries from '+g.GetName(),y=1.05)
plt.show()
| gpl-2.0 |
shikhar413/openmc | tests/regression_tests/filter_energyfun/test.py | 8 | 1854 | import openmc
import pytest
from tests.testing_harness import PyAPITestHarness
@pytest.fixture
def model():
model = openmc.model.Model()
m = openmc.Material()
m.set_density('g/cm3', 10.0)
m.add_nuclide('Am241', 1.0)
model.materials.append(m)
s = openmc.Sphere(r=100.0, boundary_type='vacuum')
c = openmc.Cell(fill=m, region=-s)
model.geometry = openmc.Geometry([c])
model.settings.batches = 5
model.settings.inactive = 0
model.settings.particles = 1000
# Define Am242m / Am242 branching ratio from ENDF/B-VII.1 data.
x = [1e-5, 3.69e-1, 1e3, 1e5, 6e5, 1e6, 2e6, 4e6, 3e7]
y = [0.1, 0.1, 0.1333, 0.158, 0.18467, 0.25618, 0.4297, 0.48, 0.48]
# Make an EnergyFunctionFilter directly from the x and y lists.
filt1 = openmc.EnergyFunctionFilter(x, y)
# Also make a filter with the .from_tabulated1d constructor. Make sure
# the filters are identical.
tab1d = openmc.data.Tabulated1D(x, y)
filt2 = openmc.EnergyFunctionFilter.from_tabulated1d(tab1d)
assert filt1 == filt2, 'Error with the .from_tabulated1d constructor'
# Make tallies
tallies = [openmc.Tally(), openmc.Tally()]
for t in tallies:
t.scores = ['(n,gamma)']
t.nuclides = ['Am241']
tallies[1].filters = [filt1]
model.tallies.extend(tallies)
return model
class FilterEnergyFunHarness(PyAPITestHarness):
def _get_results(self):
# Read the statepoint file.
sp = openmc.StatePoint(self._sp_name)
# Use tally arithmetic to compute the branching ratio.
br_tally = sp.tallies[2] / sp.tallies[1]
# Output the tally in a Pandas DataFrame.
return br_tally.get_pandas_dataframe().to_string() + '\n'
def test_filter_energyfun(model):
harness = FilterEnergyFunHarness('statepoint.5.h5', model)
harness.main()
| mit |
DSLituiev/scikit-learn | sklearn/gaussian_process/tests/test_gaussian_process.py | 267 | 6813 | """
Testing for Gaussian Process module (sklearn.gaussian_process)
"""
# Author: Vincent Dubourg <[email protected]>
# Licence: BSD 3 clause
from nose.tools import raises
from nose.tools import assert_true
import numpy as np
from sklearn.gaussian_process import GaussianProcess
from sklearn.gaussian_process import regression_models as regression
from sklearn.gaussian_process import correlation_models as correlation
from sklearn.datasets import make_regression
from sklearn.utils.testing import assert_greater
f = lambda x: x * np.sin(x)
X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
X2 = np.atleast_2d([2., 4., 5.5, 6.5, 7.5]).T
y = f(X).ravel()
def test_1d(regr=regression.constant, corr=correlation.squared_exponential,
random_start=10, beta0=None):
# MLE estimation of a one-dimensional Gaussian Process model.
# Check random start optimization.
# Test the interpolating property.
gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
theta0=1e-2, thetaL=1e-4, thetaU=1e-1,
random_start=random_start, verbose=False).fit(X, y)
y_pred, MSE = gp.predict(X, eval_MSE=True)
y2_pred, MSE2 = gp.predict(X2, eval_MSE=True)
assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.)
and np.allclose(MSE2, 0., atol=10))
def test_2d(regr=regression.constant, corr=correlation.squared_exponential,
random_start=10, beta0=None):
# MLE estimation of a two-dimensional Gaussian Process model accounting for
# anisotropy. Check random start optimization.
# Test the interpolating property.
b, kappa, e = 5., .5, .1
g = lambda x: b - x[:, 1] - kappa * (x[:, 0] - e) ** 2.
X = np.array([[-4.61611719, -6.00099547],
[4.10469096, 5.32782448],
[0.00000000, -0.50000000],
[-6.17289014, -4.6984743],
[1.3109306, -6.93271427],
[-5.03823144, 3.10584743],
[-2.87600388, 6.74310541],
[5.21301203, 4.26386883]])
y = g(X).ravel()
thetaL = [1e-4] * 2
thetaU = [1e-1] * 2
gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
theta0=[1e-2] * 2, thetaL=thetaL,
thetaU=thetaU,
random_start=random_start, verbose=False)
gp.fit(X, y)
y_pred, MSE = gp.predict(X, eval_MSE=True)
assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.))
eps = np.finfo(gp.theta_.dtype).eps
assert_true(np.all(gp.theta_ >= thetaL - eps)) # Lower bounds of hyperparameters
assert_true(np.all(gp.theta_ <= thetaU + eps)) # Upper bounds of hyperparameters
def test_2d_2d(regr=regression.constant, corr=correlation.squared_exponential,
random_start=10, beta0=None):
# MLE estimation of a two-dimensional Gaussian Process model accounting for
# anisotropy. Check random start optimization.
# Test the GP interpolation for 2D output
b, kappa, e = 5., .5, .1
g = lambda x: b - x[:, 1] - kappa * (x[:, 0] - e) ** 2.
f = lambda x: np.vstack((g(x), g(x))).T
X = np.array([[-4.61611719, -6.00099547],
[4.10469096, 5.32782448],
[0.00000000, -0.50000000],
[-6.17289014, -4.6984743],
[1.3109306, -6.93271427],
[-5.03823144, 3.10584743],
[-2.87600388, 6.74310541],
[5.21301203, 4.26386883]])
y = f(X)
gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
theta0=[1e-2] * 2, thetaL=[1e-4] * 2,
thetaU=[1e-1] * 2,
random_start=random_start, verbose=False)
gp.fit(X, y)
y_pred, MSE = gp.predict(X, eval_MSE=True)
assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.))
@raises(ValueError)
def test_wrong_number_of_outputs():
gp = GaussianProcess()
gp.fit([[1, 2, 3], [4, 5, 6]], [1, 2, 3])
def test_more_builtin_correlation_models(random_start=1):
# Repeat test_1d and test_2d for several built-in correlation
# models specified as strings.
all_corr = ['absolute_exponential', 'squared_exponential', 'cubic',
'linear']
for corr in all_corr:
test_1d(regr='constant', corr=corr, random_start=random_start)
test_2d(regr='constant', corr=corr, random_start=random_start)
test_2d_2d(regr='constant', corr=corr, random_start=random_start)
def test_ordinary_kriging():
# Repeat test_1d and test_2d with given regression weights (beta0) for
# different regression models (Ordinary Kriging).
test_1d(regr='linear', beta0=[0., 0.5])
test_1d(regr='quadratic', beta0=[0., 0.5, 0.5])
test_2d(regr='linear', beta0=[0., 0.5, 0.5])
test_2d(regr='quadratic', beta0=[0., 0.5, 0.5, 0.5, 0.5, 0.5])
test_2d_2d(regr='linear', beta0=[0., 0.5, 0.5])
test_2d_2d(regr='quadratic', beta0=[0., 0.5, 0.5, 0.5, 0.5, 0.5])
def test_no_normalize():
gp = GaussianProcess(normalize=False).fit(X, y)
y_pred = gp.predict(X)
assert_true(np.allclose(y_pred, y))
def test_random_starts():
# Test that an increasing number of random-starts of GP fitting only
# increases the reduced likelihood function of the optimal theta.
n_samples, n_features = 50, 3
np.random.seed(0)
rng = np.random.RandomState(0)
X = rng.randn(n_samples, n_features) * 2 - 1
y = np.sin(X).sum(axis=1) + np.sin(3 * X).sum(axis=1)
best_likelihood = -np.inf
for random_start in range(1, 5):
gp = GaussianProcess(regr="constant", corr="squared_exponential",
theta0=[1e-0] * n_features,
thetaL=[1e-4] * n_features,
thetaU=[1e+1] * n_features,
random_start=random_start, random_state=0,
verbose=False).fit(X, y)
rlf = gp.reduced_likelihood_function()[0]
assert_greater(rlf, best_likelihood - np.finfo(np.float32).eps)
best_likelihood = rlf
def test_mse_solving():
# test the MSE estimate to be sane.
# non-regression test for ignoring off-diagonals of feature covariance,
# testing with nugget that renders covariance useless, only
# using the mean function, with low effective rank of data
gp = GaussianProcess(corr='absolute_exponential', theta0=1e-4,
thetaL=1e-12, thetaU=1e-2, nugget=1e-2,
optimizer='Welch', regr="linear", random_state=0)
X, y = make_regression(n_informative=3, n_features=60, noise=50,
random_state=0, effective_rank=1)
gp.fit(X, y)
assert_greater(1000, gp.predict(X, eval_MSE=True)[1].mean())
| bsd-3-clause |
andeplane/lammps | python/examples/matplotlib_plot.py | 22 | 2270 | #!/usr/bin/env python -i
# preceding line should have path for Python on your machine
# matplotlib_plot.py
# Purpose: plot Temp of running LAMMPS simulation via matplotlib
# Syntax: plot.py in.lammps Nfreq Nsteps compute-ID
# in.lammps = LAMMPS input script
# Nfreq = plot data point every this many steps
# Nsteps = run for this many steps
# compute-ID = ID of compute that calculates temperature
# (or any other scalar quantity)
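# Example invocation (hypothetical input-script name, for illustration only):
#   python matplotlib_plot.py in.melt 500 10000 thermo_temp
# i.e. refresh the plot every 500 steps over a 10000-step run, reading the scalar
# value of the compute with ID "thermo_temp" for the running simulation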
from __future__ import print_function
import sys
sys.path.append("./pizza")
import matplotlib
matplotlib.use('tkagg')
import matplotlib.pyplot as plt
# parse command line
argv = sys.argv
if len(argv) != 5:
print("Syntax: plot.py in.lammps Nfreq Nsteps compute-ID")
sys.exit()
infile = sys.argv[1]
nfreq = int(sys.argv[2])
nsteps = int(sys.argv[3])
compute = sys.argv[4]
me = 0
# uncomment if running in parallel via Pypar
#import pypar
#me = pypar.rank()
#nprocs = pypar.size()
from lammps import lammps
lmp = lammps()
# run infile all at once
# assumed to have no run command in it
lmp.file(infile)
lmp.command("thermo %d" % nfreq)
# initial 0-step run to generate initial 1-point plot
lmp.command("run 0 pre yes post no")
value = lmp.extract_compute(compute,0,0)
ntimestep = 0
xaxis = [ntimestep]
yaxis = [value]
# create matplotlib plot
# just proc 0 handles plotting
if me == 0:
fig = plt.figure()
line, = plt.plot(xaxis, yaxis)
plt.xlim([0, nsteps])
plt.title(compute)
plt.xlabel("Timestep")
plt.ylabel("Temperature")
plt.show(block=False)
# run nfreq steps at a time w/out pre/post, query compute, refresh plot
import time
while ntimestep < nsteps:
lmp.command("run %d pre no post no" % nfreq)
ntimestep += nfreq
value = lmp.extract_compute(compute,0,0)
xaxis.append(ntimestep)
yaxis.append(value)
if me == 0:
line.set_xdata(xaxis)
line.set_ydata(yaxis)
ax = plt.gca()
ax.relim()
ax.autoscale_view(True, True, True)
fig.canvas.draw()
lmp.command("run 0 pre no post yes")
# uncomment if running in parallel via Pypar
#print("Proc %d out of %d procs has" % (me,nprocs), lmp)
#pypar.finalize()
if sys.version_info[0] == 3:
input("Press Enter to exit...")
else:
raw_input("Press Enter to exit...")
| gpl-2.0 |
YinongLong/scikit-learn | examples/missing_values.py | 71 | 3055 | """
======================================================
Imputing missing values before building an estimator
======================================================
This example shows that imputing the missing values can give better results
than discarding the samples containing any missing value.
Imputing does not always improve the predictions, so please check via cross-validation.
Sometimes dropping rows or using marker values is more effective.
Missing values can be replaced by the mean, the median or the most frequent
value using the ``strategy`` hyper-parameter.
The median is a more robust estimator for data with high magnitude variables
which could dominate results (otherwise known as a 'long tail').
Script output::
Score with the entire dataset = 0.56
Score without the samples containing missing values = 0.48
Score after imputation of the missing values = 0.55
In this case, imputing helps the classifier get close to the original score.
"""
import numpy as np
from sklearn.datasets import load_boston
from sklearn.ensemble import RandomForestRegressor
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import Imputer
from sklearn.model_selection import cross_val_score
rng = np.random.RandomState(0)
dataset = load_boston()
X_full, y_full = dataset.data, dataset.target
n_samples = X_full.shape[0]
n_features = X_full.shape[1]
# Estimate the score on the entire dataset, with no missing values
estimator = RandomForestRegressor(random_state=0, n_estimators=100)
score = cross_val_score(estimator, X_full, y_full).mean()
print("Score with the entire dataset = %.2f" % score)
# Add missing values in 75% of the lines
missing_rate = 0.75
n_missing_samples = int(np.floor(n_samples * missing_rate))
missing_samples = np.hstack((np.zeros(n_samples - n_missing_samples,
dtype=np.bool),
np.ones(n_missing_samples,
dtype=np.bool)))
rng.shuffle(missing_samples)
missing_features = rng.randint(0, n_features, n_missing_samples)
# Estimate the score without the lines containing missing values
X_filtered = X_full[~missing_samples, :]
y_filtered = y_full[~missing_samples]
estimator = RandomForestRegressor(random_state=0, n_estimators=100)
score = cross_val_score(estimator, X_filtered, y_filtered).mean()
print("Score without the samples containing missing values = %.2f" % score)
# Estimate the score after imputation of the missing values
X_missing = X_full.copy()
X_missing[np.where(missing_samples)[0], missing_features] = 0
y_missing = y_full.copy()
estimator = Pipeline([("imputer", Imputer(missing_values=0,
strategy="mean",
axis=0)),
("forest", RandomForestRegressor(random_state=0,
n_estimators=100))])
score = cross_val_score(estimator, X_missing, y_missing).mean()
print("Score after imputation of the missing values = %.2f" % score)
| bsd-3-clause |
evanbiederstedt/CMBintheLikeHoodz | source_code/CAMB_vary_OmegaB_lmax1100_Feb2016.py | 1 | 137613 |
# coding: utf-8
# In[1]:
#
#
# hundred_samples = np.linspace(0.05, 0.5, num=100)
#
# Planck found \Omega_CDM
# GAVO simulated map set at \Omega_CDM = 0.122
# CAMB default below at omch2=0.122
#
# In[2]:
#
# First output 200 CAMB scalar outputs
#
# 0.005 to 0.05
#
# In[3]:
from matplotlib import pyplot as plt
import numpy as np
import camb
from camb import model, initialpower
# In[4]:
"""
#Set up a new set of parameters for CAMB
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.022, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(2000, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
for name in powers:
print(name)
# In[5]:
# plot the total lensed CMB power spectra versus unlensed, and fractional difference
totCL=powers['total']
unlensedCL=powers['unlensed_scalar']
print(totCL.shape)
# Python CL arrays are all zero based (starting at L=0), Note L=0,1 entries will be zero by default.
# The differenent CL are always in the order TT, EE, BB, TE (with BB=0 for unlensed scalar results).
ls = np.arange(totCL.shape[0])
print(ls)
#print(totCL[:30]) # print first 30 totCL
fig, ax = plt.subplots(2,2, figsize = (12,12))
ax[0,0].plot(ls,totCL[:,0], color='k')
ax[0,0].plot(ls,unlensedCL[:,0], color='r')
ax[0,0].set_title('TT')
ax[0,1].plot(ls[2:], 1-unlensedCL[2:,0]/totCL[2:,0]);
ax[0,1].set_title(r'$\Delta TT$')
ax[1,0].plot(ls,totCL[:,1], color='k')
ax[1,0].plot(ls,unlensedCL[:,1], color='r')
ax[1,0].set_title(r'$EE$')
ax[1,1].plot(ls,totCL[:,3], color='k')
ax[1,1].plot(ls,unlensedCL[:,3], color='r')
ax[1,1].set_title(r'$TE$');
for ax in ax.reshape(-1): ax.set_xlim([2,2500])
"""
# In[6]:
twohundred_samples = np.linspace(0.005, 0.05, num=200)
#print(twohundred_samples)
#Set up a new set of parameters for CAMB
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.022, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
pars.set_for_lmax(2500, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers =results.get_cmb_power_spectra(pars)
for name in powers:
print(name)
"""
array([ 0.005 , 0.00522613, 0.00545226, 0.00567839, 0.00590452,
0.00613065, 0.00635678, 0.00658291, 0.00680905, 0.00703518,
0.00726131, 0.00748744, 0.00771357, 0.0079397 , 0.00816583,
0.00839196, 0.00861809, 0.00884422, 0.00907035, 0.00929648,
0.00952261, 0.00974874, 0.00997487, 0.01020101, 0.01042714,
0.01065327, 0.0108794 , 0.01110553, 0.01133166, 0.01155779,
0.01178392, 0.01201005, 0.01223618, 0.01246231, 0.01268844,
0.01291457, 0.0131407 , 0.01336683, 0.01359296, 0.0138191 ,
0.01404523, 0.01427136, 0.01449749, 0.01472362, 0.01494975,
0.01517588, 0.01540201, 0.01562814, 0.01585427, 0.0160804 ,
0.01630653, 0.01653266, 0.01675879, 0.01698492, 0.01721106,
0.01743719, 0.01766332, 0.01788945, 0.01811558, 0.01834171,
0.01856784, 0.01879397, 0.0190201 , 0.01924623, 0.01947236,
0.01969849, 0.01992462, 0.02015075, 0.02037688, 0.02060302,
0.02082915, 0.02105528, 0.02128141, 0.02150754, 0.02173367,
0.0219598 , 0.02218593, 0.02241206, 0.02263819, 0.02286432,
0.02309045, 0.02331658, 0.02354271, 0.02376884, 0.02399497,
0.02422111, 0.02444724, 0.02467337, 0.0248995 , 0.02512563,
0.02535176, 0.02557789, 0.02580402, 0.02603015, 0.02625628,
0.02648241, 0.02670854, 0.02693467, 0.0271608 , 0.02738693,
0.02761307, 0.0278392 , 0.02806533, 0.02829146, 0.02851759,
0.02874372, 0.02896985, 0.02919598, 0.02942211, 0.02964824,
0.02987437, 0.0301005 , 0.03032663, 0.03055276, 0.03077889,
0.03100503, 0.03123116, 0.03145729, 0.03168342, 0.03190955,
0.03213568, 0.03236181, 0.03258794, 0.03281407, 0.0330402 ,
0.03326633, 0.03349246, 0.03371859, 0.03394472, 0.03417085,
0.03439698, 0.03462312, 0.03484925, 0.03507538, 0.03530151,
0.03552764, 0.03575377, 0.0359799 , 0.03620603, 0.03643216,
0.03665829, 0.03688442, 0.03711055, 0.03733668, 0.03756281,
0.03778894, 0.03801508, 0.03824121, 0.03846734, 0.03869347,
0.0389196 , 0.03914573, 0.03937186, 0.03959799, 0.03982412,
0.04005025, 0.04027638, 0.04050251, 0.04072864, 0.04095477,
0.0411809 , 0.04140704, 0.04163317, 0.0418593 , 0.04208543,
0.04231156, 0.04253769, 0.04276382, 0.04298995, 0.04321608,
0.04344221, 0.04366834, 0.04389447, 0.0441206 , 0.04434673,
0.04457286, 0.04479899, 0.04502513, 0.04525126, 0.04547739,
0.04570352, 0.04592965, 0.04615578, 0.04638191, 0.04660804,
0.04683417, 0.0470603 , 0.04728643, 0.04751256, 0.04773869,
0.04796482, 0.04819095, 0.04841709, 0.04864322, 0.04886935,
0.04909548, 0.04932161, 0.04954774, 0.04977387, 0.05 ])
"""
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls0 = unlencl[:,0][2:1101]
print(len(cls0))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls1 = unlencl[:,0][2:1101]
print(len(cls1))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls2 = unlencl[:,0][2:1101]
print(len(cls2))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls3 = unlencl[:,0][2:1101]
print(len(cls3))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls4 = unlencl[:,0][2:1101]
print(len(cls4))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls5 = unlencl[:,0][2:1101]
print(len(cls5))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls6 = unlencl[:,0][2:1101]
print(len(cls6))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls7 = unlencl[:,0][2:1101]
print(len(cls7))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls8 = unlencl[:,0][2:1101]
print(len(cls8))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls9 = unlencl[:,0][2:1101]
print(len(cls9))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls10 = unlencl[:,0][2:1101]
print(len(cls10))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls11 = unlencl[:,0][2:1101]
print(len(cls11))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls12 = unlencl[:,0][2:1101]
print(len(cls12))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls13 = unlencl[:,0][2:1101]
print(len(cls13))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls14 = unlencl[:,0][2:1101]
print(len(cls14))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls15 = unlencl[:,0][2:1101]
print(len(cls15))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls16 = unlencl[:,0][2:1101]
print(len(cls16))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls17 = unlencl[:,0][2:1101]
print(len(cls17))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls18 = unlencl[:,0][2:1101]
print(len(cls18))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls19 = unlencl[:,0][2:1101]
print(len(cls19))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls20 = unlencl[:,0][2:1101]
print(len(cls20))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls21 = unlencl[:,0][2:1101]
print(len(cls21))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls22 = unlencl[:,0][2:1101]
print(len(cls22))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls23 = unlencl[:,0][2:1101]
print(len(cls23))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls24 = unlencl[:,0][2:1101]
print(len(cls24))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls25 = unlencl[:,0][2:1101]
print(len(cls25))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls26 = unlencl[:,0][2:1101]
print(len(cls26))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls27 = unlencl[:,0][2:1101]
print(len(cls27))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls28 = unlencl[:,0][2:1101]
print(len(cls28))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls29 = unlencl[:,0][2:1101]
print(len(cls29))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls30 = unlencl[:,0][2:1101]
print(len(cls30))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls31 = unlencl[:,0][2:1101]
print(len(cls31))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls32 = unlencl[:,0][2:1101]
print(len(cls32))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls33 = unlencl[:,0][2:1101]
print(len(cls33))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls34 = unlencl[:,0][2:1101]
print(len(cls34))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls35 = unlencl[:,0][2:1101]
print(len(cls35))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls36 = unlencl[:,0][2:1101]
print(len(cls36))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls37 = unlencl[:,0][2:1101]
print(len(cls37))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls38 = unlencl[:,0][2:1101]
print(len(cls38))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls39 = unlencl[:,0][2:1101]
print(len(cls39))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls40 = unlencl[:,0][2:1101]
print(len(cls40))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls41 = unlencl[:,0][2:1101]
print(len(cls41))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls42 = unlencl[:,0][2:1101]
print(len(cls42))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls43 = unlencl[:,0][2:1101]
print(len(cls43))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls44 = unlencl[:,0][2:1101]
print(len(cls44))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls45 = unlencl[:,0][2:1101]
print(len(cls45))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls46 = unlencl[:,0][2:1101]
print(len(cls46))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls47 = unlencl[:,0][2:1101]
print(len(cls47))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls48 = unlencl[:,0][2:1101]
print(len(cls48))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls49 = unlencl[:,0][2:1101]
print(len(cls49))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls50 = unlencl[:,0][2:1101]
print(len(cls50))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls51 = unlencl[:,0][2:1101]
print(len(cls51))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls52 = unlencl[:,0][2:1101]
print(len(cls52))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls53 = unlencl[:,0][2:1101]
print(len(cls53))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls54 = unlencl[:,0][2:1101]
print(len(cls54))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls55 = unlencl[:,0][2:1101]
print(len(cls55))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls56 = unlencl[:,0][2:1101]
print(len(cls56))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls57 = unlencl[:,0][2:1101]
print(len(cls57))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls58 = unlencl[:,0][2:1101]
print(len(cls58))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls59 = unlencl[:,0][2:1101]
print(len(cls59))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls60 = unlencl[:,0][2:1101]
print(len(cls60))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls61 = unlencl[:,0][2:1101]
print(len(cls61))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls62 = unlencl[:,0][2:1101]
print(len(cls62))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls63 = unlencl[:,0][2:1101]
print(len(cls63))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls64 = unlencl[:,0][2:1101]
print(len(cls64))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls65 = unlencl[:,0][2:1101]
print(len(cls65))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls66 = unlencl[:,0][2:1101]
print(len(cls66))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls67 = unlencl[:,0][2:1101]
print(len(cls67))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls68 = unlencl[:,0][2:1101]
print(len(cls68))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls69 = unlencl[:,0][2:1101]
print(len(cls69))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls70 = unlencl[:,0][2:1101]
print(len(cls70))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls71 = unlencl[:,0][2:1101]
print(len(cls71))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls72 = unlencl[:,0][2:1101]
print(len(cls72))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls73 = unlencl[:,0][2:1101]
print(len(cls73))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls74 = unlencl[:,0][2:1101]
print(len(cls74))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls75 = unlencl[:,0][2:1101]
print(len(cls75))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls76 = unlencl[:,0][2:1101]
print(len(cls76))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls77 = unlencl[:,0][2:1101]
print(len(cls77))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls78 = unlencl[:,0][2:1101]
print(len(cls78))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls79 = unlencl[:,0][2:1101]
print(len(cls79))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls80 = unlencl[:,0][2:1101]
print(len(cls80))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls81 = unlencl[:,0][2:1101]
print(len(cls81))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls82 = unlencl[:,0][2:1101]
print(len(cls82))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls83 = unlencl[:,0][2:1101]
print(len(cls83))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls84 = unlencl[:,0][2:1101]
print(len(cls84))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls85 = unlencl[:,0][2:1101]
print(len(cls85))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls86 = unlencl[:,0][2:1101]
print(len(cls86))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls87 = unlencl[:,0][2:1101]
print(len(cls87))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls88 = unlencl[:,0][2:1101]
print(len(cls88))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls89 = unlencl[:,0][2:1101]
print(len(cls89))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls90 = unlencl[:,0][2:1101]
print(len(cls90))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls91 = unlencl[:,0][2:1101]
print(len(cls91))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls92 = unlencl[:,0][2:1101]
print(len(cls92))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls93 = unlencl[:,0][2:1101]
print(len(cls93))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls94 = unlencl[:,0][2:1101]
print(len(cls94))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls95 = unlencl[:,0][2:1101]
print(len(cls95))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls96 = unlencl[:,0][2:1101]
print(len(cls96))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls97 = unlencl[:,0][2:1101]
print(len(cls97))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls98 = unlencl[:,0][2:1101]
print(len(cls98))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls99 = unlencl[:,0][2:1101]
print(len(cls99))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls100 = unlencl[:,0][2:1101]
print(len(cls100))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls101 = unlencl[:,0][2:1101]
print(len(cls101))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls102 = unlencl[:,0][2:1101]
print(len(cls102))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls103 = unlencl[:,0][2:1101]
print(len(cls103))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls104 = unlencl[:,0][2:1101]
print(len(cls104))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls105 = unlencl[:,0][2:1101]
print(len(cls105))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls106 = unlencl[:,0][2:1101]
print(len(cls106))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls107 = unlencl[:,0][2:1101]
print(len(cls107))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls108 = unlencl[:,0][2:1101]
print(len(cls108))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls109 = unlencl[:,0][2:1101]
print(len(cls109))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls110 = unlencl[:,0][2:1101]
print(len(cls110))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls111 = unlencl[:,0][2:1101]
print(len(cls111))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls112 = unlencl[:,0][2:1101]
print(len(cls112))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls113 = unlencl[:,0][2:1101]
print(len(cls113))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls114 = unlencl[:,0][2:1101]
print(len(cls114))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls115 = unlencl[:,0][2:1101]
print(len(cls115))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls116 = unlencl[:,0][2:1101]
print(len(cls116))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls117 = unlencl[:,0][2:1101]
print(len(cls117))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls118 = unlencl[:,0][2:1101]
print(len(cls118))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls119 = unlencl[:,0][2:1101]
print(len(cls119))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls120 = unlencl[:,0][2:1101]
print(len(cls120))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls121 = unlencl[:,0][2:1101]
print(len(cls121))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls122 = unlencl[:,0][2:1101]
print(len(cls122))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls123 = unlencl[:,0][2:1101]
print(len(cls123))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls124 = unlencl[:,0][2:1101]
print(len(cls124))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls125 = unlencl[:,0][2:1101]
print(len(cls125))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls126 = unlencl[:,0][2:1101]
print(len(cls126))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls127 = unlencl[:,0][2:1101]
print(len(cls127))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls128 = unlencl[:,0][2:1101]
print(len(cls128))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls129 = unlencl[:,0][2:1101]
print(len(cls129))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls130 = unlencl[:,0][2:1101]
print(len(cls130))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls131 = unlencl[:,0][2:1101]
print(len(cls131))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls132 = unlencl[:,0][2:1101]
print(len(cls132))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls133 = unlencl[:,0][2:1101]
print(len(cls133))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls134 = unlencl[:,0][2:1101]
print(len(cls134))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls135 = unlencl[:,0][2:1101]
print(len(cls135))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls136 = unlencl[:,0][2:1101]
print(len(cls136))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls137 = unlencl[:,0][2:1101]
print(len(cls137))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls138 = unlencl[:,0][2:1101]
print(len(cls138))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls139 = unlencl[:,0][2:1101]
print(len(cls139))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls140 = unlencl[:,0][2:1101]
print(len(cls140))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls141 = unlencl[:,0][2:1101]
print(len(cls141))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls142 = unlencl[:,0][2:1101]
print(len(cls142))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls143 = unlencl[:,0][2:1101]
print(len(cls143))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls144 = unlencl[:,0][2:1101]
print(len(cls144))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls145 = unlencl[:,0][2:1101]
print(len(cls145))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls146 = unlencl[:,0][2:1101]
print(len(cls146))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls147 = unlencl[:,0][2:1101]
print(len(cls147))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls148 = unlencl[:,0][2:1101]
print(len(cls148))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls149 = unlencl[:,0][2:1101]
print(len(cls149))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls150 = unlencl[:,0][2:1101]
print(len(cls150))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls151 = unlencl[:,0][2:1101]
print(len(cls151))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls152 = unlencl[:,0][2:1101]
print(len(cls152))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls153 = unlencl[:,0][2:1101]
print(len(cls153))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls154 = unlencl[:,0][2:1101]
print(len(cls154))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls155 = unlencl[:,0][2:1101]
print(len(cls155))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls156 = unlencl[:,0][2:1101]
print(len(cls156))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls157 = unlencl[:,0][2:1101]
print(len(cls157))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls158 = unlencl[:,0][2:1101]
print(len(cls158))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls159 = unlencl[:,0][2:1101]
print(len(cls159))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls160 = unlencl[:,0][2:1101]
print(len(cls160))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls161 = unlencl[:,0][2:1101]
print(len(cls161))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls162 = unlencl[:,0][2:1101]
print(len(cls162))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls163 = unlencl[:,0][2:1101]
print(len(cls163))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls164 = unlencl[:,0][2:1101]
print(len(cls164))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls165 = unlencl[:,0][2:1101]
print(len(cls165))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls166 = unlencl[:,0][2:1101]
print(len(cls166))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls167 = unlencl[:,0][2:1101]
print(len(cls167))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls168 = unlencl[:,0][2:1101]
print(len(cls168))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls169 = unlencl[:,0][2:1101]
print(len(cls169))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls170 = unlencl[:,0][2:1101]
print(len(cls170))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls171 = unlencl[:,0][2:1101]
print(len(cls171))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls172 = unlencl[:,0][2:1101]
print(len(cls172))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls173 = unlencl[:,0][2:1101]
print(len(cls173))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls174 = unlencl[:,0][2:1101]
print(len(cls174))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls175 = unlencl[:,0][2:1101]
print(len(cls175))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls176 = unlencl[:,0][2:1101]
print(len(cls176))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls177 = unlencl[:,0][2:1101]
print(len(cls177))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls178 = unlencl[:,0][2:1101]
print(len(cls178))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls179 = unlencl[:,0][2:1101]
print(len(cls179))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls180 = unlencl[:,0][2:1101]
print(len(cls180))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls181 = unlencl[:,0][2:1101]
print(len(cls181))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls182 = unlencl[:,0][2:1101]
print(len(cls182))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls183 = unlencl[:,0][2:1101]
print(len(cls183))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls184 = unlencl[:,0][2:1101]
print(len(cls184))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls185 = unlencl[:,0][2:1101]
print(len(cls185))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls186 = unlencl[:,0][2:1101]
print(len(cls186))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls187 = unlencl[:,0][2:1101]
print(len(cls187))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls188 = unlencl[:,0][2:1101]
print(len(cls188))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls189 = unlencl[:,0][2:1101]
print(len(cls189))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls190 = unlencl[:,0][2:1101]
print(len(cls190))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls191 = unlencl[:,0][2:1101]
print(len(cls191))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls192 = unlencl[:,0][2:1101]
print(len(cls192))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls193 = unlencl[:,0][2:1101]
print(len(cls193))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls194 = unlencl[:,0][2:1101]
print(len(cls194))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls195 = unlencl[:,0][2:1101]
print(len(cls195))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls196 = unlencl[:,0][2:1101]
print(len(cls196))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls197 = unlencl[:,0][2:1101]
print(len(cls197))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls198 = unlencl[:,0][2:1101]
print(len(cls198))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls199 = unlencl[:,0][2:1101]
print(len(cls199))
pars = camb.CAMBparams()
#This function sets up CosmoMC-like settings, with one massive neutrino and helium set using BBN consistency
pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
pars.InitPower.set_params(ns=0.965, r=0)
#pars.set_for_lmax(514, lens_potential_accuracy=0)
#calculate results for these parameters
results = camb.get_results(pars)
#get dictionary of CAMB power spectra
powers = results.get_cmb_power_spectra(pars)
unlencl = powers['unlensed_scalar']
ls = np.arange(unlencl.shape[0])
print(ls)
print(len(ls))
#
# plot of spectrum
cls200 = unlencl[:,0][2:1101]
print(len(cls200))
"""
0.005
0.00522613065327
0.00545226130653
0.0056783919598
0.00590452261307
0.00613065326633
0.0063567839196
0.00658291457286
0.00680904522613
0.0070351758794
0.00726130653266
0.00748743718593
0.0077135678392
0.00793969849246
0.00816582914573
0.00839195979899
0.00861809045226
0.00884422110553
0.00907035175879
0.00929648241206
0.00952261306533
0.00974874371859
0.00997487437186
0.0102010050251
0.0104271356784
0.0106532663317
0.0108793969849
0.0111055276382
0.0113316582915
0.0115577889447
0.011783919598
0.0120100502513
0.0122361809045
0.0124623115578
0.0126884422111
0.0129145728643
0.0131407035176
0.0133668341709
0.0135929648241
0.0138190954774
0.0140452261307
0.0142713567839
0.0144974874372
0.0147236180905
0.0149497487437
0.015175879397
0.0154020100503
0.0156281407035
0.0158542713568
0.0160804020101
0.0163065326633
0.0165326633166
0.0167587939698
0.0169849246231
0.0172110552764
0.0174371859296
0.0176633165829
0.0178894472362
0.0181155778894
0.0183417085427
0.018567839196
0.0187939698492
0.0190201005025
0.0192462311558
0.019472361809
0.0196984924623
0.0199246231156
0.0201507537688
0.0203768844221
0.0206030150754
0.0208291457286
0.0210552763819
0.0212814070352
0.0215075376884
0.0217336683417
0.021959798995
0.0221859296482
0.0224120603015
0.0226381909548
0.022864321608
0.0230904522613
0.0233165829146
0.0235427135678
0.0237688442211
0.0239949748744
0.0242211055276
0.0244472361809
0.0246733668342
0.0248994974874
0.0251256281407
0.025351758794
0.0255778894472
0.0258040201005
0.0260301507538
0.026256281407
0.0264824120603
0.0267085427136
0.0269346733668
0.0271608040201
0.0273869346734
0.0276130653266
0.0278391959799
0.0280653266332
0.0282914572864
0.0285175879397
0.028743718593
0.0289698492462
0.0291959798995
0.0294221105528
0.029648241206
0.0298743718593
0.0301005025126
0.0303266331658
0.0305527638191
0.0307788944724
0.0310050251256
0.0312311557789
0.0314572864322
0.0316834170854
0.0319095477387
0.032135678392
0.0323618090452
0.0325879396985
0.0328140703518
0.033040201005
0.0332663316583
0.0334924623116
0.0337185929648
0.0339447236181
0.0341708542714
0.0343969849246
0.0346231155779
0.0348492462312
0.0350753768844
0.0353015075377
0.035527638191
0.0357537688442
0.0359798994975
0.0362060301508
0.036432160804
0.0366582914573
0.0368844221106
0.0371105527638
0.0373366834171
0.0375628140704
0.0377889447236
0.0380150753769
0.0382412060302
0.0384673366834
0.0386934673367
0.0389195979899
0.0391457286432
0.0393718592965
0.0395979899497
0.039824120603
0.0400502512563
0.0402763819095
0.0405025125628
0.0407286432161
0.0409547738693
0.0411809045226
0.0414070351759
0.0416331658291
0.0418592964824
0.0420854271357
0.0423115577889
0.0425376884422
0.0427638190955
0.0429899497487
0.043216080402
0.0434422110553
0.0436683417085
0.0438944723618
0.0441206030151
0.0443467336683
0.0445728643216
0.0447989949749
0.0450251256281
0.0452512562814
0.0454773869347
0.0457035175879
0.0459296482412
0.0461557788945
0.0463819095477
0.046608040201
0.0468341708543
0.0470603015075
0.0472864321608
0.0475125628141
0.0477386934673
0.0479648241206
0.0481909547739
0.0484170854271
0.0486432160804
0.0488693467337
0.0490954773869
0.0493216080402
0.0495477386935
0.0497738693467
0.05
"""
# In[50]:
cl_array = np.array([cls0, cls1, cls2, cls3, cls4, cls5, cls6, cls7, cls8, cls9, cls10,
cls11, cls12, cls13, cls14, cls15, cls16, cls17, cls18, cls19, cls20,
cls21, cls22, cls23, cls24, cls25, cls26, cls27, cls28, cls29, cls30,
cls31, cls32, cls33, cls34, cls35, cls36, cls37, cls38, cls39, cls40,
cls41, cls42, cls43, cls44, cls45, cls46, cls47, cls48, cls49, cls50,
cls51, cls52, cls53, cls54, cls55, cls56, cls57, cls58, cls59, cls60,
cls61, cls62, cls63, cls64, cls65, cls66, cls67, cls68, cls69, cls70,
cls71, cls72, cls73, cls74, cls75, cls76, cls77, cls78, cls79, cls80,
cls81, cls82, cls83, cls84, cls85, cls86, cls87, cls88, cls89, cls90,
cls91, cls92, cls93, cls94, cls95, cls96, cls97, cls98, cls99, cls100,
cls101, cls102, cls103, cls104, cls105, cls106, cls107, cls108, cls109, cls110,
cls111, cls112, cls113, cls114, cls115, cls116, cls117, cls118, cls119, cls120,
cls121, cls122, cls123, cls124, cls125, cls126, cls127, cls128, cls129, cls130,
cls131, cls132, cls133, cls134, cls135, cls136, cls137, cls138, cls139, cls140,
cls141, cls142, cls143, cls144, cls145, cls146, cls147, cls148, cls149, cls150,
cls151, cls152, cls153, cls154, cls155, cls156, cls157, cls158, cls159, cls160,
cls161, cls162, cls163, cls164, cls165, cls166, cls167, cls168, cls169, cls170,
cls171, cls172, cls173, cls174, cls175, cls176, cls177, cls178, cls179, cls180,
cls181, cls182, cls183, cls184, cls185, cls186, cls187, cls188, cls189, cls190,
cls191, cls192, cls193, cls194, cls195, cls196, cls197, cls198, cls199, cls200])
# In[51]:
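# cl_array stacks the 201 spectra (cls0..cls200); each row is the unlensed
# scalar TT spectrum over multipoles l = 2..1100, so the expected shape is
# (201, 1099).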
print(cl_array.shape)
# In[52]:
f = "CAMB_cl_varyBaryon_lmax1100varyFeb2016.npy"
np.save(f, cl_array)
| mit |
Upward-Spiral-Science/team1 | code/test_assumptions.py | 1 | 1525 | import numpy as np
import matplotlib.pyplot as plt
import urllib2
#%matplotlib inline
sample_size = 1000
np.random.seed(1)
url = ('https://raw.githubusercontent.com/Upward-Spiral-Science'
'/data/master/syn-density/output.csv')
data = urllib2.urlopen(url)
csv = np.genfromtxt(data, delimiter=",")[1:]
csv_rand = None
for i in range (1, sample_size):
#Randomly sample from dataset
a = np.random.permutation(np.arange(csv.shape[0]))[:100]
csv_rand_sample = csv[a]
# Normalize
mean_unmask = np.mean(csv_rand_sample[:,3])
std_unmask = np.std(csv_rand_sample[:,3])
csv_rand_sample[:,3] = (csv_rand_sample[:,3]-mean_unmask)/std_unmask
#Stack matrix
if i == 1:
csv_rand = csv_rand_sample
else:
csv_rand = np.dstack((csv_rand,csv_rand_sample))
#Average across random samples
csv_rand = np.mean(csv_rand,axis=2)
#Independence Assumption
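# The independence check below compares the covariance of the averaged samples
# against its diagonal: if the off-diagonal ("hollow") part contributes little
# to the covariance, the sampled variables are close to uncorrelated.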
covar = np.cov(csv_rand)
plt.figure(figsize=(7,7))
plt.imshow(covar)
plt.title('Covariance of Synapse Density dataset')
plt.colorbar()
plt.show()
diag = covar.diagonal()*np.eye(covar.shape[0])
hollow = covar-diag
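# np.linalg.slogdet returns (sign, log|det|); taking index [1] keeps the
# log-determinant, which is numerically stabler than np.linalg.det here.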
d_det = np.linalg.slogdet(diag)[1]
h_det = np.linalg.slogdet(hollow)[1]
print d_det
print h_det
plt.figure(figsize=(11,8))
plt.subplot(121)
plt.imshow(diag)
plt.clim([0, np.max(covar)])
plt.title('Log-determinant of on-diagonal: ' + str(d_det))
plt.subplot(122)
plt.imshow(hollow)
plt.clim([0, np.max(covar)])
plt.title('Log-determinant of off-diagonal: ' + str(h_det))
plt.show()
print "Ratio of on and off-diagonal determinants: " + str(d_det/h_det)
| apache-2.0 |
rahuldhote/scikit-learn | examples/gaussian_process/gp_diabetes_dataset.py | 223 | 1976 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
========================================================================
Gaussian Processes regression: goodness-of-fit on the 'diabetes' dataset
========================================================================
In this example, we fit a Gaussian Process model onto the diabetes
dataset.
We determine the correlation parameters with maximum likelihood
estimation (MLE). We use an anisotropic squared exponential
correlation model with a constant regression model. We also use a
nugget of 1e-2 to account for the (strong) noise in the targets.
We compute a cross-validation estimate of the coefficient of
determination (R2) without reperforming MLE, using the set of correlation
parameters found on the whole dataset.
"""
print(__doc__)
# Author: Vincent Dubourg <[email protected]>
# Licence: BSD 3 clause
from sklearn import datasets
from sklearn.gaussian_process import GaussianProcess
from sklearn.cross_validation import cross_val_score, KFold
# Load the dataset from scikit's data sets
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
# Instantiate a GP model
gp = GaussianProcess(regr='constant', corr='absolute_exponential',
theta0=[1e-4] * 10, thetaL=[1e-12] * 10,
thetaU=[1e-2] * 10, nugget=1e-2, optimizer='Welch')
# Fit the GP model to the data performing maximum likelihood estimation
gp.fit(X, y)
# Deactivate maximum likelihood estimation for the cross-validation loop
gp.theta0 = gp.theta_ # Given correlation parameter = MLE
gp.thetaL, gp.thetaU = None, None # None bounds deactivate MLE
# Perform a cross-validation estimate of the coefficient of determination
# using the cross_validation module (n_jobs=1 runs the folds on a single CPU)
K = 20 # folds
R2 = cross_val_score(gp, X, y=y, cv=KFold(y.size, K), n_jobs=1).mean()
print("The %d-Folds estimate of the coefficient of determination is R2 = %s"
% (K, R2))
| bsd-3-clause |
pcm17/tensorflow | tensorflow/contrib/learn/python/learn/tests/dataframe/dataframe_test.py | 62 | 3753 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests of the DataFrame class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn.tests.dataframe import mocks
from tensorflow.python.framework import dtypes
from tensorflow.python.platform import test
def setup_test_df():
"""Create a dataframe populated with some test columns."""
df = learn.DataFrame()
df["a"] = learn.TransformedSeries(
[mocks.MockSeries("foobar", mocks.MockTensor("Tensor a", dtypes.int32))],
mocks.MockTwoOutputTransform("iue", "eui", "snt"), "out1")
df["b"] = learn.TransformedSeries(
[mocks.MockSeries("foobar", mocks.MockTensor("Tensor b", dtypes.int32))],
mocks.MockTwoOutputTransform("iue", "eui", "snt"), "out2")
df["c"] = learn.TransformedSeries(
[mocks.MockSeries("foobar", mocks.MockTensor("Tensor c", dtypes.int32))],
mocks.MockTwoOutputTransform("iue", "eui", "snt"), "out1")
return df
class DataFrameTest(test.TestCase):
"""Test of `DataFrame`."""
def test_create(self):
df = setup_test_df()
self.assertEqual(df.columns(), frozenset(["a", "b", "c"]))
def test_select_columns(self):
df = setup_test_df()
df2 = df.select_columns(["a", "c"])
self.assertEqual(df2.columns(), frozenset(["a", "c"]))
def test_exclude_columns(self):
df = setup_test_df()
df2 = df.exclude_columns(["a", "c"])
self.assertEqual(df2.columns(), frozenset(["b"]))
def test_get_item(self):
df = setup_test_df()
c1 = df["b"]
self.assertEqual(
mocks.MockTensor("Mock Tensor 2", dtypes.int32), c1.build())
def test_del_item_column(self):
df = setup_test_df()
self.assertEqual(3, len(df))
del df["b"]
self.assertEqual(2, len(df))
self.assertEqual(df.columns(), frozenset(["a", "c"]))
def test_set_item_column(self):
df = setup_test_df()
self.assertEqual(3, len(df))
col1 = mocks.MockSeries("QuackColumn",
mocks.MockTensor("Tensor ", dtypes.int32))
df["quack"] = col1
self.assertEqual(4, len(df))
col2 = df["quack"]
self.assertEqual(col1, col2)
def test_set_item_column_multi(self):
df = setup_test_df()
self.assertEqual(3, len(df))
col1 = mocks.MockSeries("QuackColumn", [])
col2 = mocks.MockSeries("MooColumn", [])
df["quack", "moo"] = [col1, col2]
self.assertEqual(5, len(df))
col3 = df["quack"]
self.assertEqual(col1, col3)
col4 = df["moo"]
self.assertEqual(col2, col4)
def test_set_item_pandas(self):
# TODO(jamieas)
pass
def test_set_item_numpy(self):
# TODO(jamieas)
pass
def test_build(self):
df = setup_test_df()
result = df.build()
expected = {
"a": mocks.MockTensor("Mock Tensor 1", dtypes.int32),
"b": mocks.MockTensor("Mock Tensor 2", dtypes.int32),
"c": mocks.MockTensor("Mock Tensor 1", dtypes.int32)
}
self.assertEqual(expected, result)
if __name__ == "__main__":
test.main()
| apache-2.0 |
agdestine/machine-learning | code/abaloneUtils.py | 5 | 4299 | # utils
# Utility functions for handling data
#
# Author: Benjamin Bengfort <[email protected]>
# Created: Thu Feb 26 17:47:35 2015 -0500
#
# Copyright (C) 2015 District Data Labs
# For license information, see LICENSE.txt
#
# ID: utils.py [] [email protected] $
"""
Utility functions for handling data
"""
##########################################################################
## Imports
##########################################################################
import os
import csv
import time
import json
import numpy as np
from sklearn.datasets.base import Bunch
##########################################################################
## Module Constants
##########################################################################
SKL_DATA = "SCIKIT_LEARN_DATA"
BASE_DIR = os.path.normpath(os.path.join(os.path.dirname(__file__), ".."))
DATA_DIR = os.path.join(BASE_DIR, "data")
CODE_DIR = os.path.join(BASE_DIR, "code")
##########################################################################
## Helper Functions
##########################################################################
def timeit(func):
"""
Returns how long a function took to execute, along with the output
"""
def wrapper(*args, **kwargs):
start = time.time()
result = func(*args, **kwargs)
return result, time.time() - start
    return wrapper
##########################################################################
## Dataset Loading
##########################################################################
def get_data_home(data_home=None):
"""
Returns the path of the data directory
"""
if data_home is None:
data_home = os.environ.get(SKL_DATA, DATA_DIR)
data_home = os.path.expanduser(data_home)
if not os.path.exists(data_home):
os.makedirs(data_home)
return data_home
def load_data(path, descr=None, target_index=-1):
"""
    Returns a scikit-learn dataset Bunch which includes several important
attributes that are used in modeling:
data: array of shape n_samples * n_features
target: array of length n_samples
feature_names: names of the features
target_names: names of the targets
filenames: names of the files that were loaded
DESCR: contents of the readme
This data therefore has the look and feel of the toy datasets.
Pass in a path usually just the name of the location in the data dir.
It will be joined with the result of `get_data_home`. The contents are:
path
- abalone.names # The file to load into DESCR
- meta.json # A file containing metadata to load
- dataset.txt # The numpy loadtxt file
- dataset.csv # The pandas read_csv file
    You can pass an alternate descr, and you can specify the index of the
    target column via target_index, which by default is the last item in the
    row (-1). The CSV is expected to have a single header row, which is
    skipped when loading.
"""
root = os.path.join(get_data_home(), path)
filenames = {
'meta': os.path.join(root, 'meta.json'),
'rdme': os.path.join(root, 'abalone.names'),
'data': os.path.join(root, 'dataset.csv'),
}
target_names = None
feature_names = None
DESCR = None
with open(filenames['meta'], 'r') as f:
meta = json.load(f)
target_names = meta['target_names']
feature_names = meta['feature_names']
with open(filenames['rdme'], 'r') as f:
DESCR = f.read()
# skip header from csv, load data
dataset = np.loadtxt(filenames['data'], delimiter=',', skiprows=1)
data = None
target = None
    # Target assumed to be either the last or the first column
if target_index == -1:
data = dataset[:,0:-1]
target = dataset[:,-1]
elif target_index == 0:
data = dataset[:,1:]
target = dataset[:,0]
else:
raise ValueError("Target index must be either -1 or 0")
return Bunch(data=data,
target=target,
filenames=filenames,
target_names=target_names,
feature_names=feature_names,
DESCR=DESCR)
def load_abalone():
return load_data('abalone')
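# Usage sketch (illustrative; assumes the abalone files listed in the
# load_data docstring are present under the data home):
#
#     dataset = load_abalone()
#     print(dataset.data.shape)     # (n_samples, n_features)
#     print(dataset.target_names)   # taken from meta.json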
| mit |
bottydim/detect-credit-card-fraud | ccfd_dnn/model_weight.py | 1 | 20628 | import os
os.environ['CUDA_LAUNCH_BLOCKING'] = '1'
import pandas as pd
import matplotlib
import numpy as np
import math
import matplotlib.pyplot as plt
from sklearn.preprocessing import Imputer
from sklearn.cross_validation import train_test_split
from sklearn import preprocessing
import plotly.tools as tls
import pandas as pd
from sqlalchemy import create_engine # database connection
import datetime as dt
import io
import logging
import plotly.plotly as py # interactive graphing
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
from plotly.graph_objs import Bar, Scatter, Marker, Layout
from heraspy.model import HeraModel
np.random.seed(1337)
import theano
import keras
from keras.preprocessing.sequence import pad_sequences
from keras.models import Model,model_from_yaml
from keras.layers import Input, Dense, GRU, LSTM, TimeDistributed, Masking,merge
from model import *
import argparse
import sys
import pickle
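# Illustrative helper (not called below; the pipeline passes class_weight to
# data_generator, which presumably builds its own weights): given integer
# labels of shape (batch, timesteps) padded with the label pad value and a
# class -> weight dict, build the per-timestep weight matrix Keras expects
# when a model is compiled with sample_weight_mode="temporal".
def temporal_sample_weights(labels, class_weight):
    weights = np.zeros(labels.shape, dtype='float32')
    for cls, w in class_weight.items():
        weights[labels == cls] = w
    return weights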
if __name__ == "__main__":
t_start = dt.datetime.now()
parser = argparse.ArgumentParser(prog='Weighted Model')
parser.add_argument('-t','--table',required=True)
args = parser.parse_args()
####################################DATA SOURCE################################
table = vars(args)['table']
# table = 'data_trim'
# rsl_file = './data/gs_results_trim.csv'
# rsl_file = './data/psql_data_trim.csv'
# table = 'data_little_enc'
# rsl_file = './data/gs_results_little.csv'
# table = 'data_more'
# rsl_file = './data/gs_results_more.csv'
# table = 'auth'
# rsl_file = './data/auth.csv'
events_tbl = 'event'
events_tbl = None
rsl_file = './data/psql_{table}.csv'.format(table=table)
################################################################################
print "Commencing..."
data_dir = './data/'
evt_name = 'Featurespace_events_output.csv'
auth_name = 'Featurespace_auths_output.csv'
db_name = 'c1_agg.db'
address = "postgresql+pg8000://script@localhost:5432/ccfd"
# disk_engine = create_engine('sqlite:///'+data_dir+db_name,convert_unicode=True)
# disk_engine.raw_connection().connection.text_factory = str
disk_engine = create_engine(address)
#######################Settings#############################################
samples_per_epoch = trans_num_table(table,disk_engine,mode='train',trans_mode='train')
# epoch_limit = 10000
# samples_per_epoch = epoch_limit
# user_sample_size = 8000
epoch_limit = samples_per_epoch
user_sample_size = None
nb_epoch = 300
fraud_w_list = [1000.]
##########ENCODERS CONF
tbl_src = 'auth'
# tbl_src = table
tbl_evnt = 'event'
##################################
batch_size = 300
batch_size_val = 1000
print "SAMPLES per epoch:",samples_per_epoch
print "User sample size:",user_sample_size
print 'sequence length size',batch_size
# samples_per_epoch = 1959
# table = 'data_trim'
# samples_per_epoch = 485
lbl_pad_val = 2
pad_val = 0
# dropout_W_list = [0.3]
dropout_W_list = [0.4,0.5,0.6,0.7]
# dropout_W_list = [0.15,0.3,0.4,0.8]
input_dim = 44
hid_dims = [320]
num_l = [7]
lr_s = [2.5e-4]
# lr_s = [1.25e-4,6e-5]
# lr_s = [1e-2,1e-3,1e-4]
# lr_s = [1e-1,1e-2,1e-3]
num_opt = 1
opts = lambda x,lr:[keras.optimizers.RMSprop(lr=lr, rho=0.9, epsilon=1e-08),
# keras.optimizers.Adam(lr=lr, beta_1=0.9, beta_2=0.999, epsilon=1e-08),
# keras.optimizers.Nadam(lr=lr, beta_1=0.9, beta_2=0.999, epsilon=1e-08, schedule_decay=0.004)
][x]
# add_info = str(int(seq_len_param))+'_class_w_'+str(fraud_w)
print 'Populating encoders'
path_encoders ='./data/encoders/{tbl_src}+{tbl_evnt}'.format(tbl_src=tbl_src,tbl_evnt=tbl_evnt)
if os.path.exists(path_encoders):
encoders = load_encoders(path_encoders)
else:
encoders = populate_encoders_scale(tbl_src,disk_engine,tbl_evnt)
with open(path_encoders, 'wb') as output:
pickle.dump(encoders, output, pickle.HIGHEST_PROTOCOL)
print 'ENCODERS SAVED to {path}!'.format(path=path_encoders)
# sys.exit()
gru_dict = {}
lstm_dict = {}
for fraud_w in fraud_w_list:
add_info = 'Mask=pad_class_w_'+str(fraud_w)+'ES-OFF'
class_weight = {0 : 1.,
1: fraud_w,
2: 0.}
for dropout_W in dropout_W_list:
for hidden_dim in hid_dims:
# gru
for opt_id in range(num_opt):
for lr in lr_s:
optimizer = opts(opt_id,lr)
for num_layers in num_l:
for rnn in ['gru']:
short_title = 'bi_'+rnn.upper()+'_'+str(hidden_dim)+'_'+str(num_layers)+'_DO-'+str(dropout_W)+'_w'+str(class_weight[1])
title = 'Bidirectional_Class'+str(class_weight[1])+'_'+rnn.upper()+'_'+str(hidden_dim)+'_'+str(num_layers)+'_'+str(type(optimizer).__name__)+'_'+str(lr)+'_epochs_'+str(nb_epoch)+'_DO-'+str(dropout_W)
print title
input_layer = Input(shape=(int(seq_len_param), input_dim),name='main_input')
mask = Masking(mask_value=pad_val)(input_layer)
x = mask
for i in range(num_layers):
if rnn == 'gru':
prev_frw = GRU(hidden_dim,#input_length=50,
return_sequences=True,go_backwards=False,stateful=False,
unroll=False,consume_less='gpu',
init='glorot_uniform', inner_init='orthogonal', activation='tanh',
inner_activation='hard_sigmoid', W_regularizer=None, U_regularizer=None,
b_regularizer=None, dropout_W=dropout_W, dropout_U=0.0)(x)
prev_bck = GRU(hidden_dim,#input_length=50,
return_sequences=True,go_backwards=True,stateful=False,
unroll=False,consume_less='gpu',
init='glorot_uniform', inner_init='orthogonal', activation='tanh',
inner_activation='hard_sigmoid', W_regularizer=None, U_regularizer=None,
b_regularizer=None, dropout_W=dropout_W, dropout_U=0.0)(x)
else:
prev_frw = LSTM(hidden_dim, return_sequences=True,go_backwards=False,stateful=False,
init='glorot_uniform', inner_init='orthogonal',
forget_bias_init='one', activation='tanh', inner_activation='hard_sigmoid',
W_regularizer=None, U_regularizer=None, b_regularizer=None, dropout_W=dropout_W, dropout_U=0.0)(x)
prev_bck = LSTM(hidden_dim, return_sequences=True,go_backwards=True,stateful=False,
init='glorot_uniform', inner_init='orthogonal',
forget_bias_init='one', activation='tanh', inner_activation='hard_sigmoid',
W_regularizer=None, U_regularizer=None, b_regularizer=None, dropout_W=dropout_W, dropout_U=0.0)(x)
x = merge([prev_frw, prev_bck], mode='concat')
output_layer = TimeDistributed(Dense(3,activation='softmax'))(x)
model = Model(input=[input_layer],output=[output_layer])
model.compile(optimizer=optimizer,
loss='sparse_categorical_crossentropy',
metrics=['accuracy'],
sample_weight_mode="temporal")
########save architecture ######
arch_dir = './data/models/archs/'+short_title+'.yml'
yaml_string = model.to_yaml()
with open(arch_dir, 'wb') as output:
pickle.dump(yaml_string, output, pickle.HIGHEST_PROTOCOL)
print 'model saved!'
##############
user_mode = 'train'
trans_mode = 'train'
data_gen = data_generator(user_mode,trans_mode,disk_engine,encoders,table=table,
batch_size=batch_size,usr_ratio=80,class_weight=class_weight,lbl_pad_val = lbl_pad_val, pad_val = pad_val,
sub_sample=user_sample_size,epoch_size=epoch_limit,events_tbl=events_tbl)
# sub_sample=user_sample_size,epoch_size=samples_per_epoch)
########validation data
print 'Generating Validation set!'
user_mode = 'test'
trans_mode = 'test'
val_gen = data_generator(user_mode,trans_mode,disk_engine,encoders,table=table,
batch_size=batch_size_val,usr_ratio=80,class_weight=class_weight,lbl_pad_val = lbl_pad_val, pad_val = pad_val,
sub_sample=None,epoch_size=None,events_tbl=events_tbl)
validation_data = next(val_gen)
print '################GENERATED#######################'
###############CALLBACKS
patience = 30
early_Stop = keras.callbacks.EarlyStopping(monitor='val_loss', patience=patience, verbose=0, mode='auto')
save_path = './data/models/'+table+'/'
var_name = '.{epoch:02d}-{val_loss:.5f}.hdf5'
checkpoint = keras.callbacks.ModelCheckpoint(save_path+short_title+var_name, monitor='val_loss', verbose=1, save_best_only=True, mode='auto')
root_url = 'http://localhost:9000'
remote_log = keras.callbacks.RemoteMonitor(root=root_url)
# callbacks = [early_Stop,checkpoint]
callbacks = [early_Stop,checkpoint,remote_log]
callbacks = []
history = model.fit_generator(data_gen, samples_per_epoch, nb_epoch, verbose=1, callbacks=callbacks,validation_data=validation_data, nb_val_samples=None, class_weight=None, max_q_size=10000)
py.sign_in('bottydim', 'o1kuyms9zv')
auc_list = []
print '#########################TRAIN STATS################'
user_mode = 'train'
trans_mode = 'train'
val_samples = trans_num_table(table,disk_engine,mode=user_mode,trans_mode=trans_mode)
print '# samples',val_samples
plt_filename = './figures/GS/'+table+'/'+'ROC_'+user_mode+'_'+trans_mode+'_'+title+'_'+add_info+".png"
data_gen = data_generator(user_mode,trans_mode,disk_engine,encoders,table=table,
batch_size=batch_size,usr_ratio=80,class_weight=None,lbl_pad_val = lbl_pad_val, pad_val = pad_val,events_tbl=events_tbl)
eval_list = eval_auc_generator(model, data_gen, val_samples, max_q_size=10000,plt_filename=plt_filename)
auc_val = eval_list[0]
clc_report = eval_list[1]
acc = eval_list[2]
print "AUC:",auc_val
                                print 'Classification report'
print clc_report
print 'Accuracy'
print acc
auc_list.append(str(auc_val))
print '##################EVALUATION USERS#########################'
user_mode = 'test'
trans_mode = 'train'
val_samples = trans_num_table(table,disk_engine,mode=user_mode,trans_mode=trans_mode)
print '# samples',val_samples
plt_filename = './figures/GS/'+table+'/'+'ROC_'+user_mode+'_'+trans_mode+'_'+title+'_'+add_info+".png"
eval_gen = data_generator(user_mode,trans_mode,disk_engine,encoders,table=table,
batch_size=batch_size,usr_ratio=80,class_weight=None,lbl_pad_val = lbl_pad_val, pad_val = pad_val,events_tbl=events_tbl)
eval_list = eval_auc_generator(model, eval_gen, val_samples, max_q_size=10000,plt_filename=plt_filename)
auc_val = eval_list[0]
clc_report = eval_list[1]
acc = eval_list[2]
print "AUC:",auc_val
                                print 'Classification report'
print clc_report
print 'Accuracy'
print acc
auc_list.append(str(auc_val))
print '#####################################################'
print '##################EVALUATION Transactions#########################'
user_mode = 'train'
trans_mode = 'test'
val_samples = trans_num_table(table,disk_engine,mode=user_mode,trans_mode=trans_mode)
print '# samples',val_samples
plt_filename = './figures/GS/'+table+'/'+'ROC_'+user_mode+'_'+trans_mode+'_'+title+'_'+add_info+".png"
eval_gen = data_generator(user_mode,trans_mode,disk_engine,encoders,table=table,
batch_size=batch_size,usr_ratio=80,class_weight=None,lbl_pad_val = lbl_pad_val, pad_val = pad_val,events_tbl=events_tbl)
eval_list = eval_auc_generator(model, eval_gen, val_samples, max_q_size=10000,plt_filename=plt_filename)
auc_val = eval_list[0]
clc_report = eval_list[1]
acc = eval_list[2]
print "AUC:",auc_val
                                print 'Classification report'
print clc_report
print 'Accuracy'
print acc
auc_list.append(str(auc_val))
print '#####################################################'
print '##################EVALUATION Pure#########################'
user_mode = 'test'
trans_mode = 'test'
val_samples = trans_num_table(table,disk_engine,mode=user_mode,trans_mode=trans_mode)
print '# samples',val_samples
plt_filename = './figures/GS/'+table+'/'+'ROC_'+user_mode+'_'+trans_mode+'_'+title+'_'+add_info+".png"
eval_gen = data_generator(user_mode,trans_mode,disk_engine,encoders,table=table,
batch_size=batch_size,usr_ratio=80,class_weight=None,lbl_pad_val = lbl_pad_val, pad_val = pad_val,events_tbl=events_tbl)
eval_list = eval_auc_generator(model, eval_gen, val_samples, max_q_size=10000,plt_filename=plt_filename)
auc_val = eval_list[0]
clc_report = eval_list[1]
acc = eval_list[2]
print "AUC:",auc_val
                                print 'Classification report'
print clc_report
print 'Accuracy'
print acc
auc_list.append(str(auc_val))
print '#####################################################'
with io.open(rsl_file, 'a', encoding='utf-8') as file:
auc_string = ','.join(auc_list)
title_csv = title.replace('_',',')+','+str(history.history['acc'][-1])+','+str(history.history['loss'][-1])+','+str(auc_val)+','+str(acc)+','+auc_string+'\n'
file.write(unicode(title_csv))
print 'logged @ {file}'.format(file=rsl_file)
trim_point = -15
fig = {
'data': [Scatter(
x=history.epoch[trim_point:],
y=history.history['loss'][trim_point:])],
'layout': {'title': title}
}
py.image.save_as(fig,filename='./results/figures/'+table+'/'+short_title+'_'+'LOSS'+'_'+add_info+".png")
trim_point = 0
fig = {
'data': [Scatter(
x=history.epoch[trim_point:],
y=history.history['loss'][trim_point:])],
'layout': {'title': title}
}
py.image.save_as(fig,filename='./results/figures/'+table+'/'+short_title+'_'+'LOSS'+'_'+'FULL'+".png")
# iplot(fig,filename='figures/'+title,image='png')
# title = title.replace('Loss','Acc')
fig = {
'data': [Scatter(
x=history.epoch[trim_point:],
y=history.history['acc'][trim_point:])],
'layout': {'title': title}
}
filename_val='./results/figures/'+table+'/'+short_title+'_'+'ACC'+'_'+add_info+".png"
py.image.save_as(fig,filename=filename_val)
print 'exported @',filename_val
fig = {
'data': [Scatter(
x=history.epoch[trim_point:],
y=history.history['val_loss'][trim_point:])],
'layout': {'title': title}
}
py.image.save_as(fig,filename='./results/figures/'+table+'/'+short_title+'_'+'VAL LOSS'+'_'+add_info+".png")
print 'time taken: {time}'.format(time=days_hours_minutes_seconds(dt.datetime.now()-t_start)) | mit |
zhenv5/scikit-learn | examples/classification/plot_lda.py | 70 | 2413 | """
====================================================================
Normal and Shrinkage Linear Discriminant Analysis for classification
====================================================================
Shows how shrinkage improves classification.
"""
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
n_train = 20 # samples for training
n_test = 200 # samples for testing
n_averages = 50 # how often to repeat classification
n_features_max = 75 # maximum number of features
step = 4 # step size for the calculation
def generate_data(n_samples, n_features):
"""Generate random blob-ish data with noisy features.
This returns an array of input data with shape `(n_samples, n_features)`
and an array of `n_samples` target labels.
Only one feature contains discriminative information, the other features
contain only noise.
"""
X, y = make_blobs(n_samples=n_samples, n_features=1, centers=[[-2], [2]])
# add non-discriminative features
if n_features > 1:
X = np.hstack([X, np.random.randn(n_samples, n_features - 1)])
return X, y
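# Quick shape check (illustrative, kept as comments):
#
#     X_demo, y_demo = generate_data(n_samples=10, n_features=5)
#     # X_demo.shape == (10, 5); only X_demo[:, 0] separates the two blobs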
acc_clf1, acc_clf2 = [], []
n_features_range = range(1, n_features_max + 1, step)
for n_features in n_features_range:
score_clf1, score_clf2 = 0, 0
for _ in range(n_averages):
X, y = generate_data(n_train, n_features)
clf1 = LinearDiscriminantAnalysis(solver='lsqr', shrinkage='auto').fit(X, y)
clf2 = LinearDiscriminantAnalysis(solver='lsqr', shrinkage=None).fit(X, y)
X, y = generate_data(n_test, n_features)
score_clf1 += clf1.score(X, y)
score_clf2 += clf2.score(X, y)
acc_clf1.append(score_clf1 / n_averages)
acc_clf2.append(score_clf2 / n_averages)
features_samples_ratio = np.array(n_features_range) / n_train
plt.plot(features_samples_ratio, acc_clf1, linewidth=2,
label="Linear Discriminant Analysis with shrinkage", color='r')
plt.plot(features_samples_ratio, acc_clf2, linewidth=2,
label="Linear Discriminant Analysis", color='g')
plt.xlabel('n_features / n_samples')
plt.ylabel('Classification accuracy')
plt.legend(loc=1, prop={'size': 12})
plt.suptitle('Linear Discriminant Analysis vs. \
shrinkage Linear Discriminant Analysis (1 discriminative feature)')
plt.show()
| bsd-3-clause |
pinkavaj/gnuradio | gr-fec/python/fec/polar/channel_construction_bec.py | 22 | 8068 | #!/usr/bin/env python
#
# Copyright 2015 Free Software Foundation, Inc.
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
import numpy as np
import helper_functions as hf
def bec_channel(eta):
'''
binary erasure channel (BEC)
    for each y in Y
    W(y|0) * W(y|1) = 0 or W(y|0) = W(y|1)
    transitions are 1 -> 1 or 0 -> 0 or {0, 1} -> ? (erased symbol)
'''
# looks like BSC but should be interpreted differently.
w = np.array((1 - eta, eta, 1 - eta), dtype=float)
return w
def odd_rec(iwn):
return iwn ** 2
def even_rec(iwn):
return 2 * iwn - iwn ** 2
def calc_one_recursion(iw0):
iw1 = np.zeros(2 * len(iw0)) # double values
for i in range(len(iw0)):
# careful indices screw you because paper is '1' based :(
iw1[2 * i] = odd_rec(iw0[i])
iw1[2 * i + 1] = even_rec(iw0[i])
return iw1
def calculate_bec_channel_capacities_loop(initial_channel, block_power):
# compare [0, Arikan] eq. 6
iw = np.array([initial_channel, ], dtype=float)
for i in range(block_power):
iw = calc_one_recursion(iw)
return iw
def calc_vector_capacities_one_recursion(iw0):
degraded = odd_rec(iw0)
upgraded = even_rec(iw0)
iw1 = np.empty(2 * len(iw0), dtype=degraded.dtype)
iw1[0::2] = degraded
iw1[1::2] = upgraded
return iw1
def calculate_bec_channel_capacities_vector(initial_channel, block_power):
# compare [0, Arikan] eq. 6
# this version is ~ 180 times faster than the loop version with 2**22 synthetic channels
iw = np.array([initial_channel, ], dtype=float)
for i in range(block_power):
iw = calc_vector_capacities_one_recursion(iw)
return iw
def calculate_bec_channel_capacities(eta, block_size):
# compare [0, Arikan] eq. 6
iw = 1 - eta # holds for BEC as stated in paper
lw = hf.power_of_2_int(block_size)
return calculate_bec_channel_capacities_vector(iw, lw)
def calculate_z_parameters_one_recursion(z_params):
z_next = np.empty(2 * z_params.size, dtype=z_params.dtype)
z_sq = z_params ** 2
z_low = 2 * z_params - z_sq
z_next[0::2] = z_low
z_next[1::2] = z_sq
return z_next
def calculate_bec_channel_z_parameters(eta, block_size):
# compare [0, Arikan] eq. 38
block_power = hf.power_of_2_int(block_size)
z_params = np.array([eta, ], dtype=float)
for block_size in range(block_power):
z_params = calculate_z_parameters_one_recursion(z_params)
return z_params
def design_snr_to_bec_eta(design_snr):
# minimum design snr = -1.5917 corresponds to BER = 0.5
s = 10. ** (design_snr / 10.)
return np.exp(-s)
def bhattacharyya_bounds(design_snr, block_size):
'''
Harish Vangala, Emanuele Viterbo, Yi Hong: 'A Comparative Study of Polar Code Constructions for the AWGN Channel', 2015
In this paper it is called Bhattacharyya bounds channel construction and is abbreviated PCC-0
Best design SNR for block_size = 2048, R = 0.5, is 0dB.
    Compare with Arikan: 'Channel Polarization: A Method for Constructing Capacity-Achieving Codes for Symmetric Binary-Input Memoryless Channels.'
Proposition 5. inequalities turn into equalities for BEC channel. Otherwise they represent an upper bound.
Also compare [0, Arikan] eq. 6 and 38
For BEC that translates to capacity(i) = 1 - bhattacharyya(i)
:return Z-parameters in natural bit-order. Choose according to desired rate.
'''
eta = design_snr_to_bec_eta(design_snr)
return calculate_bec_channel_z_parameters(eta, block_size)
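# Illustrative sketch (not part of the original module): the z-parameters above
# are what one typically uses to pick the most reliable bit positions of a
# polar code. The helper returns the indices of the info_length synthetic
# channels with the smallest Bhattacharyya parameter for a BEC(eta).
def bec_information_set(eta, block_size, info_length):
    z_params = calculate_bec_channel_z_parameters(eta, block_size)
    return np.sort(np.argsort(z_params)[:info_length])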
def plot_channel_capacities(capacity, save_file=None):
block_size = len(capacity)
try:
import matplotlib.pyplot as plt
# FUN with matplotlib LaTeX fonts! http://matplotlib.org/users/usetex.html
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
plt.rc('figure', autolayout=True)
plt.plot(capacity)
plt.xlim([0, block_size])
plt.ylim([-0.01, 1.01])
plt.xlabel('synthetic channel number')
plt.ylabel('channel capacity')
# plt.title('BEC channel construction')
plt.grid()
plt.gcf().set_size_inches(plt.gcf().get_size_inches() * .5)
if save_file:
plt.savefig(save_file)
plt.show()
except ImportError:
pass # only plot in case matplotlib is installed
def plot_average_channel_distance(save_file=None):
eta = 0.5 # design_snr_to_bec_eta(-1.5917)
powers = np.arange(4, 26)
try:
import matplotlib.pyplot as plt
import matplotlib
# FUN with matplotlib LaTeX fonts! http://matplotlib.org/users/usetex.html
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
plt.rc('figure', autolayout=True)
dist = []
medians = []
initial_channel = 1 - eta
for p in powers:
bs = int(2 ** p)
capacities = calculate_bec_channel_capacities(eta, bs)
avg_capacity = np.repeat(initial_channel, len(capacities))
averages = np.abs(capacities - avg_capacity)
avg_distance = np.sum(averages) / float(len(capacities))
dist.append(avg_distance)
variance = np.std(averages)
medians.append(variance)
plt.errorbar(powers, dist, yerr=medians)
plt.grid()
plt.xlabel(r'block size $N$')
plt.ylabel(r'$\frac{1}{N} \sum_i |I(W_N^{(i)}) - 0.5|$')
axes = plt.axes()
tick_values = np.array(axes.get_xticks().tolist())
tick_labels = np.array(tick_values, dtype=int)
tick_labels = ['$2^{' + str(i) + '}$' for i in tick_labels]
plt.xticks(tick_values, tick_labels)
plt.xlim((powers[0], powers[-1]))
plt.ylim((0.2, 0.5001))
plt.gcf().set_size_inches(plt.gcf().get_size_inches() * .5)
if save_file:
plt.savefig(save_file)
plt.show()
except ImportError:
pass
def plot_capacity_histogram(design_snr, save_file=None):
eta = design_snr_to_bec_eta(design_snr)
# capacities = calculate_bec_channel_capacities(eta, block_size)
try:
import matplotlib.pyplot as plt
# FUN with matplotlib LaTeX fonts! http://matplotlib.org/users/usetex.html
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
plt.rc('figure', autolayout=True)
block_sizes = [32, 128, 512]
for b in block_sizes:
capacities = calculate_bec_channel_capacities(eta, b)
w = 1. / float(len(capacities))
weights = [w, ] * b
plt.hist(capacities, bins=b, weights=weights, range=(0.95, 1.0))
plt.grid()
plt.xlabel('synthetic channel capacity')
plt.ylabel('normalized item count')
print(plt.gcf().get_size_inches())
plt.gcf().set_size_inches(plt.gcf().get_size_inches() * .5)
if save_file:
plt.savefig(save_file)
plt.show()
except ImportError:
pass
def main():
print 'channel construction main'
n = 11
block_size = int(2 ** n)
design_snr = -1.59
eta = design_snr_to_bec_eta(design_snr)
# print(calculate_bec_channel_z_parameters(eta, block_size))
# capacity = calculate_bec_channel_capacities(eta, block_size)
# plot_average_channel_distance()
calculate_bec_channel_z_parameters(eta, block_size)
if __name__ == '__main__':
main()
| gpl-3.0 |
marshallmcdonnell/interactive_plotting | matplotlib/draggable_legend_code.py | 1 | 3140 | #!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as _plt
class DraggableLegend:
def __init__(self, legend):
self.legend = legend
self.gotLegend = False
legend.figure.canvas.mpl_connect('motion_notify_event', self.on_motion)
legend.figure.canvas.mpl_connect('pick_event', self.on_picker)
legend.figure.canvas.mpl_connect('button_release_event', self.on_release)
legend.set_picker(self.my_legend_picker)
#----------------------------------------------------#
# Connected event handlers
def on_motion(self, event):
if self.gotLegend:
dx = event.x - self.mouse_x
dy = event.y - self.mouse_y
loc_in_canvas = self.legend_x + dx, self.legend_y + dy
loc_in_norm_axes = self.legend.parent.transAxes.inverted().transform_point(loc_in_canvas)
self.legend._loc = tuple(loc_in_norm_axes)
self.legend.figure.canvas.draw()
def my_legend_picker(self, legend, event):
return self.legend.legendPatch.contains(event)
def on_picker(self, event):
if event.artist == self.legend:
# left-click
if event.mouseevent.button == 1:
self._move_legend(event)
# mouse button pressed
if event.mouseevent.button == 2:
pass
# right-click
if event.mouseevent.button == 3:
self._hideLegend()
# mouse up
if event.mouseevent.button == 'up':
self._scaleUpLegendFont()
# mouse down
if event.mouseevent.button == 'down':
self._scaleDownLegendFont()
def on_release(self, event):
if self.gotLegend:
self.gotLegend = False
#----------------------------------------------------#
# Utility functions
def _move_legend(self,event):
bbox = self.legend.get_window_extent()
self.mouse_x = event.mouseevent.x
self.mouse_y = event.mouseevent.y
self.legend_x = bbox.xmin
self.legend_y = bbox.ymin
self.gotLegend = 1
def _scaleUpLegendFont(self,size_step=4):
size = self.legend.get_texts()[0].get_fontsize()
size += size_step
_plt.setp(self.legend.get_texts(), fontsize=size) #legend 'list' fontsize
self.legend.figure.canvas.draw()
def _scaleDownLegendFont(self,size_step=4):
size = self.legend.get_texts()[0].get_fontsize()
size -= size_step
_plt.setp(self.legend.get_texts(), fontsize=size) #legend 'list' fontsize
self.legend.figure.canvas.draw()
def _hideLegend(self):
if self.legend.get_visible():
self.legend.set_visible(False)
else:
self.legend.set_visible(True)
self.legend.figure.canvas.draw()
figure = _plt.figure()
ax = figure.add_subplot(111)
scatter = ax.scatter(np.random.randn(100), np.random.randn(100), label='hi')
legend = ax.legend()
legend = DraggableLegend(legend)
_plt.show()
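# Notes on the interactions wired up above (descriptive only): left-click drags
# the legend, right-click toggles its visibility, and the 'up'/'down' branches
# are meant to grow/shrink the legend font from the scroll wheel. If the scroll
# branches never fire, the canvas may additionally need a scroll hook, e.g.
# (hypothetical handler name):
#
#     legend.legend.figure.canvas.mpl_connect('scroll_event', on_scroll)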
| mit |
wesm/statsmodels | scikits/statsmodels/sandbox/tsa/examples/ex_mle_garch.py | 1 | 10649 | # -*- coding: utf-8 -*-
"""
Created on Fri Feb 12 01:01:50 2010
Author: josef-pktd
latest result
-------------
all are very close
garch0 has different parameterization of constant
ordering of parameters is different
seed 2780185
h.shape (2000,)
Optimization terminated successfully.
Current function value: 2093.813397
Iterations: 387
Function evaluations: 676
ggres.params [-0.6146253 0.1914537 0.01039355 0.78802188]
Optimization terminated successfully.
Current function value: 2093.972953
Iterations: 201
Function evaluations: 372
ggres0.params [-0.61537527 0.19635128 4.00706058]
Warning: Desired error not necessarily achieved due to precision loss
Current function value: 2093.972953
Iterations: 51
Function evaluations: 551
Gradient evaluations: 110
ggres0.params [-0.61537855 0.19635265 4.00694669]
Optimization terminated successfully.
Current function value: 2093.751420
Iterations: 103
Function evaluations: 187
[ 0.78671519 0.19692222 0.61457171]
-2093.75141963
Final Estimate:
LLH: 2093.750 norm LLH: 2.093750
omega alpha1 beta1
0.7867438 0.1970437 0.6145467
long run variance comparison
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
R
>>> 0.7867438/(1- 0.1970437- 0.6145467)
4.1757097302897526
Garch (gjr) asymmetric, long-run var ?
>>> 1/(1-0.6146253 - 0.1914537 - 0.01039355) * 0.78802188
4.2937548579245242
>>> 1/(1-0.6146253 - 0.1914537 + 0.01039355) * 0.78802188
3.8569053452140345
Garch0
>>> (1-0.61537855 - 0.19635265) * 4.00694669
0.7543830449902722
>>> errgjr4.var() #for different random seed
4.0924199964716106
todo: add code and verify, check for longer lagpolys
"""
import numpy as np
from numpy.testing import assert_almost_equal
import matplotlib.pyplot as plt
import numdifftools as ndt
import scikits.statsmodels.api as sm
from scikits.statsmodels.sandbox import tsa
from scikits.statsmodels.sandbox.tsa.garch import * # local import
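def longrun_variance_garch11(omega, alpha, beta):
    # Illustrative helper (not from the original script): unconditional
    # (long-run) variance of a GARCH(1,1), omega / (1 - alpha - beta), the
    # quantity computed by hand in the module docstring above.
    return omega / (1. - alpha - beta)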
nobs = 1000
examples = ['garch', 'rpyfit']
if 'garch' in examples:
err,h = generate_kindofgarch(nobs, [1.0, -0.95], [1.0, 0.1], mu=0.5)
plt.figure()
plt.subplot(211)
plt.plot(err)
plt.subplot(212)
plt.plot(h)
#plt.show()
seed = 3842774 #91234 #8837708
seed = np.random.randint(9999999)
print 'seed', seed
np.random.seed(seed)
ar1 = -0.9
err,h = generate_garch(nobs, [1.0, ar1], [1.0, 0.50], mu=0.0,scale=0.1)
# plt.figure()
# plt.subplot(211)
# plt.plot(err)
# plt.subplot(212)
# plt.plot(h)
# plt.figure()
# plt.subplot(211)
# plt.plot(err[-400:])
# plt.subplot(212)
# plt.plot(h[-400:])
#plt.show()
garchplot(err, h)
garchplot(err[-400:], h[-400:])
np.random.seed(seed)
errgjr,hgjr, etax = generate_gjrgarch(nobs, [1.0, ar1],
[[1,0],[0.5,0]], mu=0.0,scale=0.1)
garchplot(errgjr[:nobs], hgjr[:nobs], 'GJR-GARCH(1,1) Simulation - symmetric')
garchplot(errgjr[-400:nobs], hgjr[-400:nobs], 'GJR-GARCH(1,1) Simulation - symmetric')
np.random.seed(seed)
errgjr2,hgjr2, etax = generate_gjrgarch(nobs, [1.0, ar1],
[[1,0],[0.1,0.9]], mu=0.0,scale=0.1)
garchplot(errgjr2[:nobs], hgjr2[:nobs], 'GJR-GARCH(1,1) Simulation')
garchplot(errgjr2[-400:nobs], hgjr2[-400:nobs], 'GJR-GARCH(1,1) Simulation')
np.random.seed(seed)
errgjr3,hgjr3, etax3 = generate_gjrgarch(nobs, [1.0, ar1],
[[1,0],[0.1,0.9],[0.1,0.9],[0.1,0.9]], mu=0.0,scale=0.1)
garchplot(errgjr3[:nobs], hgjr3[:nobs], 'GJR-GARCH(1,3) Simulation')
garchplot(errgjr3[-400:nobs], hgjr3[-400:nobs], 'GJR-GARCH(1,3) Simulation')
np.random.seed(seed)
errgjr4,hgjr4, etax4 = generate_gjrgarch(nobs, [1.0, ar1],
[[1., 1,0],[0, 0.1,0.9],[0, 0.1,0.9],[0, 0.1,0.9]],
mu=0.0,scale=0.1)
garchplot(errgjr4[:nobs], hgjr4[:nobs], 'GJR-GARCH(1,3) Simulation')
garchplot(errgjr4[-400:nobs], hgjr4[-400:nobs], 'GJR-GARCH(1,3) Simulation')
varinno = np.zeros(100)
varinno[0] = 1.
errgjr5,hgjr5, etax5 = generate_gjrgarch(100, [1.0, -0.],
[[1., 1,0],[0, 0.1,0.8],[0, 0.05,0.7],[0, 0.01,0.6]],
mu=0.0,scale=0.1, varinnovation=varinno)
garchplot(errgjr5[:20], hgjr5[:20], 'GJR-GARCH(1,3) Simulation')
#garchplot(errgjr4[-400:nobs], hgjr4[-400:nobs], 'GJR-GARCH(1,3) Simulation')
#plt.show()
seed = np.random.randint(9999999) # 9188410
print 'seed', seed
x = np.arange(20).reshape(10,2)
x3 = np.column_stack((np.ones((x.shape[0],1)),x))
y, inp = miso_lfilter([1., 0],np.array([[-2.0,3,1],[0.0,0.0,0]]),x3)
nobs = 1000
warmup = 1000
np.random.seed(seed)
ar = [1.0, -0.7]#7, -0.16, -0.1]
#ma = [[1., 1, 0],[0, 0.6,0.1],[0, 0.1,0.1],[0, 0.1,0.1]]
ma = [[1., 0, 0],[0, 0.8,0.0]] #,[0, 0.9,0.0]]
# errgjr4,hgjr4, etax4 = generate_gjrgarch(warmup+nobs, [1.0, -0.99],
# [[1., 1, 0],[0, 0.6,0.1],[0, 0.1,0.1],[0, 0.1,0.1]],
# mu=0.2, scale=0.25)
errgjr4,hgjr4, etax4 = generate_gjrgarch(warmup+nobs, ar, ma,
mu=0.4, scale=1.01)
errgjr4,hgjr4, etax4 = errgjr4[warmup:], hgjr4[warmup:], etax4[warmup:]
garchplot(errgjr4[:nobs], hgjr4[:nobs], 'GJR-GARCH(1,3) Simulation - DGP')
ggmod = Garch(errgjr4-errgjr4.mean())#hgjr4[:nobs])#-hgjr4.mean()) #errgjr4)
ggmod.nar = 1
ggmod.nma = 1
ggmod._start_params = np.array([-0.6, 0.1, 0.2, 0.0])
ggres = ggmod.fit(start_params=np.array([-0.6, 0.1, 0.2, 0.0]), maxiter=1000)
print 'ggres.params', ggres.params
garchplot(ggmod.errorsest, ggmod.h, title='Garch estimated')
ggmod0 = Garch0(errgjr4-errgjr4.mean())#hgjr4[:nobs])#-hgjr4.mean()) #errgjr4)
ggmod0.nar = 1
ggmod.nma = 1
start_params = np.array([-0.6, 0.2, 0.1])
ggmod0._start_params = start_params #np.array([-0.6, 0.1, 0.2, 0.0])
ggres0 = ggmod0.fit(start_params=start_params, maxiter=2000)
print 'ggres0.params', ggres0.params
ggmod0 = Garch0(errgjr4-errgjr4.mean())#hgjr4[:nobs])#-hgjr4.mean()) #errgjr4)
ggmod0.nar = 1
ggmod.nma = 1
start_params = np.array([-0.6, 0.2, 0.1])
ggmod0._start_params = start_params #np.array([-0.6, 0.1, 0.2, 0.0])
ggres0 = ggmod0.fit(start_params=start_params, method='bfgs', maxiter=2000)
print 'ggres0.params', ggres0.params
g11res = optimize.fmin(lambda params: -loglike_GARCH11(params, errgjr4-errgjr4.mean())[0], [0.93, 0.9, 0.2])
print g11res
llf = loglike_GARCH11(g11res, errgjr4-errgjr4.mean())
print llf[0]
if 'rpyfit' in examples:
from rpy import r
r.library('fGarch')
f = r.formula('~garch(1, 1)')
fit = r.garchFit(f, data = errgjr4-errgjr4.mean(), include_mean=False)
if 'rpysim' in examples:
from rpy import r
f = r.formula('~garch(1, 1)')
#fit = r.garchFit(f, data = errgjr4)
x = r.garchSim( n = 500)
print 'R acf', tsa.acf(np.power(x,2))[:15]
arma3 = Arma(np.power(x,2))
arma3res = arma3.fit(start_params=[-0.2,0.1,0.5],maxiter=5000)
print arma3res.params
arma3b = Arma(np.power(x,2))
arma3bres = arma3b.fit(start_params=[-0.2,0.1,0.5],maxiter=5000, method='bfgs')
print arma3bres.params
xr = r.garchSim( n = 100)
x = np.asarray(xr)
ggmod = Garch(x-x.mean())
ggmod.nar = 1
ggmod.nma = 1
ggmod._start_params = np.array([-0.6, 0.1, 0.2, 0.0])
ggres = ggmod.fit(start_params=np.array([-0.6, 0.1, 0.2, 0.0]), maxiter=1000)
print 'ggres.params', ggres.params
g11res = optimize.fmin(lambda params: -loglike_GARCH11(params, x-x.mean())[0], [0.6, 0.6, 0.2])
print g11res
llf = loglike_GARCH11(g11res, x-x.mean())
print llf[0]
garchplot(ggmod.errorsest, ggmod.h, title='Garch estimated')
fit = r.garchFit(f, data = x-x.mean(), include_mean=False, trace=False)
print r.summary(fit)
'''based on R default simulation
model = list(omega = 1e-06, alpha = 0.1, beta = 0.8)
nobs = 1000
(with nobs=500, gjrgarch doesn't do well
>>> ggres = ggmod.fit(start_params=np.array([-0.6, 0.1, 0.2, 0.0]), maxiter=1000)
Optimization terminated successfully.
Current function value: -448.861335
Iterations: 385
Function evaluations: 690
>>> print 'ggres.params', ggres.params
ggres.params [ -7.75090330e-01 1.57714749e-01 -9.60223930e-02 8.76021411e-07]
rearranged
8.76021411e-07 1.57714749e-01(-9.60223930e-02) 7.75090330e-01
>>> print g11res
[ 2.97459808e-06 7.83128600e-01 2.41110860e-01]
>>> llf = loglike_GARCH11(g11res, x-x.mean())
>>> print llf[0]
442.603541936
Log Likelihood:
-448.9376 normalized: -4.489376
omega alpha1 beta1
1.01632e-06 1.02802e-01 7.57537e-01
'''
''' the following is for errgjr4-errgjr4.mean()
ggres.params [-0.54510407 0.22723132 0.06482633 0.82325803]
Final Estimate:
LLH: 2065.56 norm LLH: 2.06556
mu omega alpha1 beta1
0.07229732 0.83069480 0.26313883 0.53986167
ggres.params [-0.50779163 0.2236606 0.00700036 1.154832
Final Estimate:
LLH: 2116.084 norm LLH: 2.116084
mu omega alpha1 beta1
-4.759227e-17 1.145404e+00 2.288348e-01 5.085949e-01
run3
DGP
0.4/?? 0.8 0.7
gjrgarch:
ggres.params [-0.45196579 0.2569641 0.02201904 1.11942636]
rearranged
const/omega ma1/alpha1 ar1/beta1
1.11942636 0.2569641(+0.02201904) 0.45196579
g11:
[ 1.10262688 0.26680468 0.45724957]
-2055.73912687
R:
Final Estimate:
LLH: 2055.738 norm LLH: 2.055738
mu omega alpha1 beta1
-1.665226e-17 1.102396e+00 2.668712e-01 4.573224e-01
fit = r.garchFit(f, data = errgjr4-errgjr4.mean())
rpy.RPy_RException: Error in solve.default(fit$hessian) :
Lapack routine dgesv: system is exactly singular
run4
DGP:
mu=0.4, scale=1.01
ma = [[1., 0, 0],[0, 0.8,0.0]], ar = [1.0, -0.7]
maybe something wrong with simulation
gjrgarch
ggres.params [-0.50554663 0.24449867 -0.00521004 1.00796791]
rearranged
1.00796791 0.24449867(-0.00521004) 0.50554663
garch11:
[ 1.01258264 0.24149155 0.50479994]
-2056.3877404
R include_constant=False
Final Estimate:
LLH: 2056.397 norm LLH: 2.056397
omega alpha1 beta1
1.0123560 0.2409589 0.5049154
'''
erro,ho, etaxo = generate_gjrgarch(20, ar, ma, mu=0.04, scale=0.01,
varinnovation = np.ones(20))
if 'sp500' in examples:
import tabular as tb
import scikits.timeseries as ts
a = tb.loadSV(r'C:\Josef\work-oth\gspc_table.csv')
s = ts.time_series(a[0]['Close'][::-1],
dates=ts.date_array(a[0]['Date'][::-1],freq="D"))
sp500 = a[0]['Close'][::-1]
sp500r = np.diff(np.log(sp500))
#plt.show()
| bsd-3-clause |
JCardenasRdz/Machine-Learning-4-MRI | Infection_vs_Inflammation/Code/Process_Data.py | 1 | 2713 | # Import Modules as needed
import numpy as np
#import seaborn as sn
import pandas as pd
from pylab import *
from mylocal_functions import *
# ======== T2 MSME============= #
# Make list of all T2.txt files
T2_list = get_ipython().getoutput('ls ../Study_03_CBA/*T2.txt')
# Allocate variables needed for analysis
T2DF=pd.DataFrame()
TR=np.linspace(.012,.012*12,12)
# Fit T2 and construct dataframe
for names in T2_list:
#Convert txt file to array
YDataMatrix=txt_2_array(names)
#Estimate T2
T2time=fitT2(TR,YDataMatrix)
#convert to data frame
df_T2=pd.DataFrame(T2time.T,columns=["Infected","Healthy_R","St_Inf","Healthy_L"])
#df_T2=pd.DataFrame(T2time.T,columns=["ROI-1","ROI-2","ROI-3","ROI-4"])
df_info=name_2_df(names)
df_final=pd.concat([df_T2,df_info], axis=1)
T2DF=T2DF.append(df_final,ignore_index=True)
# Plot T2 Density ROIs 1 and 2
#T2DF[T2DF.Slice==1].iloc[:,:4].plot.density(); title("Slice 01"); xlim((0.025,.15))
#T2DF[T2DF.Slice==2].iloc[:,:4].plot.density(); title("Slice 02"); xlim((0.025,.15))
#T2DF[T2DF.Slice==3].iloc[:,:4].plot.density(); title("Slice 03"); xlim((0.025,.15))
#T2DF[T2DF.Slice==4].iloc[:,:4].plot.density(); title("Slice 04"); xlim((0.025,.15))
#T2DF[T2DF.Slice==5].iloc[:,:4].plot.density(); title("Slice 05"); xlim((0.025,.15))
# ======== CEST============= #
# Make list of all T2.txt files
CEST_list=get_ipython().getoutput('ls ../Study_03_CBA/*CEST.txt')
CEST_DF=pd.DataFrame()
Z=np.zeros((4,110))
def normalize_data(DataMatrix):
rows,cols = DataMatrix.shape
newData = np.zeros_like(DataMatrix)
for row in range(rows):
newData[row,:]=DataMatrix[row,:]/DataMatrix[row,8]
return newData
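# Note (descriptive): normalize_data divides every Z-spectrum by its own value
# at column 8, assumed here to be the far off-resonance (unsaturated) reference
# acquisition, so the Lorentzian fits below operate on signals normalised to ~1.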
for names in CEST_list:
#Convert txt file to array
D=txt_2_array(names);
Zn=normalize_data(D.T)
Z=np.concatenate((Z,Zn))
Z=Z[4::,9::]
# define offsets in ppm
a1=np.linspace(-55,-50,9)
ppm=np.linspace(-8,8,101)
full_ppm = np.concatenate((a1, ppm))
# fit CEST data.
y=Z[12,:]
p=fit_L2_scale(ppm,y)
Yhat=Lscale(ppm,p[0],p[1],p[2],p[3],p[4],p[5],p[6]);
plt.figure(figsize=(10,6))
plt.plot(ppm,y,'o',label='Signal');
plt.plot(ppm,1-Yhat,'-',label='Fit');
plt.legend()
## ====== BUILD CEST Predictors ======== #####
CEST_predictors=np.zeros_like(Z)
rows,cols = CEST_predictors.shape
Tissue_Class=np.zeros((4,rows))
for i in range(rows):
p=fit_L2_scale(ppm,Z[i,:])
CEST_predictors[i,:]=Lscale(ppm,p[0],p[1],p[2],p[3],p[4],p[5],p[6]);
Tissue_Class=np.zeros((64,1))
for i in range(4):
Tissue_Class[i::4]=i
CEST_Dataframe=pd.DataFrame(CEST_predictors)
CEST_Dataframe["Tissue_Class"]=Tissue_Class
pd.DataFrame.to_csv(CEST_Dataframe,"CEST_infections.csv",header=True,index=False)
| mit |
sanketloke/scikit-learn | sklearn/gaussian_process/gpr.py | 43 | 18642 | """Gaussian processes regression. """
# Authors: Jan Hendrik Metzen <[email protected]>
#
# License: BSD 3 clause
import warnings
from operator import itemgetter
import numpy as np
from scipy.linalg import cholesky, cho_solve, solve_triangular
from scipy.optimize import fmin_l_bfgs_b
from sklearn.base import BaseEstimator, RegressorMixin, clone
from sklearn.gaussian_process.kernels import RBF, ConstantKernel as C
from sklearn.utils import check_random_state
from sklearn.utils.validation import check_X_y, check_array
class GaussianProcessRegressor(BaseEstimator, RegressorMixin):
"""Gaussian process regression (GPR).
The implementation is based on Algorithm 2.1 of Gaussian Processes
for Machine Learning (GPML) by Rasmussen and Williams.
In addition to standard sklearn estimator API, GaussianProcessRegressor:
* allows prediction without prior fitting (based on the GP prior)
* provides an additional method sample_y(X), which evaluates samples
drawn from the GPR (prior or posterior) at given inputs
* exposes a method log_marginal_likelihood(theta), which can be used
externally for other ways of selecting hyperparameters, e.g., via
Markov chain Monte Carlo.
Parameters
----------
kernel : kernel object
The kernel specifying the covariance function of the GP. If None is
passed, the kernel "1.0 * RBF(1.0)" is used as default. Note that
the kernel's hyperparameters are optimized during fitting.
alpha : float or array-like, optional (default: 1e-10)
Value added to the diagonal of the kernel matrix during fitting.
Larger values correspond to increased noise level in the observations
and reduce potential numerical issue during fitting. If an array is
passed, it must have the same number of entries as the data used for
fitting and is used as datapoint-dependent noise level. Note that this
        is equivalent to adding a WhiteKernel with c=alpha. Allowing one to specify
the noise level directly as a parameter is mainly for convenience and
for consistency with Ridge.
optimizer : string or callable, optional (default: "fmin_l_bfgs_b")
Can either be one of the internally supported optimizers for optimizing
the kernel's parameters, specified by a string, or an externally
defined optimizer passed as a callable. If a callable is passed, it
must have the signature::
def optimizer(obj_func, initial_theta, bounds):
# * 'obj_func' is the objective function to be maximized, which
# takes the hyperparameters theta as parameter and an
# optional flag eval_gradient, which determines if the
# gradient is returned additionally to the function value
# * 'initial_theta': the initial value for theta, which can be
# used by local optimizers
# * 'bounds': the bounds on the values of theta
....
# Returned are the best found hyperparameters theta and
# the corresponding value of the target function.
return theta_opt, func_min
Per default, the 'fmin_l_bfgs_b' algorithm from scipy.optimize
is used. If None is passed, the kernel's parameters are kept fixed.
Available internal optimizers are::
'fmin_l_bfgs_b'
n_restarts_optimizer: int, optional (default: 0)
The number of restarts of the optimizer for finding the kernel's
parameters which maximize the log-marginal likelihood. The first run
of the optimizer is performed from the kernel's initial parameters,
the remaining ones (if any) from thetas sampled log-uniform randomly
from the space of allowed theta-values. If greater than 0, all bounds
must be finite. Note that n_restarts_optimizer == 0 implies that one
run is performed.
normalize_y: boolean, optional (default: False)
Whether the target values y are normalized, i.e., the mean of the
observed target values become zero. This parameter should be set to
        True if the target values' mean is expected to differ considerably from
zero. When enabled, the normalization effectively modifies the GP's
prior based on the data, which contradicts the likelihood principle;
normalization is thus disabled per default.
copy_X_train : bool, optional (default: True)
If True, a persistent copy of the training data is stored in the
object. Otherwise, just a reference to the training data is stored,
which might cause predictions to change if the data is modified
externally.
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
Attributes
----------
X_train_ : array-like, shape = (n_samples, n_features)
Feature values in training data (also required for prediction)
y_train_: array-like, shape = (n_samples, [n_output_dims])
Target values in training data (also required for prediction)
kernel_: kernel object
The kernel used for prediction. The structure of the kernel is the
same as the one passed as parameter but with optimized hyperparameters
L_: array-like, shape = (n_samples, n_samples)
Lower-triangular Cholesky decomposition of the kernel in ``X_train_``
alpha_: array-like, shape = (n_samples,)
Dual coefficients of training data points in kernel space
log_marginal_likelihood_value_: float
The log-marginal-likelihood of ``self.kernel_.theta``
"""
def __init__(self, kernel=None, alpha=1e-10,
optimizer="fmin_l_bfgs_b", n_restarts_optimizer=0,
normalize_y=False, copy_X_train=True, random_state=None):
self.kernel = kernel
self.alpha = alpha
self.optimizer = optimizer
self.n_restarts_optimizer = n_restarts_optimizer
self.normalize_y = normalize_y
self.copy_X_train = copy_X_train
self.random_state = random_state
def fit(self, X, y):
"""Fit Gaussian process regression model
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Training data
y : array-like, shape = (n_samples, [n_output_dims])
Target values
Returns
-------
self : returns an instance of self.
"""
if self.kernel is None: # Use an RBF kernel as default
self.kernel_ = C(1.0, constant_value_bounds="fixed") \
* RBF(1.0, length_scale_bounds="fixed")
else:
self.kernel_ = clone(self.kernel)
self.rng = check_random_state(self.random_state)
X, y = check_X_y(X, y, multi_output=True, y_numeric=True)
# Normalize target value
if self.normalize_y:
self.y_train_mean = np.mean(y, axis=0)
# demean y
y = y - self.y_train_mean
else:
self.y_train_mean = np.zeros(1)
if np.iterable(self.alpha) \
and self.alpha.shape[0] != y.shape[0]:
if self.alpha.shape[0] == 1:
self.alpha = self.alpha[0]
else:
raise ValueError("alpha must be a scalar or an array"
" with same number of entries as y.(%d != %d)"
% (self.alpha.shape[0], y.shape[0]))
self.X_train_ = np.copy(X) if self.copy_X_train else X
self.y_train_ = np.copy(y) if self.copy_X_train else y
if self.optimizer is not None and self.kernel_.n_dims > 0:
# Choose hyperparameters based on maximizing the log-marginal
# likelihood (potentially starting from several initial values)
def obj_func(theta, eval_gradient=True):
if eval_gradient:
lml, grad = self.log_marginal_likelihood(
theta, eval_gradient=True)
return -lml, -grad
else:
return -self.log_marginal_likelihood(theta)
# First optimize starting from theta specified in kernel
optima = [(self._constrained_optimization(obj_func,
self.kernel_.theta,
self.kernel_.bounds))]
# Additional runs are performed from log-uniform chosen initial
# theta
if self.n_restarts_optimizer > 0:
if not np.isfinite(self.kernel_.bounds).all():
raise ValueError(
"Multiple optimizer restarts (n_restarts_optimizer>0) "
"requires that all bounds are finite.")
bounds = self.kernel_.bounds
for iteration in range(self.n_restarts_optimizer):
theta_initial = \
self.rng.uniform(bounds[:, 0], bounds[:, 1])
optima.append(
self._constrained_optimization(obj_func, theta_initial,
bounds))
# Select result from run with minimal (negative) log-marginal
# likelihood
lml_values = list(map(itemgetter(1), optima))
self.kernel_.theta = optima[np.argmin(lml_values)][0]
self.log_marginal_likelihood_value_ = -np.min(lml_values)
else:
self.log_marginal_likelihood_value_ = \
self.log_marginal_likelihood(self.kernel_.theta)
# Precompute quantities required for predictions which are independent
# of actual query points
K = self.kernel_(self.X_train_)
K[np.diag_indices_from(K)] += self.alpha
self.L_ = cholesky(K, lower=True) # Line 2
self.alpha_ = cho_solve((self.L_, True), self.y_train_) # Line 3
return self
def predict(self, X, return_std=False, return_cov=False):
"""Predict using the Gaussian process regression model
We can also predict based on an unfitted model by using the GP prior.
In addition to the mean of the predictive distribution, also its
standard deviation (return_std=True) or covariance (return_cov=True).
Note that at most one of the two can be requested.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Query points where the GP is evaluated
return_std : bool, default: False
If True, the standard-deviation of the predictive distribution at
the query points is returned along with the mean.
return_cov : bool, default: False
If True, the covariance of the joint predictive distribution at
the query points is returned along with the mean
Returns
-------
y_mean : array, shape = (n_samples, [n_output_dims])
Mean of predictive distribution a query points
y_std : array, shape = (n_samples,), optional
Standard deviation of predictive distribution at query points.
Only returned when return_std is True.
y_cov : array, shape = (n_samples, n_samples), optional
Covariance of joint predictive distribution a query points.
Only returned when return_cov is True.
"""
if return_std and return_cov:
raise RuntimeError(
"Not returning standard deviation of predictions when "
"returning full covariance.")
X = check_array(X)
if not hasattr(self, "X_train_"): # Unfitted;predict based on GP prior
y_mean = np.zeros(X.shape[0])
if return_cov:
y_cov = self.kernel(X)
return y_mean, y_cov
elif return_std:
y_var = self.kernel.diag(X)
return y_mean, np.sqrt(y_var)
else:
return y_mean
else: # Predict based on GP posterior
K_trans = self.kernel_(X, self.X_train_)
y_mean = K_trans.dot(self.alpha_) # Line 4 (y_mean = f_star)
y_mean = self.y_train_mean + y_mean # undo normal.
if return_cov:
v = cho_solve((self.L_, True), K_trans.T) # Line 5
y_cov = self.kernel_(X) - K_trans.dot(v) # Line 6
return y_mean, y_cov
elif return_std:
# compute inverse K_inv of K based on its Cholesky
# decomposition L and its inverse L_inv
L_inv = solve_triangular(self.L_.T, np.eye(self.L_.shape[0]))
K_inv = L_inv.dot(L_inv.T)
# Compute variance of predictive distribution
y_var = self.kernel_.diag(X)
y_var -= np.einsum("ki,kj,ij->k", K_trans, K_trans, K_inv)
# Check if any of the variances is negative because of
# numerical issues. If yes: set the variance to 0.
y_var_negative = y_var < 0
if np.any(y_var_negative):
warnings.warn("Predicted variances smaller than 0. "
"Setting those variances to 0.")
y_var[y_var_negative] = 0.0
return y_mean, np.sqrt(y_var)
else:
return y_mean
def sample_y(self, X, n_samples=1, random_state=0):
"""Draw samples from Gaussian process and evaluate at X.
Parameters
----------
X : array-like, shape = (n_samples_X, n_features)
Query points where the GP samples are evaluated
n_samples : int, default: 1
The number of samples drawn from the Gaussian process
random_state: RandomState or an int seed (0 by default)
A random number generator instance
Returns
-------
y_samples : array, shape = (n_samples_X, [n_output_dims], n_samples)
Values of n_samples samples drawn from Gaussian process and
evaluated at query points.
"""
rng = check_random_state(random_state)
y_mean, y_cov = self.predict(X, return_cov=True)
if y_mean.ndim == 1:
y_samples = rng.multivariate_normal(y_mean, y_cov, n_samples).T
else:
y_samples = \
[rng.multivariate_normal(y_mean[:, i], y_cov,
n_samples).T[:, np.newaxis]
for i in range(y_mean.shape[1])]
y_samples = np.hstack(y_samples)
return y_samples
def log_marginal_likelihood(self, theta=None, eval_gradient=False):
"""Returns log-marginal likelihood of theta for training data.
Parameters
----------
theta : array-like, shape = (n_kernel_params,) or None
Kernel hyperparameters for which the log-marginal likelihood is
evaluated. If None, the precomputed log_marginal_likelihood
of ``self.kernel_.theta`` is returned.
eval_gradient : bool, default: False
If True, the gradient of the log-marginal likelihood with respect
to the kernel hyperparameters at position theta is returned
additionally. If True, theta must not be None.
Returns
-------
log_likelihood : float
Log-marginal likelihood of theta for training data.
log_likelihood_gradient : array, shape = (n_kernel_params,), optional
Gradient of the log-marginal likelihood with respect to the kernel
hyperparameters at position theta.
Only returned when eval_gradient is True.
"""
if theta is None:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated for theta!=None")
return self.log_marginal_likelihood_value_
kernel = self.kernel_.clone_with_theta(theta)
if eval_gradient:
K, K_gradient = kernel(self.X_train_, eval_gradient=True)
else:
K = kernel(self.X_train_)
K[np.diag_indices_from(K)] += self.alpha
try:
L = cholesky(K, lower=True) # Line 2
except np.linalg.LinAlgError:
return (-np.inf, np.zeros_like(theta)) \
if eval_gradient else -np.inf
# Support multi-dimensional output of self.y_train_
y_train = self.y_train_
if y_train.ndim == 1:
y_train = y_train[:, np.newaxis]
alpha = cho_solve((L, True), y_train) # Line 3
# Compute log-likelihood (compare line 7)
log_likelihood_dims = -0.5 * np.einsum("ik,ik->k", y_train, alpha)
log_likelihood_dims -= np.log(np.diag(L)).sum()
log_likelihood_dims -= K.shape[0] / 2 * np.log(2 * np.pi)
log_likelihood = log_likelihood_dims.sum(-1) # sum over dimensions
if eval_gradient: # compare Equation 5.9 from GPML
tmp = np.einsum("ik,jk->ijk", alpha, alpha) # k: output-dimension
tmp -= cho_solve((L, True), np.eye(K.shape[0]))[:, :, np.newaxis]
# Compute "0.5 * trace(tmp.dot(K_gradient))" without
# constructing the full matrix tmp.dot(K_gradient) since only
# its diagonal is required
log_likelihood_gradient_dims = \
0.5 * np.einsum("ijl,ijk->kl", tmp, K_gradient)
log_likelihood_gradient = log_likelihood_gradient_dims.sum(-1)
if eval_gradient:
return log_likelihood, log_likelihood_gradient
else:
return log_likelihood
def _constrained_optimization(self, obj_func, initial_theta, bounds):
if self.optimizer == "fmin_l_bfgs_b":
theta_opt, func_min, convergence_dict = \
fmin_l_bfgs_b(obj_func, initial_theta, bounds=bounds)
if convergence_dict["warnflag"] != 0:
warnings.warn("fmin_l_bfgs_b terminated abnormally with the "
" state: %s" % convergence_dict)
elif callable(self.optimizer):
theta_opt, func_min = \
self.optimizer(obj_func, initial_theta, bounds=bounds)
else:
raise ValueError("Unknown optimizer %s." % self.optimizer)
return theta_opt, func_min
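if __name__ == "__main__":
    # Minimal usage sketch (illustrative only): fit a noisy sine curve on
    # synthetic data and query the posterior mean and standard deviation.
    rng = check_random_state(0)
    X_demo = rng.uniform(0, 5, 20)[:, np.newaxis]
    y_demo = np.sin(X_demo).ravel() + 0.1 * rng.randn(20)
    gp = GaussianProcessRegressor(kernel=C(1.0) * RBF(1.0), alpha=1e-2)
    gp.fit(X_demo, y_demo)
    mean_demo, std_demo = gp.predict(X_demo, return_std=True)
    print(mean_demo.shape)  # (20,)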
| bsd-3-clause |
zymsys/sms-tools | lectures/07-Sinusoidal-plus-residual-model/plots-code/hprModelAnal-flute.py | 21 | 2771 | import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import hamming, hanning, triang, blackmanharris, resample
import math
import sys, os, time
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import stft as STFT
import utilFunctions as UF
import harmonicModel as HM
(fs, x) = UF.wavread(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../sounds/flute-A4.wav'))
w = np.blackman(551)
N = 1024
t = -100
nH = 40
minf0 = 420
maxf0 = 460
f0et = 5
maxnpeaksTwm = 5
minSineDur = .1
harmDevSlope = 0.01
Ns = 512
H = Ns/4
mX, pX = STFT.stftAnal(x, fs, w, N, H)
hfreq, hmag, hphase = HM.harmonicModelAnal(x, fs, w, N, H, t, nH, minf0, maxf0, f0et, harmDevSlope, minSineDur)
xr = UF.sineSubtraction(x, Ns, H, hfreq, hmag, hphase, fs)
mXr, pXr = STFT.stftAnal(xr, fs, hamming(Ns), Ns, H)
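# Summary of the analysis chain above (descriptive only): harmonicModelAnal
# tracks the harmonics of the flute tone, sineSubtraction removes the
# synthesised harmonic component from x, and this second STFT (mXr, pXr) gives
# the spectrogram of the residual that is plotted in the lower panels.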
maxplotfreq = 5000.0
plt.figure(1, figsize=(9, 7))
plt.subplot(221)
numFrames = int(mX[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
binFreq = fs*np.arange(N*maxplotfreq/fs)/N
plt.pcolormesh(frmTime, binFreq, np.transpose(mX[:,:N*maxplotfreq/fs+1]))
plt.autoscale(tight=True)
harms = hfreq*np.less(hfreq,maxplotfreq)
harms[harms==0] = np.nan
numFrames = int(harms[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
plt.plot(frmTime, harms, color='k', ms=3, alpha=1)
plt.autoscale(tight=True)
plt.title('mX + harmonics (flute-A4.wav)')
plt.subplot(222)
numFrames = int(mX[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
binFreq = fs*np.arange(N*maxplotfreq/fs)/N
plt.pcolormesh(frmTime, binFreq, np.transpose(np.diff(pX[:,:N*maxplotfreq/fs+1],axis=1)))
plt.autoscale(tight=True)
harms = hfreq*np.less(hfreq,maxplotfreq)
harms[harms==0] = np.nan
numFrames = int(harms[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
plt.plot(frmTime, harms, color='k', ms=3, alpha=1)
plt.autoscale(tight=True)
plt.title('pX + harmonics')
plt.subplot(223)
numFrames = int(mXr[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
binFreq = fs*np.arange(Ns*maxplotfreq/fs)/Ns
plt.pcolormesh(frmTime, binFreq, np.transpose(mXr[:,:Ns*maxplotfreq/fs+1]))
plt.autoscale(tight=True)
plt.title('mXr')
plt.subplot(224)
numFrames = int(pXr[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
binFreq = fs*np.arange(Ns*maxplotfreq/fs)/Ns
plt.pcolormesh(frmTime, binFreq, np.transpose(np.diff(pXr[:,:Ns*maxplotfreq/fs+1],axis=1)))
plt.autoscale(tight=True)
plt.title('pXr')
plt.tight_layout()
plt.savefig('hprModelAnal-flute.png')
UF.wavwrite(5*xr, fs, 'flute-residual.wav')
plt.show()
| agpl-3.0 |
joernhees/scikit-learn | examples/datasets/plot_random_multilabel_dataset.py | 278 | 3402 | """
==============================================
Plot randomly generated multilabel dataset
==============================================
This illustrates the `datasets.make_multilabel_classification` dataset
generator. Each sample consists of counts of two features (up to 50 in
total), which are differently distributed in each of two classes.
Points are labeled as follows, where Y means the class is present:
=====  =====  =====  ======
  1      2      3    Color
=====  =====  =====  ======
  Y      N      N    Red
  N      Y      N    Blue
  N      N      Y    Yellow
  Y      Y      N    Purple
  Y      N      Y    Orange
  N      Y      Y    Green
  Y      Y      Y    Brown
=====  =====  =====  ======
A star marks the expected sample for each class; its size reflects the
probability of selecting that class label.
The left and right examples highlight the ``n_labels`` parameter:
more of the samples in the right plot have 2 or 3 labels.
Note that this two-dimensional example is very degenerate:
generally the number of features would be much greater than the
"document length", while here we have much larger documents than vocabulary.
Similarly, with ``n_classes > n_features``, it is much less likely that a
feature distinguishes a particular class.
"""
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_multilabel_classification as make_ml_clf
print(__doc__)
COLORS = np.array(['!',
'#FF3333', # red
'#0198E1', # blue
'#BF5FFF', # purple
'#FCD116', # yellow
'#FF7216', # orange
'#4DBD33', # green
'#87421F' # brown
])
# Use same random seed for multiple calls to make_multilabel_classification to
# ensure same distributions
RANDOM_SEED = np.random.randint(2 ** 10)
def plot_2d(ax, n_labels=1, n_classes=3, length=50):
X, Y, p_c, p_w_c = make_ml_clf(n_samples=150, n_features=2,
n_classes=n_classes, n_labels=n_labels,
length=length, allow_unlabeled=False,
return_distributions=True,
random_state=RANDOM_SEED)
ax.scatter(X[:, 0], X[:, 1], color=COLORS.take((Y * [1, 2, 4]
).sum(axis=1)),
marker='.')
ax.scatter(p_w_c[0] * length, p_w_c[1] * length,
marker='*', linewidth=.5, edgecolor='black',
s=20 + 1500 * p_c ** 2,
color=COLORS.take([1, 2, 4]))
ax.set_xlabel('Feature 0 count')
return p_c, p_w_c
_, (ax1, ax2) = plt.subplots(1, 2, sharex='row', sharey='row', figsize=(8, 4))
plt.subplots_adjust(bottom=.15)
p_c, p_w_c = plot_2d(ax1, n_labels=1)
ax1.set_title('n_labels=1, length=50')
ax1.set_ylabel('Feature 1 count')
plot_2d(ax2, n_labels=3)
ax2.set_title('n_labels=3, length=50')
ax2.set_xlim(left=0, auto=True)
ax2.set_ylim(bottom=0, auto=True)
plt.show()
print('The data was generated from (random_state=%d):' % RANDOM_SEED)
print('Class', 'P(C)', 'P(w0|C)', 'P(w1|C)', sep='\t')
for k, p, p_w in zip(['red', 'blue', 'yellow'], p_c, p_w_c.T):
print('%s\t%0.2f\t%0.2f\t%0.2f' % (k, p, p_w[0], p_w[1]))
| bsd-3-clause |
kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/pandas/io/tests/parser/test_textreader.py | 7 | 12917 | # -*- coding: utf-8 -*-
"""
Tests the TextReader class in parsers.pyx, which
is integral to the C engine in parsers.py
"""
from pandas.compat import StringIO, BytesIO, map
from pandas import compat
import os
import sys
import nose
from numpy import nan
import numpy as np
from pandas import DataFrame
from pandas.io.parsers import (read_csv, TextFileReader)
from pandas.util.testing import assert_frame_equal
import pandas.util.testing as tm
from pandas.parser import TextReader
import pandas.parser as parser
class TestTextReader(tm.TestCase):
def setUp(self):
self.dirpath = tm.get_data_path()
self.csv1 = os.path.join(self.dirpath, 'test1.csv')
self.csv2 = os.path.join(self.dirpath, 'test2.csv')
self.xls1 = os.path.join(self.dirpath, 'test.xls')
def test_file_handle(self):
try:
f = open(self.csv1, 'rb')
reader = TextReader(f)
result = reader.read() # noqa
finally:
f.close()
def test_string_filename(self):
reader = TextReader(self.csv1, header=None)
reader.read()
def test_file_handle_mmap(self):
try:
f = open(self.csv1, 'rb')
reader = TextReader(f, memory_map=True, header=None)
reader.read()
finally:
f.close()
def test_StringIO(self):
with open(self.csv1, 'rb') as f:
text = f.read()
src = BytesIO(text)
reader = TextReader(src, header=None)
reader.read()
def test_string_factorize(self):
# should this be optional?
data = 'a\nb\na\nb\na'
reader = TextReader(StringIO(data), header=None)
result = reader.read()
self.assertEqual(len(set(map(id, result[0]))), 2)
def test_skipinitialspace(self):
data = ('a, b\n'
'a, b\n'
'a, b\n'
'a, b')
reader = TextReader(StringIO(data), skipinitialspace=True,
header=None)
result = reader.read()
self.assert_numpy_array_equal(result[0],
np.array(['a', 'a', 'a', 'a'],
dtype=np.object_))
self.assert_numpy_array_equal(result[1],
np.array(['b', 'b', 'b', 'b'],
dtype=np.object_))
def test_parse_booleans(self):
data = 'True\nFalse\nTrue\nTrue'
reader = TextReader(StringIO(data), header=None)
result = reader.read()
self.assertEqual(result[0].dtype, np.bool_)
def test_delimit_whitespace(self):
data = 'a b\na\t\t "b"\n"a"\t \t b'
reader = TextReader(StringIO(data), delim_whitespace=True,
header=None)
result = reader.read()
self.assert_numpy_array_equal(result[0], np.array(['a', 'a', 'a'],
dtype=np.object_))
self.assert_numpy_array_equal(result[1], np.array(['b', 'b', 'b'],
dtype=np.object_))
def test_embedded_newline(self):
data = 'a\n"hello\nthere"\nthis'
reader = TextReader(StringIO(data), header=None)
result = reader.read()
expected = np.array(['a', 'hello\nthere', 'this'], dtype=np.object_)
self.assert_numpy_array_equal(result[0], expected)
def test_euro_decimal(self):
data = '12345,67\n345,678'
reader = TextReader(StringIO(data), delimiter=':',
decimal=',', header=None)
result = reader.read()
expected = np.array([12345.67, 345.678])
tm.assert_almost_equal(result[0], expected)
def test_integer_thousands(self):
data = '123,456\n12,500'
reader = TextReader(StringIO(data), delimiter=':',
thousands=',', header=None)
result = reader.read()
expected = np.array([123456, 12500], dtype=np.int64)
tm.assert_almost_equal(result[0], expected)
def test_integer_thousands_alt(self):
data = '123.456\n12.500'
reader = TextFileReader(StringIO(data), delimiter=':',
thousands='.', header=None)
result = reader.read()
expected = DataFrame([123456, 12500])
tm.assert_frame_equal(result, expected)
def test_skip_bad_lines(self):
# too many lines, see #2430 for why
data = ('a:b:c\n'
'd:e:f\n'
'g:h:i\n'
'j:k:l:m\n'
'l:m:n\n'
'o:p:q:r')
reader = TextReader(StringIO(data), delimiter=':',
header=None)
self.assertRaises(parser.CParserError, reader.read)
reader = TextReader(StringIO(data), delimiter=':',
header=None,
error_bad_lines=False,
warn_bad_lines=False)
result = reader.read()
expected = {0: ['a', 'd', 'g', 'l'],
1: ['b', 'e', 'h', 'm'],
2: ['c', 'f', 'i', 'n']}
assert_array_dicts_equal(result, expected)
stderr = sys.stderr
sys.stderr = StringIO()
try:
reader = TextReader(StringIO(data), delimiter=':',
header=None,
error_bad_lines=False,
warn_bad_lines=True)
reader.read()
val = sys.stderr.getvalue()
self.assertTrue('Skipping line 4' in val)
self.assertTrue('Skipping line 6' in val)
finally:
sys.stderr = stderr
def test_header_not_enough_lines(self):
data = ('skip this\n'
'skip this\n'
'a,b,c\n'
'1,2,3\n'
'4,5,6')
reader = TextReader(StringIO(data), delimiter=',', header=2)
header = reader.header
expected = [['a', 'b', 'c']]
self.assertEqual(header, expected)
recs = reader.read()
expected = {0: [1, 4], 1: [2, 5], 2: [3, 6]}
assert_array_dicts_equal(expected, recs)
# not enough rows
self.assertRaises(parser.CParserError, TextReader, StringIO(data),
delimiter=',', header=5, as_recarray=True)
def test_header_not_enough_lines_as_recarray(self):
data = ('skip this\n'
'skip this\n'
'a,b,c\n'
'1,2,3\n'
'4,5,6')
reader = TextReader(StringIO(data), delimiter=',', header=2,
as_recarray=True)
header = reader.header
expected = [['a', 'b', 'c']]
self.assertEqual(header, expected)
recs = reader.read()
expected = {'a': [1, 4], 'b': [2, 5], 'c': [3, 6]}
assert_array_dicts_equal(expected, recs)
# not enough rows
self.assertRaises(parser.CParserError, TextReader, StringIO(data),
delimiter=',', header=5, as_recarray=True)
def test_escapechar(self):
data = ('\\"hello world\"\n'
'\\"hello world\"\n'
'\\"hello world\"')
reader = TextReader(StringIO(data), delimiter=',', header=None,
escapechar='\\')
result = reader.read()
expected = {0: ['"hello world"'] * 3}
assert_array_dicts_equal(result, expected)
def test_eof_has_eol(self):
# handling of new line at EOF
pass
def test_na_substitution(self):
pass
def test_numpy_string_dtype(self):
data = """\
a,1
aa,2
aaa,3
aaaa,4
aaaaa,5"""
def _make_reader(**kwds):
return TextReader(StringIO(data), delimiter=',', header=None,
**kwds)
reader = _make_reader(dtype='S5,i4')
result = reader.read()
self.assertEqual(result[0].dtype, 'S5')
ex_values = np.array(['a', 'aa', 'aaa', 'aaaa', 'aaaaa'], dtype='S5')
self.assertTrue((result[0] == ex_values).all())
self.assertEqual(result[1].dtype, 'i4')
reader = _make_reader(dtype='S4')
result = reader.read()
self.assertEqual(result[0].dtype, 'S4')
ex_values = np.array(['a', 'aa', 'aaa', 'aaaa', 'aaaa'], dtype='S4')
self.assertTrue((result[0] == ex_values).all())
self.assertEqual(result[1].dtype, 'S4')
def test_numpy_string_dtype_as_recarray(self):
data = """\
a,1
aa,2
aaa,3
aaaa,4
aaaaa,5"""
def _make_reader(**kwds):
return TextReader(StringIO(data), delimiter=',', header=None,
**kwds)
reader = _make_reader(dtype='S4', as_recarray=True)
result = reader.read()
self.assertEqual(result['0'].dtype, 'S4')
ex_values = np.array(['a', 'aa', 'aaa', 'aaaa', 'aaaa'], dtype='S4')
self.assertTrue((result['0'] == ex_values).all())
self.assertEqual(result['1'].dtype, 'S4')
def test_pass_dtype(self):
data = """\
one,two
1,a
2,b
3,c
4,d"""
def _make_reader(**kwds):
return TextReader(StringIO(data), delimiter=',', **kwds)
reader = _make_reader(dtype={'one': 'u1', 1: 'S1'})
result = reader.read()
self.assertEqual(result[0].dtype, 'u1')
self.assertEqual(result[1].dtype, 'S1')
reader = _make_reader(dtype={'one': np.uint8, 1: object})
result = reader.read()
self.assertEqual(result[0].dtype, 'u1')
self.assertEqual(result[1].dtype, 'O')
reader = _make_reader(dtype={'one': np.dtype('u1'),
1: np.dtype('O')})
result = reader.read()
self.assertEqual(result[0].dtype, 'u1')
self.assertEqual(result[1].dtype, 'O')
def test_usecols(self):
data = """\
a,b,c
1,2,3
4,5,6
7,8,9
10,11,12"""
def _make_reader(**kwds):
return TextReader(StringIO(data), delimiter=',', **kwds)
reader = _make_reader(usecols=(1, 2))
result = reader.read()
exp = _make_reader().read()
self.assertEqual(len(result), 2)
self.assertTrue((result[1] == exp[1]).all())
self.assertTrue((result[2] == exp[2]).all())
def test_cr_delimited(self):
def _test(text, **kwargs):
nice_text = text.replace('\r', '\r\n')
result = TextReader(StringIO(text), **kwargs).read()
expected = TextReader(StringIO(nice_text), **kwargs).read()
assert_array_dicts_equal(result, expected)
data = 'a,b,c\r1,2,3\r4,5,6\r7,8,9\r10,11,12'
_test(data, delimiter=',')
data = 'a b c\r1 2 3\r4 5 6\r7 8 9\r10 11 12'
_test(data, delim_whitespace=True)
data = 'a,b,c\r1,2,3\r4,5,6\r,88,9\r10,11,12'
_test(data, delimiter=',')
sample = ('A,B,C,D,E,F,G,H,I,J,K,L,M,N,O\r'
'AAAAA,BBBBB,0,0,0,0,0,0,0,0,0,0,0,0,0\r'
',BBBBB,0,0,0,0,0,0,0,0,0,0,0,0,0')
_test(sample, delimiter=',')
data = 'A B C\r 2 3\r4 5 6'
_test(data, delim_whitespace=True)
data = 'A B C\r2 3\r4 5 6'
_test(data, delim_whitespace=True)
def test_empty_field_eof(self):
data = 'a,b,c\n1,2,3\n4,,'
result = TextReader(StringIO(data), delimiter=',').read()
expected = {0: np.array([1, 4]),
1: np.array(['2', ''], dtype=object),
2: np.array(['3', ''], dtype=object)}
assert_array_dicts_equal(result, expected)
# GH5664
a = DataFrame([['b'], [nan]], columns=['a'], index=['a', 'c'])
b = DataFrame([[1, 1, 1, 0], [1, 1, 1, 0]],
columns=list('abcd'),
index=[1, 1])
c = DataFrame([[1, 2, 3, 4], [6, nan, nan, nan],
[8, 9, 10, 11], [13, 14, nan, nan]],
columns=list('abcd'),
index=[0, 5, 7, 12])
for _ in range(100):
df = read_csv(StringIO('a,b\nc\n'), skiprows=0,
names=['a'], engine='c')
assert_frame_equal(df, a)
df = read_csv(StringIO('1,1,1,1,0\n' * 2 + '\n' * 2),
names=list("abcd"), engine='c')
assert_frame_equal(df, b)
df = read_csv(StringIO('0,1,2,3,4\n5,6\n7,8,9,10,11\n12,13,14'),
names=list('abcd'), engine='c')
assert_frame_equal(df, c)
def assert_array_dicts_equal(left, right):
for k, v in compat.iteritems(left):
assert(np.array_equal(v, right[k]))
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| gpl-3.0 |
jmmease/pandas | asv_bench/benchmarks/reindex.py | 7 | 6523 | from .pandas_vb_common import *
from random import shuffle
class Reindexing(object):
goal_time = 0.2
def setup(self):
self.rng = DatetimeIndex(start='1/1/1970', periods=10000, freq='1min')
self.df = DataFrame(np.random.rand(10000, 10), index=self.rng,
columns=range(10))
self.df['foo'] = 'bar'
self.rng2 = Index(self.rng[::2])
self.df2 = DataFrame(index=range(10000),
data=np.random.rand(10000, 30), columns=range(30))
# multi-index
N = 5000
K = 200
level1 = tm.makeStringIndex(N).values.repeat(K)
level2 = np.tile(tm.makeStringIndex(K).values, N)
index = MultiIndex.from_arrays([level1, level2])
self.s1 = Series(np.random.randn((N * K)), index=index)
self.s2 = self.s1[::2]
def time_reindex_dates(self):
self.df.reindex(self.rng2)
def time_reindex_columns(self):
self.df2.reindex(columns=self.df.columns[1:5])
def time_reindex_multiindex(self):
self.s1.reindex(self.s2.index)
#----------------------------------------------------------------------
# Pad / backfill
class FillMethod(object):
goal_time = 0.2
def setup(self):
self.rng = date_range('1/1/2000', periods=100000, freq='1min')
self.ts = Series(np.random.randn(len(self.rng)), index=self.rng)
self.ts2 = self.ts[::2]
self.ts3 = self.ts2.reindex(self.ts.index)
self.ts4 = self.ts3.astype('float32')
def pad(self, source_series, target_index):
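        # reindex with forward-fill; very old pandas releases spelled the
        # keyword ``fillMethod`` instead of ``method``, hence the fallback.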
try:
source_series.reindex(target_index, method='pad')
except:
source_series.reindex(target_index, fillMethod='pad')
def backfill(self, source_series, target_index):
try:
source_series.reindex(target_index, method='backfill')
except:
source_series.reindex(target_index, fillMethod='backfill')
def time_backfill_dates(self):
self.backfill(self.ts2, self.ts.index)
def time_pad_daterange(self):
self.pad(self.ts2, self.ts.index)
def time_backfill(self):
self.ts3.fillna(method='backfill')
def time_backfill_float32(self):
self.ts4.fillna(method='backfill')
def time_pad(self):
self.ts3.fillna(method='pad')
def time_pad_float32(self):
self.ts4.fillna(method='pad')
#----------------------------------------------------------------------
# align on level
class LevelAlign(object):
goal_time = 0.2
def setup(self):
self.index = MultiIndex(
levels=[np.arange(10), np.arange(100), np.arange(100)],
labels=[np.arange(10).repeat(10000),
np.tile(np.arange(100).repeat(100), 10),
np.tile(np.tile(np.arange(100), 100), 10)])
random.shuffle(self.index.values)
self.df = DataFrame(np.random.randn(len(self.index), 4),
index=self.index)
self.df_level = DataFrame(np.random.randn(100, 4),
index=self.index.levels[1])
def time_align_level(self):
self.df.align(self.df_level, level=1, copy=False)
def time_reindex_level(self):
self.df_level.reindex(self.df.index, level=1)
#----------------------------------------------------------------------
# drop_duplicates
class Duplicates(object):
goal_time = 0.2
def setup(self):
self.N = 10000
self.K = 10
self.key1 = tm.makeStringIndex(self.N).values.repeat(self.K)
self.key2 = tm.makeStringIndex(self.N).values.repeat(self.K)
self.df = DataFrame({'key1': self.key1, 'key2': self.key2,
'value': np.random.randn((self.N * self.K)),})
self.col_array_list = list(self.df.values.T)
self.df2 = self.df.copy()
self.df2.ix[:10000, :] = np.nan
self.s = Series(np.random.randint(0, 1000, size=10000))
self.s2 = Series(np.tile(tm.makeStringIndex(1000).values, 10))
np.random.seed(1234)
self.N = 1000000
self.K = 10000
self.key1 = np.random.randint(0, self.K, size=self.N)
self.df_int = DataFrame({'key1': self.key1})
self.df_bool = DataFrame({i: np.random.randint(0, 2, size=self.K,
dtype=bool)
for i in range(10)})
def time_frame_drop_dups(self):
self.df.drop_duplicates(['key1', 'key2'])
def time_frame_drop_dups_inplace(self):
self.df.drop_duplicates(['key1', 'key2'], inplace=True)
def time_frame_drop_dups_na(self):
self.df2.drop_duplicates(['key1', 'key2'])
def time_frame_drop_dups_na_inplace(self):
self.df2.drop_duplicates(['key1', 'key2'], inplace=True)
def time_series_drop_dups_int(self):
self.s.drop_duplicates()
def time_series_drop_dups_string(self):
self.s2.drop_duplicates()
def time_frame_drop_dups_int(self):
self.df_int.drop_duplicates()
def time_frame_drop_dups_bool(self):
self.df_bool.drop_duplicates()
#----------------------------------------------------------------------
# blog "pandas escaped the zoo"
class Align(object):
goal_time = 0.2
def setup(self):
n = 50000
indices = tm.makeStringIndex(n)
subsample_size = 40000
def sample(values, k):
sampler = np.arange(len(values))
shuffle(sampler)
return values.take(sampler[:k])
self.x = Series(np.random.randn(50000), indices)
self.y = Series(np.random.randn(subsample_size),
index=sample(indices, subsample_size))
def time_align_series_irregular_string(self):
(self.x + self.y)
class LibFastZip(object):
goal_time = 0.2
def setup(self):
self.N = 10000
self.K = 10
self.key1 = tm.makeStringIndex(self.N).values.repeat(self.K)
self.key2 = tm.makeStringIndex(self.N).values.repeat(self.K)
self.df = DataFrame({'key1': self.key1, 'key2': self.key2, 'value': np.random.randn((self.N * self.K)), })
self.col_array_list = list(self.df.values.T)
self.df2 = self.df.copy()
self.df2.ix[:10000, :] = np.nan
self.col_array_list2 = list(self.df2.values.T)
def time_lib_fast_zip(self):
lib.fast_zip(self.col_array_list)
def time_lib_fast_zip_fillna(self):
lib.fast_zip_fillna(self.col_array_list2)
| bsd-3-clause |
tensorflow/estimator | tensorflow_estimator/python/estimator/canned/dnn_test_fc_v2.py | 1 | 19054 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for dnn.py with feature_column_v2."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import tempfile
from unittest.mock import patch
from absl.testing import parameterized
import numpy as np
import six
import tensorflow as tf
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.python.feature_column import feature_column_v2
from tensorflow_estimator.python.estimator.canned import dnn
from tensorflow_estimator.python.estimator.canned import dnn_testing_utils
from tensorflow_estimator.python.estimator.canned import prediction_keys
from tensorflow_estimator.python.estimator.export import export
from tensorflow_estimator.python.estimator.inputs import numpy_io
from tensorflow_estimator.python.estimator.inputs import pandas_io
try:
# pylint: disable=g-import-not-at-top
import pandas as pd
HAS_PANDAS = True
except IOError:
# Pandas writes a temporary file during import. If it fails, don't use pandas.
HAS_PANDAS = False
except ImportError:
HAS_PANDAS = False
def _dnn_classifier_fn(*args, **kwargs):
return dnn.DNNClassifierV2(*args, **kwargs)
class DNNModelFnV2Test(dnn_testing_utils.BaseDNNModelFnTest, tf.test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
tf.test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNModelFnTest.__init__(
self, dnn.dnn_model_fn_v2, fc_impl=feature_column_v2)
class DNNLogitFnV2Test(dnn_testing_utils.BaseDNNLogitFnTest, tf.test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
tf.test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNLogitFnTest.__init__(
self, dnn.dnn_logit_fn_builder_v2, fc_impl=feature_column_v2)
class DNNWarmStartingV2Test(dnn_testing_utils.BaseDNNWarmStartingTest,
tf.test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
tf.test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNWarmStartingTest.__init__(
self, _dnn_classifier_fn, _dnn_regressor_fn, fc_impl=feature_column_v2)
class DNNClassifierEvaluateV2Test(
dnn_testing_utils.BaseDNNClassifierEvaluateTest, tf.test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
tf.test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNClassifierEvaluateTest.__init__(
self, _dnn_classifier_fn, fc_impl=feature_column_v2)
class DNNClassifierPredictV2Test(dnn_testing_utils.BaseDNNClassifierPredictTest,
tf.test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
tf.test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNClassifierPredictTest.__init__(
self, _dnn_classifier_fn, fc_impl=feature_column_v2)
class DNNClassifierTrainV2Test(dnn_testing_utils.BaseDNNClassifierTrainTest,
tf.test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
tf.test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNClassifierTrainTest.__init__(
self, _dnn_classifier_fn, fc_impl=feature_column_v2)
def _dnn_regressor_fn(*args, **kwargs):
return dnn.DNNRegressorV2(*args, **kwargs)
class DNNRegressorEvaluateV2Test(dnn_testing_utils.BaseDNNRegressorEvaluateTest,
tf.test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
tf.test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNRegressorEvaluateTest.__init__(
self, _dnn_regressor_fn, fc_impl=feature_column_v2)
class DNNRegressorPredictV2Test(dnn_testing_utils.BaseDNNRegressorPredictTest,
tf.test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
tf.test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNRegressorPredictTest.__init__(
self, _dnn_regressor_fn, fc_impl=feature_column_v2)
class DNNRegressorTrainV2Test(dnn_testing_utils.BaseDNNRegressorTrainTest,
tf.test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
tf.test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNRegressorTrainTest.__init__(
self, _dnn_regressor_fn, fc_impl=feature_column_v2)
def _queue_parsed_features(feature_map):
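  # Re-enqueue the parsed feature tensors through a FIFOQueue so that the
  # dequeued tensors can be handed back to an input_fn as a feature dict.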
tensors_to_enqueue = []
keys = []
for key, tensor in six.iteritems(feature_map):
keys.append(key)
tensors_to_enqueue.append(tensor)
queue_dtypes = [x.dtype for x in tensors_to_enqueue]
input_queue = tf.queue.FIFOQueue(capacity=100, dtypes=queue_dtypes)
tf.compat.v1.train.queue_runner.add_queue_runner(
tf.compat.v1.train.queue_runner.QueueRunner(
input_queue, [input_queue.enqueue(tensors_to_enqueue)]))
dequeued_tensors = input_queue.dequeue()
return {keys[i]: dequeued_tensors[i] for i in range(len(dequeued_tensors))}
class DNNRegressorIntegrationTest(tf.test.TestCase, parameterized.TestCase):
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
tf.compat.v1.summary.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def _test_complete_flow(self, train_input_fn, eval_input_fn, predict_input_fn,
input_dimension, label_dimension, batch_size):
feature_columns = [
tf.feature_column.numeric_column('x', shape=(input_dimension,))
]
est = dnn.DNNRegressorV2(
hidden_units=(2, 2),
feature_columns=feature_columns,
label_dimension=label_dimension,
model_dir=self._model_dir)
# TRAIN
num_steps = 10
est.train(train_input_fn, steps=num_steps)
# EVALUATE
scores = est.evaluate(eval_input_fn)
self.assertEqual(num_steps, scores[tf.compat.v1.GraphKeys.GLOBAL_STEP])
self.assertIn('loss', six.iterkeys(scores))
# PREDICT
predictions = np.array([
x[prediction_keys.PredictionKeys.PREDICTIONS]
for x in est.predict(predict_input_fn)
])
self.assertAllEqual((batch_size, label_dimension), predictions.shape)
# EXPORT
feature_spec = tf.feature_column.make_parse_example_spec(feature_columns)
serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
feature_spec)
export_dir = est.export_saved_model(tempfile.mkdtemp(),
serving_input_receiver_fn)
self.assertTrue(tf.compat.v1.gfile.Exists(export_dir))
def test_numpy_input_fn(self):
"""Tests complete flow with numpy_input_fn."""
label_dimension = 2
batch_size = 10
data = np.linspace(0., 2., batch_size * label_dimension, dtype=np.float32)
data = data.reshape(batch_size, label_dimension)
# learn y = x
train_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=data,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = numpy_io.numpy_input_fn(
x={'x': data}, y=data, batch_size=batch_size, shuffle=False)
predict_input_fn = numpy_io.numpy_input_fn(
x={'x': data}, batch_size=batch_size, shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=label_dimension,
label_dimension=label_dimension,
batch_size=batch_size)
def test_pandas_input_fn(self):
"""Tests complete flow with pandas_input_fn."""
if not HAS_PANDAS:
return
label_dimension = 1
batch_size = 10
data = np.linspace(0., 2., batch_size, dtype=np.float32)
x = pd.DataFrame({'x': data})
y = pd.Series(data)
train_input_fn = pandas_io.pandas_input_fn(
x=x, y=y, batch_size=batch_size, num_epochs=None, shuffle=True)
eval_input_fn = pandas_io.pandas_input_fn(
x=x, y=y, batch_size=batch_size, shuffle=False)
predict_input_fn = pandas_io.pandas_input_fn(
x=x, batch_size=batch_size, shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=label_dimension,
label_dimension=label_dimension,
batch_size=batch_size)
def test_input_fn_from_parse_example(self):
"""Tests complete flow with input_fn constructed from parse_example."""
label_dimension = 2
batch_size = 10
data = np.linspace(0., 2., batch_size * label_dimension, dtype=np.float32)
data = data.reshape(batch_size, label_dimension)
serialized_examples = []
for datum in data:
example = example_pb2.Example(
features=feature_pb2.Features(
feature={
'x':
feature_pb2.Feature(
float_list=feature_pb2.FloatList(value=datum)),
'y':
feature_pb2.Feature(
float_list=feature_pb2.FloatList(value=datum)),
}))
serialized_examples.append(example.SerializeToString())
feature_spec = {
'x': tf.io.FixedLenFeature([label_dimension], tf.dtypes.float32),
'y': tf.io.FixedLenFeature([label_dimension], tf.dtypes.float32),
}
def _train_input_fn():
feature_map = tf.compat.v1.io.parse_example(serialized_examples,
feature_spec)
features = _queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _eval_input_fn():
feature_map = tf.compat.v1.io.parse_example(
tf.compat.v1.train.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = _queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _predict_input_fn():
feature_map = tf.compat.v1.io.parse_example(
tf.compat.v1.train.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = _queue_parsed_features(feature_map)
features.pop('y')
return features, None
self._test_complete_flow(
train_input_fn=_train_input_fn,
eval_input_fn=_eval_input_fn,
predict_input_fn=_predict_input_fn,
input_dimension=label_dimension,
label_dimension=label_dimension,
batch_size=batch_size)
class DNNClassifierIntegrationTest(tf.test.TestCase):
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
tf.compat.v1.summary.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def _as_label(self, data_in_float):
return np.rint(data_in_float).astype(np.int64)
def _test_complete_flow(self, train_input_fn, eval_input_fn, predict_input_fn,
input_dimension, n_classes, batch_size):
feature_columns = [
tf.feature_column.numeric_column('x', shape=(input_dimension,))
]
est = dnn.DNNClassifierV2(
hidden_units=(2, 2),
feature_columns=feature_columns,
n_classes=n_classes,
model_dir=self._model_dir)
# TRAIN
num_steps = 10
est.train(train_input_fn, steps=num_steps)
# EVALUATE
scores = est.evaluate(eval_input_fn)
self.assertEqual(num_steps, scores[tf.compat.v1.GraphKeys.GLOBAL_STEP])
self.assertIn('loss', six.iterkeys(scores))
# PREDICT
predicted_proba = np.array([
x[prediction_keys.PredictionKeys.PROBABILITIES]
for x in est.predict(predict_input_fn)
])
self.assertAllEqual((batch_size, n_classes), predicted_proba.shape)
# EXPORT
feature_spec = tf.feature_column.make_parse_example_spec(feature_columns)
serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
feature_spec)
export_dir = est.export_saved_model(tempfile.mkdtemp(),
serving_input_receiver_fn)
self.assertTrue(tf.compat.v1.gfile.Exists(export_dir))
def test_numpy_input_fn(self):
"""Tests complete flow with numpy_input_fn."""
n_classes = 3
input_dimension = 2
batch_size = 10
data = np.linspace(
0., n_classes - 1., batch_size * input_dimension, dtype=np.float32)
x_data = data.reshape(batch_size, input_dimension)
y_data = np.reshape(self._as_label(data[:batch_size]), (batch_size, 1))
# learn y = x
train_input_fn = numpy_io.numpy_input_fn(
x={'x': x_data},
y=y_data,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = numpy_io.numpy_input_fn(
x={'x': x_data}, y=y_data, batch_size=batch_size, shuffle=False)
predict_input_fn = numpy_io.numpy_input_fn(
x={'x': x_data}, batch_size=batch_size, shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=input_dimension,
n_classes=n_classes,
batch_size=batch_size)
def test_pandas_input_fn(self):
"""Tests complete flow with pandas_input_fn."""
if not HAS_PANDAS:
return
input_dimension = 1
n_classes = 3
batch_size = 10
data = np.linspace(0., n_classes - 1., batch_size, dtype=np.float32)
x = pd.DataFrame({'x': data})
y = pd.Series(self._as_label(data))
train_input_fn = pandas_io.pandas_input_fn(
x=x, y=y, batch_size=batch_size, num_epochs=None, shuffle=True)
eval_input_fn = pandas_io.pandas_input_fn(
x=x, y=y, batch_size=batch_size, shuffle=False)
predict_input_fn = pandas_io.pandas_input_fn(
x=x, batch_size=batch_size, shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=input_dimension,
n_classes=n_classes,
batch_size=batch_size)
def test_input_fn_from_parse_example(self):
"""Tests complete flow with input_fn constructed from parse_example."""
input_dimension = 2
n_classes = 3
batch_size = 10
data = np.linspace(
0., n_classes - 1., batch_size * input_dimension, dtype=np.float32)
data = data.reshape(batch_size, input_dimension)
serialized_examples = []
for datum in data:
example = example_pb2.Example(
features=feature_pb2.Features(
feature={
'x':
feature_pb2.Feature(
float_list=feature_pb2.FloatList(value=datum)),
'y':
feature_pb2.Feature(
int64_list=feature_pb2.Int64List(
value=self._as_label(datum[:1]))),
}))
serialized_examples.append(example.SerializeToString())
feature_spec = {
'x': tf.io.FixedLenFeature([input_dimension], tf.dtypes.float32),
'y': tf.io.FixedLenFeature([1], tf.dtypes.int64),
}
def _train_input_fn():
feature_map = tf.compat.v1.io.parse_example(serialized_examples,
feature_spec)
features = _queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _eval_input_fn():
feature_map = tf.compat.v1.io.parse_example(
tf.compat.v1.train.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = _queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _predict_input_fn():
feature_map = tf.compat.v1.io.parse_example(
tf.compat.v1.train.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = _queue_parsed_features(feature_map)
features.pop('y')
return features, None
self._test_complete_flow(
train_input_fn=_train_input_fn,
eval_input_fn=_eval_input_fn,
predict_input_fn=_predict_input_fn,
input_dimension=input_dimension,
n_classes=n_classes,
batch_size=batch_size)
class DNNTrainingMode(tf.test.TestCase):
"""Tests that training mode propagates to feature columns correctly."""
def setUp(self):
self._model_dir = tempfile.mkdtemp()
self._label_dimension = 1
self._batch_size = 10
def tearDown(self):
if self._model_dir:
tf.compat.v1.summary.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def _create_data(self):
data = np.linspace(
0., 2., self._batch_size * self._label_dimension, dtype=np.float32)
return data.reshape(self._batch_size, self._label_dimension)
def _get_estimator(self):
feature_columns = [
tf.feature_column.numeric_column('x', shape=(self._label_dimension,))
]
return dnn.DNNRegressorV2(
hidden_units=(2, 2),
feature_columns=feature_columns,
label_dimension=self._label_dimension,
model_dir=self._model_dir)
def test_train_vs_eval_mode(self):
data = self._create_data()
train_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=data,
batch_size=self._batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = numpy_io.numpy_input_fn(
x={'x': data}, y=data, batch_size=self._batch_size, shuffle=False)
est = self._get_estimator()
with patch.object(
tf.compat.v2.keras.layers.DenseFeatures, 'call',
return_value=data) as mock_dense_features_call:
est.train(train_input_fn, steps=10)
est.evaluate(eval_input_fn)
train_args, eval_args = mock_dense_features_call.call_args_list
# DenseFeature should have been called with training = True in train.
_, train_training_kwarg = train_args
self.assertTrue(train_training_kwarg['training'])
# DenseFeature should have been called with training = False in eval.
_, eval_training_kwarg = eval_args
self.assertFalse(eval_training_kwarg['training'])
if __name__ == '__main__':
tf.test.main()
| apache-2.0 |
ryanpstauffer/market-vis | marketvis/quotes.py | 1 | 5030 | # -*- coding: utf-8 -*-
"""
[Python 2.7 (Mayavi is not yet compatible with Python 3+)]
Created on Wed Dec 16 22:44:15 2015
@author: Ryan Stauffer
https://github.com/ryanpstauffer/market-vis
[This module referenced http://www.theodor.io/scraping-google-finance-data-using-pandas/]
Market Visualization Prototype
Quotes Module
"""
from datetime import datetime, date
import pandas as pd
import json
import urllib
import urllib2
import os
def getIntradayData(ticker, interval_seconds=61, num_days=10):
# Specify URL string based on function inputs.
urlString = 'http://www.google.com/finance/getprices?q={0}'.format(ticker.upper())
urlString += "&i={0}&p={1}d&f=d,c".format(interval_seconds,num_days)
# Request the text, and split by each line
r = urllib2.urlopen(urllib2.Request(urlString)).read()
r = r.splitlines()
# Split each line by a comma, starting at the 8th line
r = [line.split(',') for line in r[7:]]
# Save data in Pandas DataFrame
df = pd.DataFrame(r, columns=['Datetime',ticker])
# Convert UNIX to Datetime format
df['Datetime'] = df['Datetime'].apply(lambda x: datetime.fromtimestamp(int(x[1:])))
df.index = df['Datetime']
return df[ticker]
def getDailyData(ticker, startDate, endDate=date.today()):
    '''Daily quotes from the Google Finance API. startDate/endDate are datetime.date objects.'''
ticker = ticker.upper()
urlString = "http://www.google.com/finance/historical?q={0}".format(ticker)
urlString += "&startdate={0}&enddate={1}&output=csv".format(
startDate.strftime('%b %d, %Y'),endDate.strftime('%b %d, %Y'))
#Convert URL output to dataframe
df = pd.read_csv(urllib.urlopen(urlString))
# Convert strings to Datetime format
df[df.columns[0]] = df[df.columns[0]].apply(lambda x: datetime.strptime(x, '%d-%b-%y'))
#Index by date
df.index = df[df.columns[0]]
df.drop(df.columns[0], axis=1, inplace=True)
return df
def getLastPrice(ticker):
    '''Returns the last traded price of a given ticker (from the Google Finance API)'''
# Specify URL string based on function inputs.
urlString = 'http://www.google.com/finance/info?client=ig&q={0}'.format(ticker.upper())
# Request the text, and split by each line
r = urllib2.urlopen(urllib2.Request(urlString)).read()
obj = json.loads(r[3:])
print(obj)
price = float(obj[0]['l'])
return price
def buildDailyPriceData(tickerList, startDate, endDate):
print('Pulling Market Data for S&P 500 from {0} to {1}'.format(startDate.strftime('%Y%m%d'), endDate.strftime('%Y%m%d')))
#Build SP500 daily price data (for saving)
firstTicker = tickerList[0]
print(firstTicker)
firstTickerData = getDailyData(firstTicker, startDate, endDate)
firstTickerData.rename(columns={'Close' : firstTicker}, inplace = True)
df = firstTickerData[firstTicker]
for ticker in tickerList[1:]:
print(ticker)
newTicker = getDailyData(ticker, startDate, endDate)
if not newTicker.empty:
newTicker.rename(columns={'Close' : ticker}, inplace = True)
df = pd.concat([df, newTicker[ticker]], axis=1, join='outer')
#Google returns data w/ most recent at the top, this puts data in chrono order
stockPrices = df.sort_index()
print('Pulled data for {0} stocks from {1} to {2}'.format(len(stockPrices.columns), startDate.strftime('%Y%m%d'), endDate.strftime('%Y%m%d')))
return stockPrices
def buildDummyData():
'''Builds Daily Price Data from a backup .csv file
Used for offline testing purposes
'''
#Select Dates
startDate = datetime.strptime('20120101', '%Y%m%d')
endDate = datetime.strptime('20130101', '%Y%m%d')
#Load dataset from .csv
print("Pulling Market Data from .csv")
dataLoc = os.path.join(os.path.dirname(__file__),"Resources/SP500_daily_price_data.csv")
df = pd.read_csv(dataLoc)
#Convert strings to Datetime format
df[df.columns[0]] = df[df.columns[0]].apply(lambda x: datetime.strptime(x, '%Y-%m-%d'))
df.index = df[df.columns[0]]
df.drop(df.columns[0], axis=1, inplace=True)
#Build Price Table
stockPrices = df[startDate:endDate]
print('Pulled data for {0} stocks from {1} to {2}'.format(len(stockPrices.columns), startDate.strftime('%Y%m%d'), endDate.strftime('%Y%m%d')))
return stockPrices
def createIndexedPricing(stockPrices, startingIndexValue):
    '''Takes a table of stock prices and converts it to indexed pricing
    (i.e. all prices are expressed relative to a common starting index value)
Inputs:
        stockPrices => a pandas DataFrame
startingIndexValue => the value that all prices will start at
'''
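    # Example (hypothetical numbers): with startingIndexValue=100, a stock that
    # moves 50.0 -> 55.0 becomes 100.0 -> 110.0, so stocks with very different
    # share prices can be compared on a single indexed axis.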
#Build Returns Table
stockReturns = stockPrices.pct_change(1)
#Build Indexed Price Table (indexed to 100)
indexedPrices = stockReturns + 1
indexedPrices.iloc[0] = startingIndexValue
indexedPrices = indexedPrices.cumprod(axis=0)
    return indexedPrices
| mit |
aleju/self-driving-truck | lib/plotting.py | 1 | 13772 | """Classes to handle plotting during the training."""
from __future__ import print_function, division
import math
import cPickle as pickle
from collections import OrderedDict
import numpy as np
import matplotlib.pyplot as plt
import time
GROWTH_BY = 500
class History(object):
def __init__(self):
self.line_groups = OrderedDict()
@staticmethod
def from_string(s):
return pickle.loads(s)
def to_string(self):
return pickle.dumps(self, protocol=-1)
@staticmethod
def load_from_filepath(fp):
#return json.loads(open(, "r").read())
with open(fp, "r") as f:
history = pickle.load(f)
return history
def save_to_filepath(self, fp):
with open(fp, "w") as f:
pickle.dump(self, f, protocol=-1)
def add_group(self, group_name, line_names, increasing=True):
self.line_groups[group_name] = LineGroup(group_name, line_names, increasing=increasing)
def add_value(self, group_name, line_name, x, y, average=False):
self.line_groups[group_name].lines[line_name].append(x, y, average=average)
def get_group_names(self):
return list(self.line_groups.iterkeys())
def get_groups_increasing(self):
return [group.increasing for group in self.line_groups.itervalues()]
def get_max_x(self):
return max([group.get_max_x() for group in self.line_groups.itervalues()])
def get_recent_average(self, group_name, line_name, nb_points):
ys = self.line_groups[group_name].lines[line_name].ys[-nb_points:]
return np.average(ys)
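# A minimal usage sketch (hypothetical group/line names, assuming a training
# loop that reports a loss once per batch):
#   history = History()
#   history.add_group("loss", ["train", "val"], increasing=False)
#   history.add_value("loss", "train", x=batch_idx, y=loss_value, average=True)
#   history.save_to_filepath("train_history.pickle")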
class LineGroup(object):
def __init__(self, group_name, line_names, increasing=True):
self.group_name = group_name
self.lines = OrderedDict([(name, Line()) for name in line_names])
self.increasing = increasing
self.xlim = (None, None)
def get_line_names(self):
return list(self.lines.iterkeys())
def get_line_xs(self):
#return [line.xs for line in self.lines.itervalues()]
"""
for key, line in self.lines.items():
if not hasattr(line, "last_index"):
print(self.group_name, key, "no last index")
else:
print(self.group_name, key, "OK")
print(type(line.xs), type(line.ys), type(line.counts), type(line.datetimes))
"""
return [line.get_xs() for line in self.lines.itervalues()]
def get_line_ys(self):
#return [line.ys for line in self.lines.itervalues()]
return [line.get_ys() for line in self.lines.itervalues()]
def get_max_x(self):
#return max([max(line.xs) if len(line.xs) > 0 else 0 for line in self.lines.itervalues()])
        return max([np.max(line.get_xs()) if line.last_index > -1 else 0 for line in self.lines.itervalues()])
"""
class Line(object):
def __init__(self, xs=None, ys=None, counts=None, datetimes=None):
self.xs = xs if xs is not None else []
self.ys = ys if ys is not None else []
self.counts = counts if counts is not None else []
self.datetimes = datetimes if datetimes is not None else []
self.last_index = -1
def append(self, x, y, average=False):
# legacy (for loading from pickle)
#if not hasattr(self, "counts"):
# self.counts = [1] * len(self.xs)
# ---
if not average or len(self.xs) == 0 or self.xs[-1] != x:
self.xs.append(x)
self.ys.append(float(y)) # float to get rid of numpy
self.counts.append(1)
self.datetimes.append(time.time())
else:
count = self.counts[-1]
self.ys[-1] = ((self.ys[-1] * count) + y) / (count+1)
self.counts[-1] += 1
self.datetimes[-1] = time.time()
"""
class Line(object):
def __init__(self, xs=None, ys=None, counts=None, datetimes=None):
zeros = np.tile(np.array([0], dtype=np.int32), GROWTH_BY)
self.xs = xs if xs is not None else np.copy(zeros)
self.ys = ys if ys is not None else zeros.astype(np.float32)
self.counts = counts if counts is not None else zeros.astype(np.uint16)
self.datetimes = datetimes if datetimes is not None else zeros.astype(np.uint64)
self.last_index = -1
# for legacy as functions, replace with properties
def get_xs(self):
# legacy
if isinstance(self.xs, list):
self._legacy_convert_from_list_to_np()
return self.xs[0:self.last_index+1]
def get_ys(self):
return self.ys[0:self.last_index+1]
def get_counts(self):
return self.counts[0:self.last_index+1]
def get_datetimes(self):
return self.datetimes[0:self.last_index+1]
def _legacy_convert_from_list_to_np(self):
#print("is list!")
print("[plotting] Converting from list to numpy...")
self.last_index = len(self.xs) - 1
self.xs = np.array(self.xs, dtype=np.int32)
self.ys = np.array(self.ys, dtype=np.float32)
self.counts = np.array(self.counts, dtype=np.uint16)
self.datetimes = np.array([int(dt*1000) for dt in self.datetimes], dtype=np.uint64)
def append(self, x, y, average=False):
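        # With average=True a repeated x does not create a new point; instead
        # the stored y becomes the running mean of all values reported for that
        # x (the per-point counter lives in self.counts).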
# legacy (for loading from pickle)
#if not hasattr(self, "counts"):
# self.counts = [1] * len(self.xs)
# ---
#legacy
if isinstance(self.xs, list):
self._legacy_convert_from_list_to_np()
if (self.last_index+1) == self.xs.shape[0]:
#print("growing from %d by %d..." % (self.xs.shape[0], GROWTH_BY), self.xs.shape, self.ys.shape, self.counts.shape, self.datetimes.shape)
zeros = np.tile(np.array([0], dtype=np.int32), GROWTH_BY)
self.xs = np.append(self.xs, np.copy(zeros))
self.ys = np.append(self.ys, zeros.astype(np.float32))
self.counts = np.append(self.counts, zeros.astype(np.uint16))
self.datetimes = np.append(self.datetimes, zeros.astype(np.uint64))
#print("growing done", self.xs.shape, self.ys.shape, self.counts.shape, self.datetimes.shape)
first_entry = (self.last_index == -1)
if not average or first_entry or self.xs[self.last_index] != x:
idx = self.last_index + 1
self.xs[idx] = x
self.ys[idx] = y
self.counts[idx] = 1
self.datetimes[idx] = int(time.time()*1000)
self.last_index = idx
else:
idx = self.last_index
count = self.counts[idx]
self.ys[idx] = ((self.ys[idx] * count) + y) / (count+1)
self.counts[idx] = count + 1
self.datetimes[idx] = int(time.time()*1000)
#print("added", x, y, average)
#print(self.xs[self.last_index-10:self.last_index+10+1])
#print(self.ys[self.last_index-10:self.last_index+10+1])
#print(self.counts[self.last_index-10:self.last_index+10+1])
#print(self.datetimes[self.last_index-10:self.last_index+10+1])
class LossPlotter(object):
def __init__(self, titles, increasing, save_to_fp):
assert len(titles) == len(increasing)
n_plots = len(titles)
self.titles = titles
self.increasing = dict([(title, incr) for title, incr in zip(titles, increasing)])
self.xlim = dict([(title, (None, None)) for title in titles])
self.colors = ["red", "blue", "cyan", "magenta", "orange", "black"]
self.nb_points_max = 500
self.save_to_fp = save_to_fp
self.start_batch_idx = 0
self.autolimit_y = False
self.autolimit_y_multiplier = 5
#self.fig, self.axes = plt.subplots(nrows=2, ncols=2, figsize=(20, 20))
nrows = max(1, int(math.sqrt(n_plots)))
ncols = int(math.ceil(n_plots / nrows))
width = ncols * 10
height = nrows * 10
self.fig, self.axes = plt.subplots(nrows=nrows, ncols=ncols, figsize=(width, height))
if nrows == 1 and ncols == 1:
self.axes = [self.axes]
else:
self.axes = self.axes.flat
title_to_ax = dict()
for idx, (title, ax) in enumerate(zip(self.titles, self.axes)):
title_to_ax[title] = ax
self.title_to_ax = title_to_ax
self.fig.tight_layout()
self.fig.subplots_adjust(left=0.05)
def plot(self, history):
for plot_idx, title in enumerate(self.titles):
ax = self.title_to_ax[title]
group_name = title
group_increasing = self.increasing[title]
group = history.line_groups[title]
line_names = group.get_line_names()
#print("getting line x/y...", time.time())
line_xs = group.get_line_xs()
line_ys = group.get_line_ys()
#print("getting line x/y FIN", time.time())
"""
print("title", title)
print("line_names", line_names)
for i, xx in enumerate(line_xs):
print("line_xs i: ", xx)
for i, yy in enumerate(line_ys):
print("line_ys i: ", yy)
"""
if any([len(xx) > 0 for xx in line_xs]):
xs_min = min([min(xx) for xx in line_xs if len(xx) > 0])
xs_max = max([max(xx) for xx in line_xs if len(xx) > 0])
xlim = self.xlim[title]
xlim = [
max(xs_min, self.start_batch_idx) if xlim[0] is None else min(xlim[0], xs_max-1),
xs_max+1 if xlim[1] is None else xlim[1]
]
if xlim[0] < 0:
xlim[0] = max(xs_max - abs(xlim[0]), 0)
if xlim[1] < 0:
xlim[1] = max(xs_max - abs(xlim[1]), 1)
else:
# none of the lines has any value, so just use dummy values
# to avoid min/max of empty sequence errors
xlim = [
0 if self.xlim[title][0] is None else self.xlim[title][0],
1 if self.xlim[title][1] is None else self.xlim[title][1]
]
self._plot_group(ax, group_name, group_increasing, line_names, line_xs, line_ys, xlim)
self.fig.savefig(self.save_to_fp)
# this seems to be slow sometimes
def _line_to_xy(self, line_x, line_y, xlim, limit_y_min=None, limit_y_max=None):
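        # Buckets the points inside xlim and averages each bucket so that at
        # most roughly self.nb_points_max points are drawn per line.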
def _add_point(points_x, points_y, curr_sum, counter):
points_x.append(batch_idx)
y = curr_sum / counter
if limit_y_min is not None and limit_y_max is not None:
y = np.clip(y, limit_y_min, limit_y_max)
elif limit_y_min is not None:
y = max(y, limit_y_min)
elif limit_y_max is not None:
y = min(y, limit_y_max)
points_y.append(y)
nb_points = 0
for i in range(len(line_x)):
batch_idx = line_x[i]
if xlim[0] <= batch_idx < xlim[1]:
nb_points += 1
point_every = max(1, int(nb_points / self.nb_points_max))
points_x = []
points_y = []
curr_sum = 0
counter = 0
for i in range(len(line_x)):
batch_idx = line_x[i]
if xlim[0] <= batch_idx < xlim[1]:
curr_sum += line_y[i]
counter += 1
if counter >= point_every:
_add_point(points_x, points_y, curr_sum, counter)
counter = 0
curr_sum = 0
if counter > 0:
_add_point(points_x, points_y, curr_sum, counter)
return points_x, points_y
def _plot_group(self, ax, group_name, group_increasing, line_names, line_xs, line_ys, xlim):
ax.cla()
ax.grid()
if self.autolimit_y and any([len(line_xs) > 0 for line_xs in line_xs]):
min_x = min([np.min(line_x) for line_x in line_xs])
max_x = max([np.max(line_x) for line_x in line_xs])
min_y = min([np.min(line_y) for line_y in line_ys])
max_y = max([np.max(line_y) for line_y in line_ys])
if group_increasing:
if max_y > 0:
limit_y_max = None
limit_y_min = max_y / self.autolimit_y_multiplier
if min_y > limit_y_min:
limit_y_min = None
else:
if min_y > 0:
limit_y_max = min_y * self.autolimit_y_multiplier
limit_y_min = None
if max_y < limit_y_max:
limit_y_max = None
if limit_y_min is not None:
ax.plot((min_x, max_x), (limit_y_min, limit_y_min), c="purple")
if limit_y_max is not None:
ax.plot((min_x, max_x), (limit_y_max, limit_y_max), c="purple")
            # limit the y-axis range
yaxmin = min_y if limit_y_min is None else limit_y_min
yaxmax = max_y if limit_y_max is None else limit_y_max
yrange = yaxmax - yaxmin
yaxmin = yaxmin - (0.05 * yrange)
yaxmax = yaxmax + (0.05 * yrange)
ax.set_ylim([yaxmin, yaxmax])
else:
limit_y_min = None
limit_y_max = None
for line_name, line_x, line_y, line_col in zip(line_names, line_xs, line_ys, self.colors):
#print("line to xy...", time.time())
x, y = self._line_to_xy(line_x, line_y, xlim, limit_y_min=limit_y_min, limit_y_max=limit_y_max)
#print("line to xy FIN", time.time())
#print("plotting ax...", time.time())
ax.plot(x, y, color=line_col, linewidth=1.0)
#print("plotting ax FIN", time.time())
ax.set_title(group_name)
| mit |
valsson/MD-MC-Codes-2016 | HarmonicOscillator-MD/HarmonicOscillator-MD-Verlet.py | 1 | 4262 | #! /usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
from DataTools import writeDataToFile
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--time-step',dest='time_step',required=False)
parser.add_argument('--output-file',dest='fn_out',required=False)
args = parser.parse_args()
# Parameters of potential
m = 1.0
k = (2.0*np.pi)**2
angular_freq = np.sqrt(k/m)
freq = angular_freq/(2.0*np.pi)
period = 1.0/freq
# MD Parameters
if(args.time_step):
time_step = np.float64(args.time_step)
else:
time_step = 0.01*period
if(args.fn_out):
fn_out = args.fn_out
else:
fn_out = 'results.data'
showPlots = False
#num_periods = 20
#num_steps = np.int(np.rint( (num_periods*period)/time_step ))
num_steps = 10000
# initial postion and velocity at t=0
initial_position = 2.0
initial_velocity = 0.0
def getPotentialEnergy(x):
potential_ener = 0.5*k*x**2
return potential_ener
#-------------------------------
def getForce(x):
force = -k*x
return force
#-------------------------------
def getAccleration(x):
return getForce(x)/m
#-------------------------------
def getPotentialAndForce(x):
return ( getPotentialEnergy(x), getForce(x) )
#-------------------------------
def getKineticEnergy(v):
kinetic_ener = 0.5*m*v**2
return kinetic_ener
#-------------------------------
def getTotalEnergy(x,v):
return getPotentialEnergy(x)+getKineticEnergy(v)
#-------------------------------
# analytical solution:
phi = np.arctan(-initial_velocity/(initial_position*angular_freq))
amplitude = initial_position/np.cos(phi)
conserved_energy = getPotentialEnergy(amplitude)
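# These follow from the closed-form solution of the harmonic oscillator,
#   x(t) = A*cos(omega*t + phi),  v(t) = -A*omega*sin(omega*t + phi),
# with A and phi fixed by the initial position and velocity; the conserved
# energy is the potential energy at the turning point x = A.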
# ----------------------
times = []
positions = []
velocites = []
pot_energies = []
kin_energies = []
tot_energies = []
time = 0.0
curr_position = initial_position
prev_position = curr_position-initial_velocity*time_step + 0.5*getAccleration(curr_position)*time_step**2
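# x(-dt) is seeded from a second-order Taylor expansion about t=0 so that the
# first position-Verlet step is consistent with the requested initial velocity.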
curr_velocity = initial_velocity
for i in range(num_steps):
if (i+1) % (num_steps/10) == 0:
print 'MD step {0:6d} of {1:6d}'.format(i+1,num_steps)
# get force at t
accleration = getAccleration(curr_position)
# get new position at t+dt
new_position = 2.0*curr_position - prev_position + accleration*time_step**2
# get velocity at t
curr_velocity = (new_position - prev_position) / (2.0*time_step)
# get energies at t
curr_pot_ener = getPotentialEnergy(curr_position)
curr_kin_ener = getKineticEnergy(curr_velocity)
curr_tot_ener = curr_pot_ener + curr_kin_ener
#
times.append( time )
positions.append( curr_position )
velocites.append( curr_velocity )
pot_energies.append( curr_pot_ener )
kin_energies.append( curr_kin_ener )
tot_energies.append( curr_tot_ener )
#
prev_position = curr_position
curr_position = new_position
time += time_step
#
#----------------------------------------
times = np.array(times)
positions = np.array(positions)
velocites = np.array(velocites)
pot_energies = np.array(pot_energies)
kin_energies = np.array(kin_energies)
tot_energies = np.array(tot_energies)
positions_analytical = amplitude*np.cos(angular_freq*times+phi)
velocites_analytical = -angular_freq*amplitude*np.sin(angular_freq*times+phi)
writeDataToFile(fn_out,
[times,positions,velocites,pot_energies,kin_energies,tot_energies,positions_analytical,velocites_analytical],
['time','pos','vel','pot_ene','kin_ene','tot_ene','pos_an','vel_an'],
constantsNames=['time_step','period','amplitude','k','m','phi','conserved_energy'],
constantsValues=[time_step,period,amplitude,k,m,phi,conserved_energy],
dataFormat='%15.8f')
if showPlots:
plt.figure(1)
plt.plot(times,tot_energies)
plt.plot(times,pot_energies)
plt.plot(times,kin_energies)
plt.show()
plt.figure(2)
plt.plot(times,pot_energies)
plt.show()
plt.figure(3)
plt.plot(times,kin_energies)
plt.show()
plt.figure(4)
plt.plot(times,velocites)
plt.show()
plt.figure(5)
plt.plot(times,positions)
plt.plot(times,positions_analytical)
plt.show()
plt.figure(6)
plt.plot(times,positions-positions_analytical)
plt.show()
#
| mit |
QuLogic/specfem1d | Python_version/grid.py | 2 | 2988 | # -*- coding: utf-8 -*-
'''
Definitions of the grid.
'''
from __future__ import (absolute_import, division, print_function)
import numpy as np
import functions
import gll
class OneDimensionalGrid(object):
"""Contains the grid properties"""
def __init__(self, param):
"""Init"""
self.param = param
self.z = np.zeros(param.nGlob)
self.rho = np.zeros((param.nSpec, param.nGLL))
self.mu = np.zeros((param.nSpec, param.nGLL))
self.ticks = np.zeros(param.nSpec + 1)
if param.gridType == 'homogeneous':
self.ticks = np.linspace(0, param.length, param.nSpec + 1)
self.rho.fill(param.meanRho)
self.mu.fill(param.meanMu)
self.z[1:param.nGLJ] = functions.project_inverse(
param.ksiGLJ[1:param.nGLJ],
0,
self.ticks)
ksiGLL = param.ksiGLL[1:]
for i in range(param.nGLL, param.nGlob, param.N):
self.z[i:i + param.N] = functions.project_inverse(ksiGLL,
i // param.N,
self.ticks)
self.z[-1] = self.ticks[-1]
elif param.gridType == 'gradient':
msg = "typeOfGrid == 'gradient' has not been implemented yet"
raise NotImplementedError(msg)
elif param.gridType == 'miscellaneous':
msg = "typeOfGrid == 'miscellaneous' has not been implemented yet"
raise NotImplementedError(msg)
elif param.gridType == 'file':
self.z, self.rho, self.mu = np.loadtxt(param.gridFile, unpack=True)
self.ticks = np.loadtxt(param.ticksFile)
else:
raise ValueError('Unknown grid type: %s' % (param.gridType, ))
# Jacobians at the GLL (and GLJ for the first element in axisym)
# points (arrays nSpec*(N+1) elements)
self.dXdKsi = gll.jacobian(self.ticks, param)
self.dKsiDx = gll.jacobian_inverse(self.ticks, param)
def plot(self):
"""Plot the grid
my_ticks gives the abscissa of the borders
TODO I should test : _the types of the parameters
_their sizes"""
import matplotlib.pyplot as plt
from matplotlib.ticker import FixedLocator
fig, ax = plt.subplots(2, 1, sharex=True)
ax[0].plot(self.z[self.param.ibool].flat, self.rho.flat, 'b+')
ax[0].set_title(r'$\rho(z)$')
ax[0].xaxis.set_minor_locator(FixedLocator(self.ticks))
ax[0].xaxis.grid(True, which='minor', alpha=0.5)
ax[0].yaxis.grid(True)
ax[1].plot(self.z[self.param.ibool].flat, self.mu.flat, 'r+')
ax[1].set_title(r'$\mu(z)$')
ax[1].xaxis.set_minor_locator(FixedLocator(self.ticks))
ax[1].xaxis.grid(True, which='minor', alpha=0.5)
ax[1].yaxis.grid(True)
plt.suptitle('Grid')
plt.show()
| gpl-2.0 |
tawsifkhan/scikit-learn | sklearn/datasets/tests/test_lfw.py | 230 | 7880 | """This test for the LFW require medium-size data dowloading and processing
If the data has not been already downloaded by running the examples,
the tests won't run (skipped).
If the test are run, the first execution will be long (typically a bit
more than a couple of minutes) but as the dataset loader is leveraging
joblib, successive runs will be fast (less than 200ms).
"""
import random
import os
import shutil
import tempfile
import numpy as np
from sklearn.externals import six
try:
try:
from scipy.misc import imsave
except ImportError:
from scipy.misc.pilutil import imsave
except ImportError:
imsave = None
from sklearn.datasets import load_lfw_pairs
from sklearn.datasets import load_lfw_people
from sklearn.datasets import fetch_lfw_pairs
from sklearn.datasets import fetch_lfw_people
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import raises
SCIKIT_LEARN_DATA = tempfile.mkdtemp(prefix="scikit_learn_lfw_test_")
SCIKIT_LEARN_EMPTY_DATA = tempfile.mkdtemp(prefix="scikit_learn_empty_test_")
LFW_HOME = os.path.join(SCIKIT_LEARN_DATA, 'lfw_home')
FAKE_NAMES = [
'Abdelatif_Smith',
'Abhati_Kepler',
'Camara_Alvaro',
'Chen_Dupont',
'John_Lee',
'Lin_Bauman',
'Onur_Lopez',
]
def setup_module():
"""Test fixture run once and common to all tests of this module"""
if imsave is None:
raise SkipTest("PIL not installed.")
if not os.path.exists(LFW_HOME):
os.makedirs(LFW_HOME)
random_state = random.Random(42)
np_rng = np.random.RandomState(42)
# generate some random jpeg files for each person
counts = {}
for name in FAKE_NAMES:
folder_name = os.path.join(LFW_HOME, 'lfw_funneled', name)
if not os.path.exists(folder_name):
os.makedirs(folder_name)
n_faces = np_rng.randint(1, 5)
counts[name] = n_faces
for i in range(n_faces):
file_path = os.path.join(folder_name, name + '_%04d.jpg' % i)
uniface = np_rng.randint(0, 255, size=(250, 250, 3))
try:
imsave(file_path, uniface)
except ImportError:
raise SkipTest("PIL not installed")
# add some random file pollution to test robustness
with open(os.path.join(LFW_HOME, 'lfw_funneled', '.test.swp'), 'wb') as f:
f.write(six.b('Text file to be ignored by the dataset loader.'))
# generate some pairing metadata files using the same format as LFW
with open(os.path.join(LFW_HOME, 'pairsDevTrain.txt'), 'wb') as f:
f.write(six.b("10\n"))
more_than_two = [name for name, count in six.iteritems(counts)
if count >= 2]
for i in range(5):
name = random_state.choice(more_than_two)
first, second = random_state.sample(range(counts[name]), 2)
f.write(six.b('%s\t%d\t%d\n' % (name, first, second)))
for i in range(5):
first_name, second_name = random_state.sample(FAKE_NAMES, 2)
first_index = random_state.choice(np.arange(counts[first_name]))
second_index = random_state.choice(np.arange(counts[second_name]))
f.write(six.b('%s\t%d\t%s\t%d\n' % (first_name, first_index,
second_name, second_index)))
with open(os.path.join(LFW_HOME, 'pairsDevTest.txt'), 'wb') as f:
f.write(six.b("Fake place holder that won't be tested"))
with open(os.path.join(LFW_HOME, 'pairs.txt'), 'wb') as f:
f.write(six.b("Fake place holder that won't be tested"))
def teardown_module():
"""Test fixture (clean up) run once after all tests of this module"""
if os.path.isdir(SCIKIT_LEARN_DATA):
shutil.rmtree(SCIKIT_LEARN_DATA)
if os.path.isdir(SCIKIT_LEARN_EMPTY_DATA):
shutil.rmtree(SCIKIT_LEARN_EMPTY_DATA)
@raises(IOError)
def test_load_empty_lfw_people():
fetch_lfw_people(data_home=SCIKIT_LEARN_EMPTY_DATA, download_if_missing=False)
def test_load_lfw_people_deprecation():
msg = ("Function 'load_lfw_people' has been deprecated in 0.17 and will be "
"removed in 0.19."
"Use fetch_lfw_people(download_if_missing=False) instead.")
assert_warns_message(DeprecationWarning, msg, load_lfw_people,
data_home=SCIKIT_LEARN_DATA)
def test_load_fake_lfw_people():
lfw_people = fetch_lfw_people(data_home=SCIKIT_LEARN_DATA,
min_faces_per_person=3, download_if_missing=False)
# The data is cropped around the center as a rectangular bounding box
# around the face. Colors are converted to gray levels:
assert_equal(lfw_people.images.shape, (10, 62, 47))
assert_equal(lfw_people.data.shape, (10, 2914))
# the target is array of person integer ids
assert_array_equal(lfw_people.target, [2, 0, 1, 0, 2, 0, 2, 1, 1, 2])
# names of the persons can be found using the target_names array
expected_classes = ['Abdelatif Smith', 'Abhati Kepler', 'Onur Lopez']
assert_array_equal(lfw_people.target_names, expected_classes)
# It is possible to ask for the original data without any cropping or color
# conversion and with no limit on the number of pictures per person
lfw_people = fetch_lfw_people(data_home=SCIKIT_LEARN_DATA,
resize=None, slice_=None, color=True, download_if_missing=False)
assert_equal(lfw_people.images.shape, (17, 250, 250, 3))
# the ids and class names are the same as previously
assert_array_equal(lfw_people.target,
[0, 0, 1, 6, 5, 6, 3, 6, 0, 3, 6, 1, 2, 4, 5, 1, 2])
assert_array_equal(lfw_people.target_names,
['Abdelatif Smith', 'Abhati Kepler', 'Camara Alvaro',
'Chen Dupont', 'John Lee', 'Lin Bauman', 'Onur Lopez'])
@raises(ValueError)
def test_load_fake_lfw_people_too_restrictive():
fetch_lfw_people(data_home=SCIKIT_LEARN_DATA, min_faces_per_person=100, download_if_missing=False)
@raises(IOError)
def test_load_empty_lfw_pairs():
fetch_lfw_pairs(data_home=SCIKIT_LEARN_EMPTY_DATA, download_if_missing=False)
def test_load_lfw_pairs_deprecation():
msg = ("Function 'load_lfw_pairs' has been deprecated in 0.17 and will be "
"removed in 0.19."
"Use fetch_lfw_pairs(download_if_missing=False) instead.")
assert_warns_message(DeprecationWarning, msg, load_lfw_pairs,
data_home=SCIKIT_LEARN_DATA)
def test_load_fake_lfw_pairs():
lfw_pairs_train = fetch_lfw_pairs(data_home=SCIKIT_LEARN_DATA, download_if_missing=False)
# The data is cropped around the center as a rectangular bounding box
# around the face. Colors are converted to gray levels:
assert_equal(lfw_pairs_train.pairs.shape, (10, 2, 62, 47))
# the target is whether the person is the same or not
assert_array_equal(lfw_pairs_train.target, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
# names of the persons can be found using the target_names array
expected_classes = ['Different persons', 'Same person']
assert_array_equal(lfw_pairs_train.target_names, expected_classes)
# It is possible to ask for the original data without any cropping or color
# conversion
lfw_pairs_train = fetch_lfw_pairs(data_home=SCIKIT_LEARN_DATA,
resize=None, slice_=None, color=True, download_if_missing=False)
assert_equal(lfw_pairs_train.pairs.shape, (10, 2, 250, 250, 3))
# the ids and class names are the same as previously
assert_array_equal(lfw_pairs_train.target, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
assert_array_equal(lfw_pairs_train.target_names, expected_classes)
| bsd-3-clause |
anntzer/scikit-learn | examples/model_selection/plot_confusion_matrix.py | 8 | 2074 | """
================
Confusion matrix
================
Example of confusion matrix usage to evaluate the quality
of the output of a classifier on the iris data set. The
diagonal elements represent the number of points for which
the predicted label is equal to the true label, while
off-diagonal elements are those that are mislabeled by the
classifier. The higher the diagonal values of the confusion
matrix the better, indicating many correct predictions.
The figures show the confusion matrix with and without
normalization by class support size (number of elements
in each class). This kind of normalization can be
interesting in case of class imbalance to have a more
visual interpretation of which class is being misclassified.
Here the results are not as good as they could be as our
choice for the regularization parameter C was not the best.
In real life applications this parameter is usually chosen
using :ref:`grid_search`.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
from sklearn.model_selection import train_test_split
from sklearn.metrics import ConfusionMatrixDisplay
# import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
class_names = iris.target_names
# Split the data into a training set and a test set
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
# Run classifier, using a model that is too regularized (C too low) to see
# the impact on the results
classifier = svm.SVC(kernel='linear', C=0.01).fit(X_train, y_train)
np.set_printoptions(precision=2)
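# A small sketch of what normalize='true' is assumed to do: divide each row of
# the raw confusion matrix by its row sum (the true-class support). The toy
# array below is illustrative only and is not used by the plots that follow.
_toy_cm = np.array([[8., 2.], [1., 9.]])
_toy_cm_normalized = _toy_cm / _toy_cm.sum(axis=1, keepdims=True)  # rows sum to 1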
# Plot non-normalized confusion matrix
titles_options = [("Confusion matrix, without normalization", None),
("Normalized confusion matrix", 'true')]
for title, normalize in titles_options:
disp = ConfusionMatrixDisplay.from_estimator(
classifier, X_test, y_test, display_labels=class_names,
cmap=plt.cm.Blues, normalize=normalize
)
disp.ax_.set_title(title)
print(title)
print(disp.confusion_matrix)
plt.show()
| bsd-3-clause |
cleesmith/sentdex_scikit_machine_learning_tutorial_for_investing | p06.py | 2 | 1516 | import pandas as pd
import os
import time
from datetime import datetime
# path = "X:/Backups/intraQuarter" # for Windows with X files :)
# if git clone'ed then use relative path,
# assuming you extracted the downloaded zip into this project's folder:
path = "intraQuarter"
def Key_Stats(gather="Total Debt/Equity (mrq)"):
statspath = path+'/_KeyStats'
stock_list = [x[0] for x in os.walk(statspath)]
df = pd.DataFrame(columns = ['Date','Unix','Ticker','DE Ratio'])
for each_dir in stock_list[1:]:
each_file = os.listdir(each_dir)
# ticker = each_dir.split("\\")[1] # Windows only
# ticker = each_dir.split("/")[1] # this didn't work so do this:
ticker = os.path.basename(os.path.normpath(each_dir))
# print(ticker) # uncomment to verify
if len(each_file) > 0:
for file in each_file:
date_stamp = datetime.strptime(file, '%Y%m%d%H%M%S.html')
unix_time = time.mktime(date_stamp.timetuple())
full_file_path = each_dir+'/'+file
source = open(full_file_path,'r').read()
try:
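# Scrape the numeric value that sits between the key-stats label cell and the
# closing </td> tag in the saved HTML (a simple string-split approach)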
value = float(source.split(gather+':</td><td class="yfnc_tabledata1">')[1].split('</td>')[0])
# print("value=%s"%value) # uncomment to see what's up
df = df.append({'Date':date_stamp,'Unix':unix_time,'Ticker':ticker,'DE Ratio':value,}, ignore_index = True)
except Exception as e:
pass
save = gather.replace(' ','').replace(')','').replace('(','').replace('/','')+('.csv')
print(save)
df.to_csv(save)
Key_Stats() | mit |
CroatianMeteorNetwork/RMS | RMS/Astrometry/CheckFit.py | 1 | 25717 | """ Automatic refinement of the astrometric calibration. An initial astrometric calibration is needed, which will be
refined using all stars from a given night.
"""
from __future__ import print_function, division, absolute_import
import os
import sys
import copy
import shutil
import random
import argparse
import numpy as np
import scipy.optimize
import matplotlib.pyplot as plt
import RMS.ConfigReader as cr
from RMS.Formats import Platepar
from RMS.Formats import CALSTARS
from RMS.Formats import StarCatalog
from RMS.Formats import FFfile
from RMS.Astrometry.ApplyAstrometry import raDecToXYPP, xyToRaDecPP, rotationWrtHorizon, getFOVSelectionRadius
from RMS.Astrometry.Conversions import date2JD, jd2Date, raDec2AltAz
from RMS.Astrometry.FFTalign import alignPlatepar
from RMS.Math import angularSeparation
# Import Cython functions
import pyximport
pyximport.install(setup_args={'include_dirs':[np.get_include()]})
from RMS.Astrometry.CyFunctions import matchStars, subsetCatalog
def computeMinimizationTolerances(config, platepar, star_dict_len):
""" Compute tolerances for minimization. """
# Calculate the function tolerance, so the desired precision can be reached (the number is calculated
# in the same regard as the cost function)
fatol = (config.dist_check_threshold**2)/np.sqrt(star_dict_len*config.min_matched_stars + 1)
# Parameter estimation tolerance for angular values
fov_w = platepar.X_res/platepar.F_scale
xatol_ang = config.dist_check_threshold*fov_w/platepar.X_res
return fatol, xatol_ang
def matchStarsResiduals(config, platepar, catalog_stars, star_dict, match_radius, ret_nmatch=False, \
sky_coords=False, lim_mag=None, verbose=False):
""" Match the image and catalog stars with the given astrometry solution and estimate the residuals
between them.
Arguments:
config: [Config structure]
platepar: [Platepar structure] Astrometry parameters.
catalog_stars: [ndarray] An array of catalog stars (ra, dec, mag).
star_dict: [dict] A dictionary where the keys are JDs when the stars were recorded and values are
2D list of stars, each entry is (X, Y, bg_level, level, fwhm).
match_radius: [float] Maximum radius for star matching (pixels).
Keyword arguments:
ret_nmatch: [bool] If True, the function returns the number of matched stars and the average
deviation. False by default.
sky_coords: [bool] If True, sky coordinate residuals in RA, Dec will be used to compute the cost
function, not image coordinates.
lim_mag: [float] Override the limiting magnitude from config. None by default.
verbose: [bool] Print results. False by default.
Return:
cost: [float] The cost function which weights the number of matched stars and the average deviation.
"""
if lim_mag is None:
lim_mag = config.catalog_mag_limit
# Estimate the FOV radius
fov_radius = getFOVSelectionRadius(platepar)
# Dictionary containing the matched stars, the keys are JDs of every image
matched_stars = {}
# Go through every FF image and its stars
for jd in star_dict:
# Estimate RA,dec of the centre of the FOV
_, RA_c, dec_c, _ = xyToRaDecPP([jd2Date(jd)], [platepar.X_res/2], [platepar.Y_res/2], [1], \
platepar, extinction_correction=False)
RA_c = RA_c[0]
dec_c = dec_c[0]
# Get stars from the catalog around the defined center in a given radius
_, extracted_catalog = subsetCatalog(catalog_stars, RA_c, dec_c, jd, platepar.lat, platepar.lon, \
fov_radius, lim_mag)
ra_catalog, dec_catalog, mag_catalog = extracted_catalog.T
# Extract stars for the given Julian date
stars_list = star_dict[jd]
stars_list = np.array(stars_list)
# Convert all catalog stars to image coordinates
cat_x_array, cat_y_array = raDecToXYPP(ra_catalog, dec_catalog, jd, platepar)
# Take only those stars which are within the FOV
x_indices = np.argwhere((cat_x_array >= 0) & (cat_x_array < platepar.X_res))
y_indices = np.argwhere((cat_y_array >= 0) & (cat_y_array < platepar.Y_res))
cat_good_indices = np.intersect1d(x_indices, y_indices).astype(np.uint32)
# cat_x_array = cat_x_array[good_indices]
# cat_y_array = cat_y_array[good_indices]
# # Plot image stars
# im_y, im_x, _, _ = stars_list.T
# plt.scatter(im_y, im_x, facecolors='none', edgecolor='g')
# # Plot catalog stars
# plt.scatter(cat_y_array[cat_good_indices], cat_x_array[cat_good_indices], c='r', s=20, marker='+')
# plt.show()
# Match image and catalog stars
matched_indices = matchStars(stars_list, cat_x_array, cat_y_array, cat_good_indices, match_radius)
# Skip this image if no stars were matched
if len(matched_indices) < config.min_matched_stars:
continue
matched_indices = np.array(matched_indices)
matched_img_inds, matched_cat_inds, dist_list = matched_indices.T
# Extract data from matched stars
matched_img_stars = stars_list[matched_img_inds.astype(int)]
matched_cat_stars = extracted_catalog[matched_cat_inds.astype(int)]
# Put the matched stars to a dictionary
matched_stars[jd] = [matched_img_stars, matched_cat_stars, dist_list]
# # Plot matched stars
# im_y, im_x, _, _ = matched_img_stars.T
# cat_y = cat_y_array[matched_cat_inds.astype(np.int)]
# cat_x = cat_x_array[matched_cat_inds.astype(np.int)]
# plt.scatter(im_x, im_y, c='r', s=5)
# plt.scatter(cat_x, cat_y, facecolors='none', edgecolor='g')
# plt.xlim([0, platepar.X_res])
# plt.ylim([platepar.Y_res, 0])
# plt.show()
# If residuals on the image should be computed
if not sky_coords:
unit_label = 'px'
# Extract all distances
global_dist_list = []
# level_list = []
# mag_list = []
for jd in matched_stars:
# matched_img_stars, matched_cat_stars, dist_list = matched_stars[jd]
_, _, dist_list = matched_stars[jd]
global_dist_list += dist_list.tolist()
# # TEST
# level_list += matched_img_stars[:, 3].tolist()
# mag_list += matched_cat_stars[:, 2].tolist()
# # Plot levels vs. magnitudes
# plt.scatter(mag_list, np.log10(level_list))
# plt.xlabel('Magnitude')
# plt.ylabel('Log10 level')
# plt.show()
# Compute the residuals on the sky
else:
unit_label = 'arcmin'
global_dist_list = []
# Go through all matched stars
for jd in matched_stars:
matched_img_stars, matched_cat_stars, dist_list = matched_stars[jd]
# Go through all stars on the image
for img_star_entry, cat_star_entry in zip(matched_img_stars, matched_cat_stars):
# Extract star coords
star_y = img_star_entry[0]
star_x = img_star_entry[1]
cat_ra = cat_star_entry[0]
cat_dec = cat_star_entry[1]
# Convert image coordinates to RA/Dec
_, star_ra, star_dec, _ = xyToRaDecPP([jd2Date(jd)], [star_x], [star_y], [1], \
platepar, extinction_correction=False)
# Compute angular distance between the predicted and the catalog position
ang_dist = np.degrees(angularSeparation(np.radians(cat_ra), np.radians(cat_dec), \
np.radians(star_ra[0]), np.radians(star_dec[0])))
# Store the angular separation in arc minutes
global_dist_list.append(ang_dist*60)
# Number of matched stars
n_matched = len(global_dist_list)
if n_matched == 0:
if verbose:
print('No matched stars with radius {:.1f} px!'.format(match_radius))
if ret_nmatch:
return 0, 9999.0, 9999.0, {}
else:
return 9999.0
# Calculate the average distance
avg_dist = np.median(global_dist_list)
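# The cost grows with the squared median residual and shrinks with the number of
# matched stars (the 1/sqrt(n_matched + 1) factor), so more matches are rewarded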
cost = (avg_dist**2)*(1.0/np.sqrt(n_matched + 1))
if verbose:
print()
print("Matched {:d} stars with radius of {:.1f} px".format(n_matched, match_radius))
print(" Average distance = {:.3f} {:s}".format(avg_dist, unit_label))
print(" Cost function = {:.5f}".format(cost))
if ret_nmatch:
return n_matched, avg_dist, cost, matched_stars
else:
return cost
def checkFitGoodness(config, platepar, catalog_stars, star_dict, match_radius, verbose=False):
""" Checks if the platepar is 'good enough', given the extracted star positions. Returns True if the
fit is deemed good, False otherwise. The goodness of fit is determined by 2 criteria: the average
star residual (in pixels) has to be below a certain threshold, and an average number of matched stars
per image has to be above a predefined threshold as well.
Arguments:
config: [Config structure]
platepar: [Platepar structure] Initial astrometry parameters.
catalog_stars: [ndarray] An array of catalog stars (ra, dec, mag).
star_dict: [dict] A dictionary where the keys are JDs when the stars were recorded and values are
2D list of stars, each entry is (X, Y, bg_level, level).
match_radius: [float] Maximum radius for star matching (pixels).
Keyword arguments:
verbose: [bool] If True, fit status will be printed on the screen. False by default.
Return:
[bool] True if the platepar is good, False otherwise.
"""
if verbose:
print()
print("CHECK FIT GOODNESS:")
# Match the stars and calculate the residuals
n_matched, avg_dist, cost, matched_stars = matchStarsResiduals(config, platepar, catalog_stars, \
star_dict, match_radius, ret_nmatch=True, verbose=verbose)
# ### Plot zenith distance vs. residual
# # Go through all images
# for jd in matched_stars:
# _, cat_stars, dists = matched_stars[jd]
# # Extract RA/Dec
# ra, dec, _ = cat_stars.T
# zangle_list = []
# for ra_t, dec_t in zip(ra, dec):
# # Compute zenith distance
# azim, elev = raDec2AltAz(ra_t, dec_t, jd, platepar.lat, platepar.lon)
# zangle = 90 - elev
# zangle_list.append(zangle)
# # Plot zangle vs. distance
# plt.scatter(zangle_list, dists, c='k', s=0.1)
# plt.xlabel('Zenith angle')
# plt.ylabel('Residual (px)')
# plt.show()
# ###
# Check that the average distance is within the threshold
if avg_dist <= config.dist_check_threshold:
if verbose:
print()
print('The minimum residual is satisfied!')
# Check that the minimum number of stars is matched for every image
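# (the *1 factor means, on average, at least one matched star per FF image)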
if n_matched >= len(star_dict)*1:
return True
else:
if verbose:
print('But there are not enough stars on every image, recalibrating...')
return False
def _calcImageResidualsAstro(params, config, platepar, catalog_stars, star_dict, match_radius):
""" Calculates the differences between the stars on the image and catalog stars in image coordinates with
the given astrometrical solution.
Arguments:
params: [list] Fit parameters - reference RA, Dec, position angle, and scale.
config: [Config]
platepar: [Platepar]
catalog_stars: [list] List of (ra, dec, mag) entries (angles in degrees).
star_dict: [dict] Dictionary which contains the JD, and a list of (X, Y, bg_intens, intens) of the
stars on the image.
match_radius: [float] Star match radius (px).
Return:
[float] The average pixel residual (difference between image and catalog positions) normalized
by the square root of the total number of matched stars.
"""
# Make a copy of the platepar
pp = copy.deepcopy(platepar)
# Extract fitting parameters
ra_ref, dec_ref, pos_angle_ref, F_scale = params
# Set the fitting parameters to the platepar clone
pp.RA_d = ra_ref
pp.dec_d = dec_ref
pp.pos_angle_ref = pos_angle_ref
pp.F_scale = F_scale
# Match stars and calculate image residuals
return matchStarsResiduals(config, pp, catalog_stars, star_dict, match_radius, verbose=False)
def starListToDict(config, calstars_list, max_ffs=None):
""" Converts the list of calstars into dictionary where the keys are FF file JD and the values is
a list of (X, Y, bg_intens, intens) of stars.
"""
# Convert the list to a dictionary
calstars = {ff_file: star_data for ff_file, star_data in calstars_list}
# Dictionary which will contain the JD, and a list of (X, Y, bg_intens, intens) of the stars
star_dict = {}
# Take only those files with enough stars on them
for ff_name in calstars:
stars_list = calstars[ff_name]
# Check if there are enough stars on the image
if len(stars_list) >= config.ff_min_stars:
# Calculate the JD time of the FF file
dt = FFfile.getMiddleTimeFF(ff_name, config.fps, ret_milliseconds=True)
jd = date2JD(*dt)
# Add the time and the stars to the dict
star_dict[jd] = stars_list
if max_ffs is not None:
# Limit the number of FF files used
if len(star_dict) > max_ffs:
# Randomly choose calstars_files_N image files from the whole list
rand_keys = random.sample(list(star_dict), max_ffs)
star_dict = {key: star_dict[key] for key in rand_keys}
return star_dict
def autoCheckFit(config, platepar, calstars_list, _fft_refinement=False):
""" Attempts to refine the astrometry fit with the given stars and and initial astrometry parameters.
Arguments:
config: [Config structure]
platepar: [Platepar structure] Initial astrometry parameters.
calstars_list: [list] A list containing stars extracted from FF files. See RMS.Formats.CALSTARS for
more details.
Keyword arguments:
_fft_refinement: [bool] Internal flag indicating that autoCF is running the second time recursively
after FFT platepar adjustment.
Return:
(platepar, fit_status):
platepar: [Platepar structure] Estimated/refined platepar.
fit_status: [bool] True if the fit was successful, False if not.
"""
def _handleFailure(config, platepar, calstars_list, catalog_stars, _fft_refinement):
""" Run FFT alignment before giving up on ACF. """
if not _fft_refinement:
print()
print("-------------------------------------------------------------------------------")
print('The initial platepar is bad, trying to refine it using FFT phase correlation...')
print()
# Prepare data for FFT image registration
calstars_dict = {ff_file: star_data for ff_file, star_data in calstars_list}
# Extract star list from CALSTARS file from FF file with most stars
max_len_ff = max(calstars_dict, key=lambda k: len(calstars_dict[k]))
# Take only X, Y (change order so X is first)
calstars_coords = np.array(calstars_dict[max_len_ff])[:, :2]
calstars_coords[:, [0, 1]] = calstars_coords[:, [1, 0]]
# Get the time of the FF file
calstars_time = FFfile.getMiddleTimeFF(max_len_ff, config.fps, ret_milliseconds=True)
# Try aligning the platepar using FFT image registration
platepar_refined = alignPlatepar(config, platepar, calstars_time, calstars_coords)
print()
### If there are still not enough stars matched, try FFT again ###
min_radius = 10
# Prepare star dictionary to check the match
dt = FFfile.getMiddleTimeFF(max_len_ff, config.fps, ret_milliseconds=True)
jd = date2JD(*dt)
star_dict_temp = {}
star_dict_temp[jd] = calstars_dict[max_len_ff]
# Check the number of matched stars
n_matched, _, _, _ = matchStarsResiduals(config, platepar_refined, catalog_stars, \
star_dict_temp, min_radius, ret_nmatch=True, verbose=True)
# Realign again if necessary
if n_matched < config.min_matched_stars:
print()
print("-------------------------------------------------------------------------------")
print('Doing a second FFT pass as the number of matched stars was too small...')
print()
platepar_refined = alignPlatepar(config, platepar_refined, calstars_time, calstars_coords)
print()
### ###
# Redo autoCF
return autoCheckFit(config, platepar_refined, calstars_list, _fft_refinement=True)
else:
print('Auto Check Fit failed completely, please redo the plate manually!')
return platepar, False
if _fft_refinement:
print('Second ACF run with an updated platepar via FFT phase correlation...')
# Load catalog stars (overwrite the mag band ratios if specific catalog is used)
catalog_stars, _, config.star_catalog_band_ratios = StarCatalog.readStarCatalog(config.star_catalog_path, \
config.star_catalog_file, lim_mag=config.catalog_mag_limit, \
mag_band_ratios=config.star_catalog_band_ratios)
# Dictionary which will contain the JD, and a list of (X, Y, bg_intens, intens) of the stars
star_dict = starListToDict(config, calstars_list, max_ffs=config.calstars_files_N)
# There has to be a minimum number of FF files (config.calstars_files_N) for star fitting
if len(star_dict) < config.calstars_files_N:
print('Not enough FF files in CALSTARS for ACF!')
return platepar, False
# Calculate the total number of calibration stars used
total_calstars = sum([len(star_dict[key]) for key in star_dict])
print('Total calstars:', total_calstars)
if total_calstars < config.calstars_min_stars:
print('Not enough calibration stars, need at least', config.calstars_min_stars)
return platepar, False
print()
# A list of matching radiuses to try
min_radius = 0.5
radius_list = [10, 5, 3, 1.5, min_radius]
# Calculate the function tolerance, so the desired precision can be reached (the number is calculated
# in the same regard as the cost function)
fatol, xatol_ang = computeMinimizationTolerances(config, platepar, len(star_dict))
### If the initial match is good enough, do only a quick recalibration ###
# Match the stars and calculate the residuals
n_matched, avg_dist, cost, _ = matchStarsResiduals(config, platepar, catalog_stars, star_dict, \
min_radius, ret_nmatch=True)
if n_matched >= config.calstars_files_N:
# Check if the average distance with the tightest radius is close
if avg_dist < config.dist_check_quick_threshold:
print("Using quick fit with smaller radiia...")
# Use a reduced set of initial radius values
radius_list = [1.5, min_radius]
##########
# Match increasingly smaller search radii around image stars
for i, match_radius in enumerate(radius_list):
# Match the stars and calculate the residuals
n_matched, avg_dist, cost, _ = matchStarsResiduals(config, platepar, catalog_stars, star_dict, \
match_radius, ret_nmatch=True)
print()
print("-------------------------------------------------------------")
print("Refining camera pointing with max pixel deviation = {:.1f} px".format(match_radius))
print("Initial values:")
print(" Matched stars = {:>6d}".format(n_matched))
print(" Average deviation = {:>6.2f} px".format(avg_dist))
# The initial number of matched stars has to be at least the number of FF images, otherwise it means
# that the initial platepar is no good
if n_matched < config.calstars_files_N:
print("The total number of initially matched stars is too small! Please manually redo the plate or make sure there are enough calibration stars.")
# Try to refine the platepar with FFT phase correlation and redo the ACF
return _handleFailure(config, platepar, calstars_list, catalog_stars, _fft_refinement)
# Check if the platepar is good enough and do not estimate further parameters
if checkFitGoodness(config, platepar, catalog_stars, star_dict, min_radius, verbose=True):
# Print out notice only if the platepar is good right away
if i == 0:
print("Initial platepar is good enough!")
return platepar, True
# Initial parameters for the astrometric fit
p0 = [platepar.RA_d, platepar.dec_d, platepar.pos_angle_ref, platepar.F_scale]
# Fit the astrometric parameters
res = scipy.optimize.minimize(_calcImageResidualsAstro, p0, args=(config, platepar, catalog_stars, \
star_dict, match_radius), method='Nelder-Mead', \
options={'fatol': fatol, 'xatol': xatol_ang})
print(res)
# If the fit was not successful, stop further fitting
if not res.success:
# Try to refine the platepar with FFT phase correlation and redo the ACF
return _handleFailure(config, platepar, calstars_list, catalog_stars, _fft_refinement)
else:
# If the fit was successful, use the new parameters from now on
ra_ref, dec_ref, pos_angle_ref, F_scale = res.x
platepar.RA_d = ra_ref
platepar.dec_d = dec_ref
platepar.pos_angle_ref = pos_angle_ref
platepar.F_scale = F_scale
# Check if the platepar is good enough and do not estimate further parameters
if checkFitGoodness(config, platepar, catalog_stars, star_dict, min_radius, verbose=True):
return platepar, True
# Match the stars and calculate the residuals
n_matched, avg_dist, cost, matched_stars = matchStarsResiduals(config, platepar, catalog_stars, \
star_dict, min_radius, ret_nmatch=True)
print("FINAL SOLUTION with radius {:.1} px:".format(min_radius))
print(" Matched stars = {:>6d}".format(n_matched))
print(" Average deviation = {:>6.2f} px".format(avg_dist))
# Mark the platepar to indicate that it was automatically refined with CheckFit
platepar.auto_check_fit_refined = True
# Recompute alt/az of the FOV centre
platepar.az_centre, platepar.alt_centre = raDec2AltAz(platepar.RA_d, platepar.dec_d, platepar.JD, \
platepar.lat, platepar.lon)
# Recompute the rotation wrt horizon
platepar.rotation_from_horiz = rotationWrtHorizon(platepar)
return platepar, True
if __name__ == "__main__":
### COMMAND LINE ARGUMENTS
# Init the command line arguments parser
arg_parser = argparse.ArgumentParser(description="Check if the calibration file matches the stars, and improve it.")
arg_parser.add_argument('dir_path', nargs=1, metavar='DIR_PATH', type=str, \
help='Path to the folder with FF or image files. This folder also has to contain the platepar file.')
arg_parser.add_argument('-c', '--config', nargs=1, metavar='CONFIG_PATH', type=str, \
help="Path to a config file which will be used instead of the default one.")
# Parse the command line arguments
cml_args = arg_parser.parse_args()
#########################
dir_path = cml_args.dir_path[0]
# Check if the given directory is OK
if not os.path.exists(dir_path):
print('No such directory:', dir_path)
sys.exit()
# Load the config file
config = cr.loadConfigFromDirectory(cml_args.config, dir_path)
# Get a list of files in the night folder
file_list = os.listdir(dir_path)
# Find and load the platepar file
if config.platepar_name in file_list:
# Load the platepar
platepar = Platepar.Platepar()
platepar.read(os.path.join(dir_path, config.platepar_name), use_flat=config.use_flat)
else:
print('Cannot find the platepar file in the night directory: ', config.platepar_name)
sys.exit()
# Find the CALSTARS file in the given folder
calstars_file = None
for calstars_file in file_list:
if ('CALSTARS' in calstars_file) and ('.txt' in calstars_file):
break
if calstars_file is None:
print('CALSTARS file could not be found in the given directory!')
sys.exit()
# Load the calstars file
calstars_list = CALSTARS.readCALSTARS(dir_path, calstars_file)
print('CALSTARS file: ' + calstars_file + ' loaded!')
# Run the automatic astrometry fit
pp, fit_status = autoCheckFit(config, platepar, calstars_list)
# If the fit succeeded, save the platepar
if fit_status:
print('ACF successful!')
# Save the old platepar
shutil.move(os.path.join(dir_path, config.platepar_name), os.path.join(dir_path,
config.platepar_name + '.old'))
# Save the new platepar
pp.write(os.path.join(dir_path, config.platepar_name)) | gpl-3.0 |
mehdidc/scikit-learn | examples/gaussian_process/plot_gp_regression.py | 253 | 4054 | #!/usr/bin/python
# -*- coding: utf-8 -*-
r"""
=========================================================
Gaussian Processes regression: basic introductory example
=========================================================
A simple one-dimensional regression exercise computed in two different ways:
1. A noise-free case with a cubic correlation model
2. A noisy case with a squared-exponential correlation model
In both cases, the model parameters are estimated using the maximum
likelihood principle.
The figures illustrate the interpolating property of the Gaussian Process
model as well as its probabilistic nature in the form of a pointwise 95%
confidence interval.
Note that the parameter ``nugget`` is applied as a Tikhonov regularization
of the assumed covariance between the training points. In the special case
of the squared-exponential correlation model, nugget is mathematically equivalent
to a normalized variance, that is:
.. math::
\mathrm{nugget}_i = \left[\frac{\sigma_i}{y_i}\right]^2
"""
print(__doc__)
# Author: Vincent Dubourg <[email protected]>
# Jake Vanderplas <[email protected]>
# Licence: BSD 3 clause
import numpy as np
from sklearn.gaussian_process import GaussianProcess
from matplotlib import pyplot as pl
np.random.seed(1)
def f(x):
"""The function to predict."""
return x * np.sin(x)
#----------------------------------------------------------------------
# First the noiseless case
X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
# Observations
y = f(X).ravel()
# Mesh the input space for evaluations of the real function, the prediction and
# its MSE
x = np.atleast_2d(np.linspace(0, 10, 1000)).T
# Instantiate a Gaussian Process model
gp = GaussianProcess(corr='cubic', theta0=1e-2, thetaL=1e-4, thetaU=1e-1,
random_start=100)
# Fit to data using Maximum Likelihood Estimation of the parameters
gp.fit(X, y)
# Make the prediction on the meshed x-axis (ask for MSE as well)
y_pred, MSE = gp.predict(x, eval_MSE=True)
sigma = np.sqrt(MSE)
# Plot the function, the prediction and the 95% confidence interval based on
# the MSE
fig = pl.figure()
pl.plot(x, f(x), 'r:', label=u'$f(x) = x\,\sin(x)$')
pl.plot(X, y, 'r.', markersize=10, label=u'Observations')
pl.plot(x, y_pred, 'b-', label=u'Prediction')
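# 1.9600 is the 97.5 % quantile of the standard normal distribution, so the band
# y_pred +/- 1.96*sigma below is the pointwise 95 % confidence interval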
pl.fill(np.concatenate([x, x[::-1]]),
np.concatenate([y_pred - 1.9600 * sigma,
(y_pred + 1.9600 * sigma)[::-1]]),
alpha=.5, fc='b', ec='None', label='95% confidence interval')
pl.xlabel('$x$')
pl.ylabel('$f(x)$')
pl.ylim(-10, 20)
pl.legend(loc='upper left')
#----------------------------------------------------------------------
# now the noisy case
X = np.linspace(0.1, 9.9, 20)
X = np.atleast_2d(X).T
# Observations and noise
y = f(X).ravel()
dy = 0.5 + 1.0 * np.random.random(y.shape)
noise = np.random.normal(0, dy)
y += noise
# Mesh the input space for evaluations of the real function, the prediction and
# its MSE
x = np.atleast_2d(np.linspace(0, 10, 1000)).T
# Instantiate a Gaussian Process model
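# The per-point nugget below follows the normalized-variance form given in the
# module docstring, nugget_i = (sigma_i / y_i)**2, with dy as the noise level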
gp = GaussianProcess(corr='squared_exponential', theta0=1e-1,
thetaL=1e-3, thetaU=1,
nugget=(dy / y) ** 2,
random_start=100)
# Fit to data using Maximum Likelihood Estimation of the parameters
gp.fit(X, y)
# Make the prediction on the meshed x-axis (ask for MSE as well)
y_pred, MSE = gp.predict(x, eval_MSE=True)
sigma = np.sqrt(MSE)
# Plot the function, the prediction and the 95% confidence interval based on
# the MSE
fig = pl.figure()
pl.plot(x, f(x), 'r:', label=u'$f(x) = x\,\sin(x)$')
pl.errorbar(X.ravel(), y, dy, fmt='r.', markersize=10, label=u'Observations')
pl.plot(x, y_pred, 'b-', label=u'Prediction')
pl.fill(np.concatenate([x, x[::-1]]),
np.concatenate([y_pred - 1.9600 * sigma,
(y_pred + 1.9600 * sigma)[::-1]]),
alpha=.5, fc='b', ec='None', label='95% confidence interval')
pl.xlabel('$x$')
pl.ylabel('$f(x)$')
pl.ylim(-10, 20)
pl.legend(loc='upper left')
pl.show()
| bsd-3-clause |
cscorley/doc2vec-feature-location | scripts/boxplots.py | 2 | 1180 |
# coding: utf-8
# In[1]:
import numpy as np
import matplotlib.pyplot as plt
import gensim
import src.main
# In[2]:
def get_all_ranks(project):
r_lda = [x for x,y,z in src.main.read_ranks(project, 'release_lda')]
r_lsi = [x for x,y,z in src.main.read_ranks(project, 'release_lsi')]
c_lda = [x for x,y,z in src.main.read_ranks(project, 'changeset_lda')]
c_lsi = [x for x,y,z in src.main.read_ranks(project, 'changeset_lsi')]
try:
t_lda = [x for x,y,z in src.main.read_ranks(project, 'temporal_lda')]
t_lsi = [x for x,y,z in src.main.read_ranks(project, 'temporal_lsi')]
except:
t_lda = []
t_lsi = []
return r_lda, c_lda, t_lda, r_lsi, c_lsi, t_lsi
# In[3]:
projects = src.main.load_projects()
# In[8]:
for project in projects:
ranks = get_all_ranks(project)
fig = plt.figure(dpi=300)
fig.gca().boxplot(ranks,
labels=['S-LDA', 'C-LDA', 'T-LDA', 'S-LSI', 'C-LSI', 'T-LSI'])
fig.gca().set_title(' '.join([project.name, project.version, project.level]))
plt.savefig('paper/figures/' + project.name + project.version + project.level + '.png')
plt.close()
# In[ ]:
| bsd-3-clause |
jmetzen/scikit-learn | examples/svm/plot_iris.py | 225 | 3252 | """
==================================================
Plot different SVM classifiers in the iris dataset
==================================================
Comparison of different linear SVM classifiers on a 2D projection of the iris
dataset. We only consider the first 2 features of this dataset:
- Sepal length
- Sepal width
This example shows how to plot the decision surface for four SVM classifiers
with different kernels.
The linear models ``LinearSVC()`` and ``SVC(kernel='linear')`` yield slightly
different decision boundaries. This can be a consequence of the following
differences:
- ``LinearSVC`` minimizes the squared hinge loss while ``SVC`` minimizes the
regular hinge loss.
- ``LinearSVC`` uses the One-vs-All (also known as One-vs-Rest) multiclass
reduction while ``SVC`` uses the One-vs-One multiclass reduction.
Both linear models have linear decision boundaries (intersecting hyperplanes)
while the non-linear kernel models (polynomial or Gaussian RBF) have more
flexible non-linear decision boundaries with shapes that depend on the kind of
kernel and its parameters.
.. NOTE:: while plotting the decision function of classifiers for toy 2D
datasets can help get an intuitive understanding of their respective
expressive power, be aware that those intuitions don't always generalize to
more realistic high-dimensional problems.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
h = .02 # step size in the mesh
# we create an instance of SVM and fit our data. We do not scale our
# data since we want to plot the support vectors
C = 1.0 # SVM regularization parameter
svc = svm.SVC(kernel='linear', C=C).fit(X, y)
rbf_svc = svm.SVC(kernel='rbf', gamma=0.7, C=C).fit(X, y)
poly_svc = svm.SVC(kernel='poly', degree=3, C=C).fit(X, y)
lin_svc = svm.LinearSVC(C=C).fit(X, y)
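# Note: LinearSVC minimizes the squared hinge loss by default; a sketch of a
# presumably closer match to SVC(kernel='linear') would be, e.g.:
# lin_svc_hinge = svm.LinearSVC(C=C, loss='hinge').fit(X, y)  # not plotted below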
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# title for the plots
titles = ['SVC with linear kernel',
'LinearSVC (linear kernel)',
'SVC with RBF kernel',
'SVC with polynomial (degree 3) kernel']
for i, clf in enumerate((svc, lin_svc, rbf_svc, poly_svc)):
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
plt.subplot(2, 2, i + 1)
plt.subplots_adjust(wspace=0.4, hspace=0.4)
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z, cmap=plt.cm.Paired, alpha=0.8)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())
plt.title(titles[i])
plt.show()
| bsd-3-clause |
mayblue9/scikit-learn | sklearn/decomposition/tests/test_online_lda.py | 21 | 13171 | import numpy as np
from scipy.linalg import block_diag
from scipy.sparse import csr_matrix
from scipy.special import psi
from sklearn.decomposition import LatentDirichletAllocation
from sklearn.decomposition._online_lda import (_dirichlet_expectation_1d,
_dirichlet_expectation_2d)
from sklearn.utils.testing import assert_allclose
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import if_safe_multiprocessing_with_blas
from sklearn.utils.validation import NotFittedError
from sklearn.externals.six.moves import xrange
def _build_sparse_mtx():
# Create 3 topics and each topic has 3 distinct words.
# (Each word only belongs to a single topic.)
n_topics = 3
block = n_topics * np.ones((3, 3))
blocks = [block] * n_topics
X = block_diag(*blocks)
X = csr_matrix(X)
return (n_topics, X)
def test_lda_default_prior_params():
# default prior parameter should be `1 / topics`
# and verbose params should not affect result
n_topics, X = _build_sparse_mtx()
prior = 1. / n_topics
lda_1 = LatentDirichletAllocation(n_topics=n_topics, doc_topic_prior=prior,
topic_word_prior=prior, random_state=0)
lda_2 = LatentDirichletAllocation(n_topics=n_topics, random_state=0)
topic_distr_1 = lda_1.fit_transform(X)
topic_distr_2 = lda_2.fit_transform(X)
assert_almost_equal(topic_distr_1, topic_distr_2)
def test_lda_fit_batch():
# Test LDA batch learning (`fit` method with 'batch' learning)
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, evaluate_every=1,
learning_method='batch', random_state=rng)
lda.fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for component in lda.components_:
# Find top 3 words in each LDA component
top_idx = set(component.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_fit_online():
# Test LDA online learning (`fit` method with 'online' learning)
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, learning_offset=10.,
evaluate_every=1, learning_method='online',
random_state=rng)
lda.fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for component in lda.components_:
# Find top 3 words in each LDA component
top_idx = set(component.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_partial_fit():
# Test LDA online learning (`partial_fit` method)
# (same as test_lda_batch)
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, learning_offset=10.,
total_samples=100, random_state=rng)
for i in xrange(3):
lda.partial_fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for c in lda.components_:
top_idx = set(c.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_dense_input():
# Test LDA with dense input.
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, learning_method='batch',
random_state=rng)
lda.fit(X.toarray())
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for component in lda.components_:
# Find top 3 words in each LDA component
top_idx = set(component.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_transform():
# Test LDA transform.
# Transform result cannot be negative
rng = np.random.RandomState(0)
X = rng.randint(5, size=(20, 10))
n_topics = 3
lda = LatentDirichletAllocation(n_topics=n_topics, random_state=rng)
X_trans = lda.fit_transform(X)
assert_true((X_trans > 0.0).any())
def test_lda_fit_transform():
# Test LDA fit_transform & transform
# fit_transform and transform result should be the same
for method in ('online', 'batch'):
rng = np.random.RandomState(0)
X = rng.randint(10, size=(50, 20))
lda = LatentDirichletAllocation(n_topics=5, learning_method=method,
random_state=rng)
X_fit = lda.fit_transform(X)
X_trans = lda.transform(X)
assert_array_almost_equal(X_fit, X_trans, 4)
def test_lda_partial_fit_dim_mismatch():
# test `n_features` mismatch in `partial_fit`
rng = np.random.RandomState(0)
n_topics = rng.randint(3, 6)
n_col = rng.randint(6, 10)
X_1 = np.random.randint(4, size=(10, n_col))
X_2 = np.random.randint(4, size=(10, n_col + 1))
lda = LatentDirichletAllocation(n_topics=n_topics, learning_offset=5.,
total_samples=20, random_state=rng)
lda.partial_fit(X_1)
assert_raises_regexp(ValueError, r"^The provided data has",
lda.partial_fit, X_2)
def test_invalid_params():
# test `_check_params` method
X = np.ones((5, 10))
invalid_models = (
('n_topics', LatentDirichletAllocation(n_topics=0)),
('learning_method',
LatentDirichletAllocation(learning_method='unknown')),
('total_samples', LatentDirichletAllocation(total_samples=0)),
('learning_offset', LatentDirichletAllocation(learning_offset=-1)),
)
for param, model in invalid_models:
regex = r"^Invalid %r parameter" % param
assert_raises_regexp(ValueError, regex, model.fit, X)
def test_lda_negative_input():
# test pass dense matrix with sparse negative input.
X = -np.ones((5, 10))
lda = LatentDirichletAllocation()
regex = r"^Negative values in data passed"
assert_raises_regexp(ValueError, regex, lda.fit, X)
def test_lda_no_component_error():
# test `transform` and `perplexity` before `fit`
rng = np.random.RandomState(0)
X = rng.randint(4, size=(20, 10))
lda = LatentDirichletAllocation()
regex = r"^no 'components_' attribute"
assert_raises_regexp(NotFittedError, regex, lda.transform, X)
assert_raises_regexp(NotFittedError, regex, lda.perplexity, X)
def test_lda_transform_mismatch():
# test `n_features` mismatch in partial_fit and transform
rng = np.random.RandomState(0)
X = rng.randint(4, size=(20, 10))
X_2 = rng.randint(4, size=(10, 8))
n_topics = rng.randint(3, 6)
lda = LatentDirichletAllocation(n_topics=n_topics, random_state=rng)
lda.partial_fit(X)
assert_raises_regexp(ValueError, r"^The provided data has",
lda.partial_fit, X_2)
@if_safe_multiprocessing_with_blas
def test_lda_multi_jobs():
n_topics, X = _build_sparse_mtx()
# Test LDA batch training with multi CPU
for method in ('online', 'batch'):
rng = np.random.RandomState(0)
lda = LatentDirichletAllocation(n_topics=n_topics, n_jobs=2,
learning_method=method,
random_state=rng)
lda.fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for c in lda.components_:
top_idx = set(c.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
@if_safe_multiprocessing_with_blas
def test_lda_partial_fit_multi_jobs():
# Test LDA online training with multi CPU
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, n_jobs=2,
learning_offset=5., total_samples=30,
random_state=rng)
for i in range(2):
lda.partial_fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for c in lda.components_:
top_idx = set(c.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_perplexity_mismatch():
# test dimension mismatch in `perplexity` method
rng = np.random.RandomState(0)
n_topics = rng.randint(3, 6)
n_samples = rng.randint(6, 10)
X = np.random.randint(4, size=(n_samples, 10))
lda = LatentDirichletAllocation(n_topics=n_topics, learning_offset=5.,
total_samples=20, random_state=rng)
lda.fit(X)
# invalid samples
invalid_n_samples = rng.randint(4, size=(n_samples + 1, n_topics))
assert_raises_regexp(ValueError, r'Number of samples', lda.perplexity, X,
invalid_n_samples)
# invalid topic number
invalid_n_topics = rng.randint(4, size=(n_samples, n_topics + 1))
assert_raises_regexp(ValueError, r'Number of topics', lda.perplexity, X,
invalid_n_topics)
def test_lda_perplexity():
# Test LDA perplexity for both batch and online training
# perplexity should be lower after each iteration
n_topics, X = _build_sparse_mtx()
for method in ('online', 'batch'):
lda_1 = LatentDirichletAllocation(n_topics=n_topics, max_iter=1,
learning_method=method,
total_samples=100, random_state=0)
lda_2 = LatentDirichletAllocation(n_topics=n_topics, max_iter=10,
learning_method=method,
total_samples=100, random_state=0)
distr_1 = lda_1.fit_transform(X)
perp_1 = lda_1.perplexity(X, distr_1, sub_sampling=False)
distr_2 = lda_2.fit_transform(X)
perp_2 = lda_2.perplexity(X, distr_2, sub_sampling=False)
assert_greater_equal(perp_1, perp_2)
perp_1_subsampling = lda_1.perplexity(X, distr_1, sub_sampling=True)
perp_2_subsampling = lda_2.perplexity(X, distr_2, sub_sampling=True)
assert_greater_equal(perp_1_subsampling, perp_2_subsampling)
def test_lda_score():
# Test LDA score for both batch and online training
# score should be higher after each iteration
n_topics, X = _build_sparse_mtx()
for method in ('online', 'batch'):
lda_1 = LatentDirichletAllocation(n_topics=n_topics, max_iter=1,
learning_method=method,
total_samples=100, random_state=0)
lda_2 = LatentDirichletAllocation(n_topics=n_topics, max_iter=10,
learning_method=method,
total_samples=100, random_state=0)
lda_1.fit_transform(X)
score_1 = lda_1.score(X)
lda_2.fit_transform(X)
score_2 = lda_2.score(X)
assert_greater_equal(score_2, score_1)
def test_perplexity_input_format():
# Test LDA perplexity for sparse and dense input
# score should be the same for both dense and sparse input
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, max_iter=1,
learning_method='batch',
total_samples=100, random_state=0)
distr = lda.fit_transform(X)
perp_1 = lda.perplexity(X)
perp_2 = lda.perplexity(X, distr)
perp_3 = lda.perplexity(X.toarray(), distr)
assert_almost_equal(perp_1, perp_2)
assert_almost_equal(perp_1, perp_3)
def test_lda_score_perplexity():
# Test the relationship between LDA score and perplexity
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, max_iter=10,
random_state=0)
distr = lda.fit_transform(X)
perplexity_1 = lda.perplexity(X, distr, sub_sampling=False)
score = lda.score(X)
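# score() returns the approximate log-likelihood, so perplexity should equal
# exp(-log-likelihood / total word count); np.sum(X.data) is that word count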
perplexity_2 = np.exp(-1. * (score / np.sum(X.data)))
assert_almost_equal(perplexity_1, perplexity_2)
def test_lda_empty_docs():
"""Test LDA on empty document (all-zero rows)."""
Z = np.zeros((5, 4))
for X in [Z, csr_matrix(Z)]:
lda = LatentDirichletAllocation(max_iter=750).fit(X)
assert_almost_equal(lda.components_.sum(axis=0),
np.ones(lda.components_.shape[1]))
def test_dirichlet_expectation():
"""Test Cython version of Dirichlet expectation calculation."""
x = np.logspace(-100, 10, 10000)
expectation = np.empty_like(x)
_dirichlet_expectation_1d(x, 0, expectation)
assert_allclose(expectation, np.exp(psi(x) - psi(np.sum(x))),
atol=1e-19)
x = x.reshape(100, 100)
assert_allclose(_dirichlet_expectation_2d(x),
psi(x) - psi(np.sum(x, axis=1)[:, np.newaxis]),
rtol=1e-11, atol=3e-9)
| bsd-3-clause |
joshbohde/scikit-learn | sklearn/linear_model/sparse/logistic.py | 2 | 3922 | """
Sparse Logistic Regression module
This module has the same API as sklearn.linear_model.logistic, but is
designed to handle efficiently data in sparse matrix format.
"""
import numpy as np
import scipy.sparse as sp
from ...base import ClassifierMixin
from ...svm.sparse.base import SparseBaseLibLinear
from ...linear_model.sparse.base import CoefSelectTransformerMixin
from ...svm.liblinear import csr_predict_prob
class LogisticRegression(SparseBaseLibLinear, ClassifierMixin,
CoefSelectTransformerMixin):
"""
Logistic Regression.
Implements L1 and L2 regularized logistic regression.
Parameters
----------
penalty : string, 'l1' or 'l2'
Used to specify the norm used in the penalization
dual : boolean
Dual or primal formulation. Dual formulation is only
implemented for l2 penalty.
C : float
Specifies the inverse of the regularization strength: the smaller it is,
the stronger the regularization.
fit_intercept : bool, default: True
Specifies if a constant (a.k.a. bias or intercept) should be
added to the decision function
intercept_scaling : float, default: 1
when self.fit_intercept is True, instance vector x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note that the synthetic feature weight is subject to l1/l2 regularization,
as are all other features.
To lessen the effect of regularization on the synthetic feature weight
(and therefore on the intercept), intercept_scaling has to be increased.
tol: float, optional
tolerance for stopping criteria
Attributes
----------
`coef_` : array, shape = [n_classes-1, n_features]
Coefficient of the features in the decision function.
`intercept_` : array, shape = [n_classes-1]
intercept (a.k.a. bias) added to the decision function.
It is available only when parameter intercept is set to True
See also
--------
LinearSVC
Notes
-----
The underlying C implementation uses a random number generator to
select features when fitting the model. It is thus not uncommon
to have slightly different results for the same input data. If
that happens, try with a smaller tol parameter.
References
----------
LIBLINEAR -- A Library for Large Linear Classification
http://www.csie.ntu.edu.tw/~cjlin/liblinear/
"""
def __init__(self, penalty='l2', dual=False, tol=1e-4, C=1.0,
fit_intercept=True, intercept_scaling=1):
super(LogisticRegression, self).__init__ (penalty=penalty,
dual=dual, loss='lr', tol=tol, C=C,
fit_intercept=fit_intercept, intercept_scaling=intercept_scaling)
def predict_proba(self, X):
"""
Probability estimates.
The returned estimates for all classes are ordered by the
label of classes.
"""
X = sp.csr_matrix(X)
X.data = np.asanyarray(X.data, dtype=np.float64, order='C')
probas = csr_predict_prob(X.shape[1], X.data, X.indices,
X.indptr, self.raw_coef_,
self._get_solver_type(),
self.tol, self.C,
self.class_weight_label,
self.class_weight, self.label_,
self._get_bias())
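# csr_predict_prob returns columns in self.label_ order; reorder them so the
# output columns follow the sorted class labels, as stated in the docstring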
return probas[:,np.argsort(self.label_)]
def predict_log_proba(self, T):
"""
Log of Probability estimates.
The returned estimates for all classes are ordered by the
label of classes.
"""
return np.log(self.predict_proba(T))
| bsd-3-clause |
cogstat/cogstat | cogstat/test/test_stat.py | 1 | 22429 | # -*- coding: utf-8 -*-
import unittest
import os
import sys
sys.path.insert(0, os.path.abspath('../..'))
print(sys.path)
from pathlib import Path
import numpy as np
import pandas as pd
from cogstat import cogstat as cs
print(cs.__file__)
print(cs.__version__)
print(os.path.abspath(cs.__file__))
"""
- All statistical value should be tested at least once.
- All leafs of the decision tree should be tested once.
- Tests shouldn't give p<0.001 results, because exact values cannot be tested.
- No need to test the details of the statistical methods imported from other modules,
because that is the job of that specific module.
- All variables should be used with 3 digits decimal precision, to ensure that copying
the data for validation no additional rounding happens.
"""
#cs.output_type = 'do not format'
np.random.seed(555)
# https://docs.scipy.org/doc/numpy/reference/routines.random.html
# Make sure to use round function to have the same precision of the data when copied to other software
data_np = np.vstack((
np.round(np.random.normal(loc=3, scale=3, size=30), 3),
np.round(np.random.lognormal(mean=3, sigma=3, size=30), 3),
np.random.randint(3, size=30),
np.random.randint(3, size=30),
np.round(np.random.normal(loc=3, scale=3, size=30), 3),
np.round(np.random.lognormal(mean=1.4, sigma=0.6, size=30), 3),
np.round(np.random.normal(loc=6, scale=3, size=30), 3),
np.round(np.random.normal(loc=7, scale=6, size=30), 3),
np.random.randint(2, size=30),
np.random.randint(2, size=30),
np.random.randint(2, size=30),
np.concatenate((np.round(np.random.normal(loc=3, scale=3, size=15), 3),
np.round(np.random.normal(loc=4, scale=3, size=15), 3))),
np.array([1]*15+[2]*15),
np.array([1]+[2]*29),
np.concatenate((np.round(np.random.normal(loc=3, scale=3, size=15), 3),
np.round(np.random.lognormal(mean=1.5, sigma=2.0, size=15), 3))),
np.concatenate((np.round(np.random.normal(loc=3, scale=3, size=15), 3),
np.round(np.random.normal(loc=3, scale=7, size=15), 3))),
np.array([1]*10+[2]*8+[3]*12),
np.concatenate((np.round(np.random.normal(loc=3, scale=3, size=10), 3),
np.round(np.random.normal(loc=3, scale=3, size=8), 3),
np.round(np.random.normal(loc=6, scale=3, size=12), 3)))
))
data_pd = pd.DataFrame(data_np.T, columns=
['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r'])
data = cs.CogStatData(data=data_pd, measurement_levels=['int', 'int', 'nom', 'nom', 'int', 'int', 'int', 'int', 'nom',
'nom', 'nom', 'int', 'nom', 'nom', 'int', 'int', 'int', 'int'])
#pd.set_option('display.expand_frame_repr', False)
#print (data_pd)
class CogStatTestCase(unittest.TestCase):
"""Unit tests for CogStat."""
def test_explore_variables(self):
"""Test explore variables"""
# Int variable
result = data.explore_variable('a', 1, 2.0)
#for i, res in enumerate(result): print(i, res)
self.assertTrue('N of valid cases: 30' in result[2])
self.assertTrue('N of missing cases: 0' in result[2])
self.assertTrue('<td>Mean</td> <td>3.1438</td>' in result[4])
self.assertTrue('<td>Standard deviation</td> <td>3.2152</td>' in result[4])
self.assertTrue('<td>Skewness</td> <td>0.3586</td>' in result[4])
self.assertTrue('<td>Kurtosis</td> <td>0.0446</td>' in result[4])
self.assertTrue('<td>Range</td> <td>12.7840</td>' in result[4])
self.assertTrue('<td>Maximum</td> <td>9.9810</td>' in result[4])
self.assertTrue('<td>Upper quartile</td> <td>4.3875</td>' in result[4])
self.assertTrue('<td>Median</td> <td>2.8545</td>' in result[4])
self.assertTrue('<td>Lower quartile</td> <td>1.4190</td>' in result[4])
self.assertTrue('<td>Minimum</td> <td>-2.8030</td>' in result[4])
# Shapiro–Wilk normality
self.assertTrue('<i>W</i> = 0.96' in result[6]) # <i>W</i> = 0.959
self.assertTrue('<i>p</i> = .287' in result[6])
# Population estimation and one sample t-test
self.assertTrue('<td>Mean</td> <td>3.1438</td> <td>1.9227</td> <td>4.3649</td>' in result[9])
self.assertTrue('<td>Standard deviation</td> <td>3.2702</td> <td>2.6044</td> <td>4.3961</td>' in result[9])
# Sensitivity power analysis
# G*Power 3.1.9.6: 0.6811825
# jamovi v1.2.19.0, jpower 0.1.2: 0.681
self.assertTrue('(effect size is in d): 0.68' in result[11])
self.assertTrue('t</i>(29) = 1.92' in result[11])
self.assertTrue('p</i> = .065' in result[11])
# Wilcoxon signed-rank test for non-normal interval variable
result = data.explore_variable('b', 0, 20.0)
self.assertTrue('T</i> = 203' in result[11])
self.assertTrue('p</i> = .551' in result[11])
# Ord variable
data.data_measlevs['a'] = 'ord'
result = data.explore_variable('a', 1, 2.0)
self.assertTrue('N of valid cases: 30' in result[2])
self.assertTrue('N of missing cases: 0' in result[2])
self.assertTrue('<td>Maximum</td> <td>9.9810</td>' in result[4])
self.assertTrue('<td>Upper quartile</td> <td>4.3875</td>' in result[4])
self.assertTrue('<td>Median</td> <td>2.8545</td>' in result[4])
self.assertTrue('<td>Lower quartile</td> <td>1.4190</td>' in result[4])
self.assertTrue('<td>Minimum</td> <td>-2.8030</td>' in result[4])
# TODO median CI
# Wilcoxon signed-rank test
self.assertTrue('T</i> = 145' in result[9])
self.assertTrue('p</i> = .074' in result[9])
data.data_measlevs['a'] = 'int'
# Nominal variable
#result = data.explore_variable('c')
# TODO variation ratio
# TODO multinomial proportion CI
def test_explore_variable_pairs(self):
"""Test explore variable pairs"""
# Int variables
result = data.explore_variable_pair('a', 'b')
self.assertTrue('N of valid pairs: 30' in result[1])
self.assertTrue('N of missing pairs: 0' in result[1])
self.assertTrue('-0.141' in result[4])
self.assertTrue('[-0.477, 0.231]' in result[6])
self.assertTrue("Pearson's correlation: <i>r</i>(28) = -0.14, <i>p</i> = .456" in result[7]) # <i>r</i>(28) = -0.141
self.assertTrue('y = -21.811x + 300.505' in result[3])
self.assertTrue('-0.363' in result[4])
self.assertTrue('[-0.640, -0.003]' in result[6])
self.assertTrue("Spearman's rank-order correlation: <i>r<sub>s</sub></i>(28) = -0.36, <i>p</i> = .048" in result[7]) # <i>r<sub>s</sub></i>(28) = -0.363
# Ord variables
data.data_measlevs['a'] = 'ord'
data.data_measlevs['b'] = 'ord'
result = data.explore_variable_pair('a', 'b')
self.assertTrue('-0.363' in result[4])
self.assertTrue('[-0.640, -0.003]' in result[5])
self.assertTrue("Spearman's rank-order correlation: <i>r<sub>s</sub></i>(28) = -0.36, <i>p</i> = .048" in result[6]) # <i>r<sub>s</sub></i>(28) = -0.363
data.data_measlevs['a'] = 'int'
data.data_measlevs['b'] = 'int'
# Nom variables
result = data.explore_variable_pair('c', 'd')
self.assertTrue('N of valid pairs: 30' in result[1])
self.assertTrue('N of missing pairs: 0' in result[1])
# Cramer's V
self.assertTrue('<sub>c</sub></i> = 0.372' in result[4])
# Sensitivity power analysis
# G*Power 3.1.9.6, Goodness of fit test, df=4: Contingency tables: 0.7868005
# TODO GPower gives 0.8707028 with df of 8; Seems like statsmodels GofChisquarePower calculates power
# with df=8; should we use 4 or 8 df? https://github.com/cogstat/cogstat/issues/134
self.assertTrue('(effect size is in w): 0.87' in result[6])
# Chi-squared
# jamovi v1.2.19.0: X2, df, p, N: 8.31, 4, 0.081, 30
self.assertTrue('(4, <i>N</i> = 30) = 8.31' in result[6]) # (4, <i>N</i> = 30) = 8.312
self.assertTrue('<i>p</i> = .081' in result[6])
def test_diffusion(self):
"""Test diffusion analysis"""
data_diffusion = cs.CogStatData(data=str(Path('data/diffusion.csv')))
result = data_diffusion.diffusion(error_name=['Error'], RT_name=['RT_sec'], participant_name=['Name'], condition_names=['Num1', 'Num2'])
# Drift rate
self.assertTrue('<td>zsiraf</td> <td>0.190</td> <td>0.276</td> <td>0.197</td> <td>0.235</td> <td>0.213</td>' in result[1])
# Threshold
self.assertTrue('<td>zsiraf</td> <td>0.178</td> <td>0.096</td> <td>0.171</td> <td>0.112</td> <td>0.088</td>' in result[1])
# Nondecision time
self.assertTrue('<td>zsiraf</td> <td>0.481</td> <td>0.590</td> <td>0.483</td> <td>0.561</td> <td>0.522</td>' in result[1])
def test_compare_variables(self):
"""Test compare variables"""
# 2 Int variables
result = data.compare_variables(['a', 'e'])
self.assertTrue('N of valid cases: 30' in result[1])
self.assertTrue('N of missing cases: 0' in result[1])
# Cohen's d
# CS formula: https://pingouin-stats.org/generated/pingouin.compute_effsize.html
# Based on the formula, calculated in LO Calc 6.4: 0.030004573510063
# jamovi v1.2.19.0: 0.0202; formula: https://github.com/jamovi/jmv/blob/master/R/ttestps.b.R#L54-L66
self.assertTrue("<td>Cohen's d</td> <td>0.030</td>" in result[3])
# eta-squared
# CS formula: https://pingouin-stats.org/generated/pingouin.convert_effsize.html
# Based on the formula, calculated in LO Calc 6.4: 0.0002250179634
# jamovi v1.2.19.0: 0.000
self.assertTrue('<td>Eta-squared</td> <td>0.000</td>' in result[3])
# Sample means
self.assertTrue('<td>3.1438</td> <td>3.0502</td>' in result[3])
# Hedges'g (with CI)
# CS formula: https://pingouin-stats.org/generated/pingouin.compute_effsize.html
# https://pingouin-stats.org/generated/pingouin.compute_esci.html
# Note that the latter (CI) method has changed in v0.3.5 https://pingouin-stats.org/changelog.html
# Based on the formula, calculated in LO Calc 7.0: 0.029614903724218, -0.34445335392457, 0.403683161373007
# Note that the last value is 0.404 in LO, not .403 as in pingouin
self.assertTrue("<td>Hedges' g</td> <td>0.030</td> <td>-0.344</td> <td>0.403</td>" in result[5])
self.assertTrue('<i>W</i> = 0.95, <i>p</i> = .215' in result[7]) # <i>W</i> = 0.954
# Sensitivity power analysis
# G*Power 3.1.9.6: 0.6811825
# jamovi v1.2.19.0, jpower 0.1.2: 0.681
self.assertTrue('(effect size is in d): 0.68' in result[7])
# Paired samples t-test
# jamovi v1.2.19.0: t, df, p: 0.110, 29.0, 0.913
self.assertTrue('<i>t</i>(29) = 0.11, <i>p</i> = .913' in result[7])
# 2 Int variables - non-normal
result = data.compare_variables(['e', 'f'])
self.assertTrue('<i>W</i> = 0.91, <i>p</i> = .019' in result[7]) # <i>W</i> = 0.915
self.assertTrue('<i>T</i> = 110.00, <i>p</i> = .012' in result[7])
# 3 Int variables
result = data.compare_variables(['a', 'e', 'g'])
self.assertTrue('<td>3.1438</td> <td>3.0502</td> <td>5.7295</td>' in result[3])
self.assertTrue('a: <i>W</i> = 0.96, <i>p</i> = .287' in result[7]) # <i>W</i> = 0.959
self.assertTrue('e: <i>W</i> = 0.97, <i>p</i> = .435' in result[7]) # <i>W</i> = 0.966
self.assertTrue('g: <i>W</i> = 0.95, <i>p</i> = .133' in result[7]) #x <i>W</i> = 0.946
self.assertTrue('sphericity: <i>W</i> = 0.98, <i>p</i> = .703' in result[7]) # <i>W</i> = 0.975
self.assertTrue('<i>F</i>(2, 58) = 6.17, <i>p</i> = .004' in result[7])
self.assertTrue('0.11, <i>p</i> = .913' in result[7]) # TODO keep the order of the variables, and have a fixed sign
self.assertTrue('3.17, <i>p</i> = .011' in result[7])
self.assertTrue('2.88, <i>p</i> = .015' in result[7])
# 3 Int variables, sphericity violated
result = data.compare_variables(['a', 'e', 'h'])
self.assertTrue('<td>3.1438</td> <td>3.0502</td> <td>6.5786</td>' in result[3])
self.assertTrue('a: <i>W</i> = 0.96, <i>p</i> = .287' in result[7]) # <i>W</i> = 0.959
self.assertTrue('e: <i>W</i> = 0.97, <i>p</i> = .435' in result[7]) # <i>W</i> = 0.966
self.assertTrue('h: <i>W</i> = 0.98, <i>p</i> = .824' in result[7])
self.assertTrue('sphericity: <i>W</i> = 0.79, <i>p</i> = .039' in result[7]) # <i>W</i> = 0.793
self.assertTrue('<i>F</i>(1.66, 48) = 6.16, <i>p</i> = .007' in result[7])
self.assertTrue('0.11, <i>p</i> = .913' in result[7]) # TODO keep the order of the variables, and have a fixed sign
self.assertTrue('2.68, <i>p</i> = .024' in result[7])
self.assertTrue('2.81, <i>p</i> = .026' in result[7])
# 3 Int variables, non-normal
result = data.compare_variables(['a', 'e', 'f'])
self.assertTrue('<td>3.1438</td> <td>3.0502</td> <td>5.3681</td>' in result[3])
self.assertTrue('a: <i>W</i> = 0.96, <i>p</i> = .287' in result[7]) # <i>W</i> = 0.959
self.assertTrue('e: <i>W</i> = 0.97, <i>p</i> = .435' in result[7]) # <i>W</i> = 0.966
self.assertTrue('f: <i>W</i> = 0.82, <i>p</i> < .001' in result[7]) # <i>W</i> = 0.818
self.assertTrue('χ<sup>2</sup>(2, <i>N</i> = 30) = 6.47, <i>p</i> = .039' in result[7])
# 2 × 2 Int variables
result = data.compare_variables(['a', 'b', 'e', 'f'], factors=[['first', 2], ['second', 2]])
self.assertTrue('Main effect of first: <i>F</i>(1, 29) = 6.06, <i>p</i> = .020' in result[7])
self.assertTrue('Main effect of second: <i>F</i>(1, 29) = 6.29, <i>p</i> = .018' in result[7])
self.assertTrue('Interaction of factors first, second: <i>F</i>(1, 29) = 6.04, <i>p</i> = .020' in result[7])
# 2 Ord variables
data.data_measlevs['a'] = 'ord'
data.data_measlevs['e'] = 'ord'
data.data_measlevs['f'] = 'ord'
result = data.compare_variables(['e', 'f'])
self.assertTrue('<td>2.3895</td> <td>4.2275</td>' in result[3])
self.assertTrue('<i>T</i> = 110.00, <i>p</i> = .012' in result[6])
# 3 Ord variables
result = data.compare_variables(['a', 'e', 'f'])
self.assertTrue('<td>2.8545</td> <td>2.3895</td> <td>4.2275</td>' in result[3])
self.assertTrue('χ<sup>2</sup>(2, <i>N</i> = 30) = 6.47, <i>p</i> = .039' in result[6])
data.data_measlevs['a'] = 'int'
data.data_measlevs['e'] = 'int'
data.data_measlevs['f'] = 'int'
# 2 Nom variables
result = data.compare_variables(['i', 'j'])
# TODO on Linux the row labels are 0.0 and 1.0 instead of 0 and 1
self.assertTrue('<td>0.0</td> <td>4</td> <td>9</td> <td>13</td> </tr> <tr> <td>1.0</td> <td>9</td>' in result[3])
self.assertTrue('χ<sup>2</sup>(1, <i>N</i> = 30) = 0.06, <i>p</i> = .814' in result[5]) # χ<sup>2</sup>(1, <i>N</i> = 30) = 0.0556
# 3 Nom variables
result = data.compare_variables(['i', 'j', 'k'])
self.assertTrue('<i>Q</i>(2, <i>N</i> = 30) = 0.78, <i>p</i> = .676' in result[7]) # <i>Q</i>(2, <i>N</i> = 30) = 0.783
def test_compare_groups(self):
"""Test compare groups"""
# 2 Int groups
result = data.compare_groups('l', ['m'])
self.assertTrue('<td>2.5316</td> <td>4.5759</td>' in result[3])
# Cohen's d
# CS formula: https://pingouin-stats.org/generated/pingouin.compute_effsize.html
# Based on the formula, calculated in LO Calc 6.4: -0.704171924382848
# jamovi v1.2.19.0: 0.0704
self.assertTrue("<td>Cohen's d</td> <td>-0.704</td>" in result[3])
# eta-squared
# CS formula: https://pingouin-stats.org/generated/pingouin.convert_effsize.html
# Based on the formula, calculated in LO Calc 6.4: 0.110292204104377
# jamovi v1.2.19.0: 0.117 # TODO why the difference?
self.assertTrue('<td>Eta-squared</td> <td>0.110</td>' in result[3])
# Hedges'g (with CI)
# CS formula: https://pingouin-stats.org/generated/pingouin.compute_effsize.html
# https://pingouin-stats.org/generated/pingouin.compute_esci.html
# Note that the latter (CI) method has changed in v0.3.5 https://pingouin-stats.org/changelog.html
# Based on the formula, calculated in LO Calc 7.0: -0.685140250750879, -1.45474443187683, 0.084463930375068
self.assertTrue('<td>Difference between the two groups:</td> <td>-2.0443</td> <td>-4.2157</td> <td>0.1272</td>' in result[5])
self.assertTrue("<td>Hedges' g</td> <td>-0.685</td> <td>-1.455</td> <td>0.084</td>" in result[6])
self.assertTrue('(m: 1.0): <i>W</i> = 0.96, <i>p</i> = .683' in result[8]) # <i>W</i> = 0.959
self.assertTrue('(m: 2.0): <i>W</i> = 0.98, <i>p</i> = .991' in result[8]) # <i>W</i> = 0.984
self.assertTrue('<i>W</i> = 0.30, <i>p</i> = .585' in result[8]) # <i>W</i> = 0.305
# Sensitivity power analysis
# G*Power 3.1.9.6: 1.3641059
# jamovi v1.2.19.0, jpower 0.1.2: 1.36
self.assertTrue('(effect size is in d): 1.36' in result[8])
# independent samples t-test
# jamovi v1.2.19.0: t, df, p: -1.93, 28.0, 0.064
self.assertTrue('<i>t</i>(28) = -1.93, <i>p</i> = .064' in result[8])
# Non-normal group
result = data.compare_groups('o', ['m'])
self.assertTrue('(m: 2.0): <i>W</i> = 0.81, <i>p</i> = .005' in result[8]) # <i>W</i> = 0.808
self.assertTrue('<i>U</i> = 51.00, <i>p</i> = .011' in result[8])
# Heteroscedastic groups
result = data.compare_groups('p', ['m'])
self.assertTrue('<i>t</i>(25.3) = 0.12, <i>p</i> = .907' in result[8]) # <i>t</i>(25.3) = 0.119
# TODO single case vs. group
# 3 Int groups
result = data.compare_groups('r', ['q'])
self.assertTrue('<td>3.2869</td> <td>5.0400</td> <td>7.2412</td>' in result[3])
self.assertTrue('<i>W</i> = 0.68, <i>p</i> = .517' in result[8]) # TODO this might be incorrect # <i>W</i> = 0.675
# Sensitivity power analysis
# G*Power 3.1.9.6: 0.7597473
self.assertTrue('(effect size is in f): 0.76' in result[8])
self.assertTrue('<i>F</i>(2, 27) = 4.00, <i>p</i> = .030' in result[8])
self.assertTrue('ω<sup>2</sup> = 0.167' in result[6])
# TODO post-hoc
# 3 Int groups with assumption violation
result = data.compare_groups('o', ['q'])
self.assertTrue('χ<sup>2</sup>(2, <i>N</i> = 30) = 8.37, <i>p</i> = .015' in result[8])
# 2 Ord groups
data.data_measlevs['o'] = 'ord'
result = data.compare_groups('o', ['m'])
self.assertTrue('<i>U</i> = 51.00, <i>p</i> = .011' in result[6])
# 3 Ord groups
data.data_measlevs['o'] = 'ord'
result = data.compare_groups('o', ['q'])
self.assertTrue('χ<sup>2</sup>(2, <i>N</i> = 30) = 8.37, <i>p</i> = .015' in result[6])
data.data_measlevs['o'] = 'int'
# 2 Nom groups
result = data.compare_groups('i', ['j'])
self.assertTrue('φ<i><sub>c</sub></i> = 0.154' in result[3]) # TODO validate
self.assertTrue('χ<sup>2</sup></i>(1, <i>N</i> = 30) = 0.71, <i>p</i> = .399' in result[5]) # TODO validate # χ<sup>2</sup></i>(1, <i>N</i> = 30) = 0.710
# 3 Nom groups
result = data.compare_groups('i', ['c'])
self.assertTrue('φ<i><sub>c</sub></i> = 0.009' in result[3]) # TODO validate
self.assertTrue('χ<sup>2</sup></i>(2, <i>N</i> = 30) = 0.00, <i>p</i> = .999' in result[5]) # TODO validate # χ<sup>2</sup></i>(2, <i>N</i> = 30) = 0.002
# 3 × 3 Int groups
result = data.compare_groups('a', ['c', 'd'])
self.assertTrue('<td>Mean</td> <td>1.0695</td> <td>1.8439</td> <td>2.3693</td>' in result[3])
self.assertTrue('<td>Standard deviation</td> <td>2.7005</td> <td>2.0891</td> <td>4.2610</td>' in result[3])
self.assertTrue('<td>Maximum</td> <td>4.4130</td> <td>4.7890</td> <td>9.1600</td>' in result[3])
self.assertTrue('<td>Upper quartile</td> <td>3.0000</td> <td>3.0213</td> <td>4.4028</td>' in result[3])
self.assertTrue('<td>Median</td> <td>1.3340</td> <td>2.4590</td> <td>0.9015</td>' in result[3])
self.assertTrue('<td>Lower quartile</td> <td>-0.5965</td> <td>0.8870</td> <td>-1.1320</td>' in result[3])
self.assertTrue('<td>Minimum</td> <td>-2.8030</td> <td>-2.2890</td> <td>-1.4860</td>' in result[3])
# TODO the two main effects differ from the SPSS result, see issue #91
self.assertTrue('<i>F</i>(2, 21) = 2.35, <i>p</i> = .120' in result[7])
self.assertTrue('<i>F</i>(2, 21) = 0.19, <i>p</i> = .832' in result[7]) # <i>F</i>(2, 21) = 0.185
self.assertTrue('<i>F</i>(4, 21) = 1.15, <i>p</i> = .363' in result[7])
def test_single_case(self):
# Test for the slope stat
data = cs.CogStatData(data='''group slope slope_SE
Patient 0.247 0.069
Control 0.492 0.106
Control 0.559 0.108
Control 0.63 0.116
Control 0.627 0.065
Control 0.674 0.105
Control 0.538 0.107''')
result = data.compare_groups('slope', ['group'], 'slope_SE', 25)
self.assertTrue('Test d.2: <i>t</i>(42.1) = -4.21, <i>p</i> < .001' in result[8])
result = data.compare_groups('slope', ['group'])
self.assertTrue('<i>t</i>(5) = -5.05, <i>p</i> = .004' in result[8])
if __name__ == '__main__':
unittest.main()
| gpl-3.0 |
okuraoy/mywork | mtlearn/datasets.py | 1 | 2037 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
from sklearn.datasets.base import Bunch
from os.path import join
PATH = "d:\\data"
# class Bunch(dict):
# """Container object for datasets
# Dictionary-like object that exposes its keys as attributes.
#
# See: sklearn.datasets.base.py Bunch
# """
#
# def __init__(self, **kwargs):
# super(Bunch, self).__init__(kwargs)
#
# def __setattr__(self, key, value):
# self[key] = value
#
# def __dir__(self):
# return self.keys()
#
# def __getattr__(self, key):
# try:
# return self[key]
# except KeyError:
# raise AttributeError(key)
#
# def __setstate__(self, state):
# # Bunch pickles generated with scikit-learn 0.16.* have an non
# # empty __dict__. This causes a surprising behaviour when
# # loading these pickles scikit-learn 0.17: reading bunch.key
# # uses __dict__ but assigning to bunch.key use __setattr__ and
# # only changes bunch['key']. More details can be found at:
# # https://github.com/scikit-learn/scikit-learn/issues/6196.
# # Overriding __setstate__ to be a noop has the effect of
# # ignoring the pickled __dict__
# pass
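# A minimal usage sketch of the sklearn Bunch described above (illustrative values only):
#
#   b = Bunch(data=np.zeros((2, 3)), target=np.array([0, 1]))
#   b.data is b['data']  # True: keys are exposed as attributes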
def parse_date(x):
return pd.datetime.strptime(x, '%Y-%m-%d')
def load_pcs_data():
# column: date,pcs,f1,f2,...
# sep='\001',
df = pd.read_csv(join(PATH, 'spu_pcs_20170721.csv'), sep='\001', parse_dates=['date'], date_parser=parse_date)
    df = df.sort_values(by='date')  # sort_values returns a new DataFrame, so assign it back
columns = np.array(df.columns.values)
feature_name = columns[2:]
tmp_data = np.array(df)
inx_data = tmp_data[:, 0]
target = tmp_data[:, 1]
data = tmp_data[:, 2:]
# print shape
print data.shape
print feature_name
return Bunch(data=data, target=target, feature_names=feature_name, inx=inx_data)
if __name__ == '__main__':
load_pcs_data()
| apache-2.0 |
Ziqi-Li/bknqgis | pandas/pandas/tests/plotting/test_series.py | 2 | 32812 | # coding: utf-8
""" Test cases for Series.plot """
import itertools
import pytest
from datetime import datetime
import pandas as pd
from pandas import Series, DataFrame, date_range
from pandas.compat import range, lrange
import pandas.util.testing as tm
import numpy as np
from numpy.random import randn
import pandas.plotting as plotting
from pandas.tests.plotting.common import (TestPlotBase, _check_plot_works,
_skip_if_no_scipy_gaussian_kde,
_ok_for_gaussian_kde)
tm._skip_if_no_mpl()
class TestSeriesPlots(TestPlotBase):
def setup_method(self, method):
TestPlotBase.setup_method(self, method)
import matplotlib as mpl
mpl.rcdefaults()
self.ts = tm.makeTimeSeries()
self.ts.name = 'ts'
self.series = tm.makeStringSeries()
self.series.name = 'series'
self.iseries = tm.makePeriodSeries()
self.iseries.name = 'iseries'
@pytest.mark.slow
def test_plot(self):
_check_plot_works(self.ts.plot, label='foo')
_check_plot_works(self.ts.plot, use_index=False)
axes = _check_plot_works(self.ts.plot, rot=0)
self._check_ticks_props(axes, xrot=0)
ax = _check_plot_works(self.ts.plot, style='.', logy=True)
self._check_ax_scales(ax, yaxis='log')
ax = _check_plot_works(self.ts.plot, style='.', logx=True)
self._check_ax_scales(ax, xaxis='log')
ax = _check_plot_works(self.ts.plot, style='.', loglog=True)
self._check_ax_scales(ax, xaxis='log', yaxis='log')
_check_plot_works(self.ts[:10].plot.bar)
_check_plot_works(self.ts.plot.area, stacked=False)
_check_plot_works(self.iseries.plot)
for kind in ['line', 'bar', 'barh', 'kde', 'hist', 'box']:
if not _ok_for_gaussian_kde(kind):
continue
_check_plot_works(self.series[:5].plot, kind=kind)
_check_plot_works(self.series[:10].plot.barh)
ax = _check_plot_works(Series(randn(10)).plot.bar, color='black')
self._check_colors([ax.patches[0]], facecolors=['black'])
# GH 6951
ax = _check_plot_works(self.ts.plot, subplots=True)
self._check_axes_shape(ax, axes_num=1, layout=(1, 1))
ax = _check_plot_works(self.ts.plot, subplots=True, layout=(-1, 1))
self._check_axes_shape(ax, axes_num=1, layout=(1, 1))
ax = _check_plot_works(self.ts.plot, subplots=True, layout=(1, -1))
self._check_axes_shape(ax, axes_num=1, layout=(1, 1))
@pytest.mark.slow
def test_plot_figsize_and_title(self):
# figsize and title
_, ax = self.plt.subplots()
ax = self.series.plot(title='Test', figsize=(16, 8), ax=ax)
self._check_text_labels(ax.title, 'Test')
self._check_axes_shape(ax, axes_num=1, layout=(1, 1), figsize=(16, 8))
def test_dont_modify_rcParams(self):
# GH 8242
if self.mpl_ge_1_5_0:
key = 'axes.prop_cycle'
else:
key = 'axes.color_cycle'
colors = self.plt.rcParams[key]
_, ax = self.plt.subplots()
Series([1, 2, 3]).plot(ax=ax)
assert colors == self.plt.rcParams[key]
def test_ts_line_lim(self):
fig, ax = self.plt.subplots()
ax = self.ts.plot(ax=ax)
xmin, xmax = ax.get_xlim()
lines = ax.get_lines()
assert xmin == lines[0].get_data(orig=False)[0][0]
assert xmax == lines[0].get_data(orig=False)[0][-1]
tm.close()
ax = self.ts.plot(secondary_y=True, ax=ax)
xmin, xmax = ax.get_xlim()
lines = ax.get_lines()
assert xmin == lines[0].get_data(orig=False)[0][0]
assert xmax == lines[0].get_data(orig=False)[0][-1]
def test_ts_area_lim(self):
_, ax = self.plt.subplots()
ax = self.ts.plot.area(stacked=False, ax=ax)
xmin, xmax = ax.get_xlim()
line = ax.get_lines()[0].get_data(orig=False)[0]
assert xmin == line[0]
assert xmax == line[-1]
tm.close()
# GH 7471
_, ax = self.plt.subplots()
ax = self.ts.plot.area(stacked=False, x_compat=True, ax=ax)
xmin, xmax = ax.get_xlim()
line = ax.get_lines()[0].get_data(orig=False)[0]
assert xmin == line[0]
assert xmax == line[-1]
tm.close()
tz_ts = self.ts.copy()
tz_ts.index = tz_ts.tz_localize('GMT').tz_convert('CET')
_, ax = self.plt.subplots()
ax = tz_ts.plot.area(stacked=False, x_compat=True, ax=ax)
xmin, xmax = ax.get_xlim()
line = ax.get_lines()[0].get_data(orig=False)[0]
assert xmin == line[0]
assert xmax == line[-1]
tm.close()
_, ax = self.plt.subplots()
ax = tz_ts.plot.area(stacked=False, secondary_y=True, ax=ax)
xmin, xmax = ax.get_xlim()
line = ax.get_lines()[0].get_data(orig=False)[0]
assert xmin == line[0]
assert xmax == line[-1]
def test_label(self):
s = Series([1, 2])
_, ax = self.plt.subplots()
ax = s.plot(label='LABEL', legend=True, ax=ax)
self._check_legend_labels(ax, labels=['LABEL'])
self.plt.close()
_, ax = self.plt.subplots()
ax = s.plot(legend=True, ax=ax)
self._check_legend_labels(ax, labels=['None'])
self.plt.close()
# get name from index
s.name = 'NAME'
_, ax = self.plt.subplots()
ax = s.plot(legend=True, ax=ax)
self._check_legend_labels(ax, labels=['NAME'])
self.plt.close()
# override the default
_, ax = self.plt.subplots()
ax = s.plot(legend=True, label='LABEL', ax=ax)
self._check_legend_labels(ax, labels=['LABEL'])
self.plt.close()
        # Add label info, but don't draw
_, ax = self.plt.subplots()
ax = s.plot(legend=False, label='LABEL', ax=ax)
assert ax.get_legend() is None # Hasn't been drawn
ax.legend() # draw it
self._check_legend_labels(ax, labels=['LABEL'])
def test_line_area_nan_series(self):
values = [1, 2, np.nan, 3]
s = Series(values)
ts = Series(values, index=tm.makeDateIndex(k=4))
for d in [s, ts]:
ax = _check_plot_works(d.plot)
masked = ax.lines[0].get_ydata()
# remove nan for comparison purpose
exp = np.array([1, 2, 3], dtype=np.float64)
tm.assert_numpy_array_equal(np.delete(masked.data, 2), exp)
tm.assert_numpy_array_equal(
masked.mask, np.array([False, False, True, False]))
expected = np.array([1, 2, 0, 3], dtype=np.float64)
ax = _check_plot_works(d.plot, stacked=True)
tm.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected)
ax = _check_plot_works(d.plot.area)
tm.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected)
ax = _check_plot_works(d.plot.area, stacked=False)
tm.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected)
def test_line_use_index_false(self):
s = Series([1, 2, 3], index=['a', 'b', 'c'])
s.index.name = 'The Index'
_, ax = self.plt.subplots()
ax = s.plot(use_index=False, ax=ax)
label = ax.get_xlabel()
assert label == ''
_, ax = self.plt.subplots()
ax2 = s.plot.bar(use_index=False, ax=ax)
label2 = ax2.get_xlabel()
assert label2 == ''
@pytest.mark.slow
def test_bar_log(self):
expected = np.array([1., 10., 100., 1000.])
if not self.mpl_le_1_2_1:
expected = np.hstack((.1, expected, 1e4))
_, ax = self.plt.subplots()
ax = Series([200, 500]).plot.bar(log=True, ax=ax)
tm.assert_numpy_array_equal(ax.yaxis.get_ticklocs(), expected)
tm.close()
_, ax = self.plt.subplots()
ax = Series([200, 500]).plot.barh(log=True, ax=ax)
tm.assert_numpy_array_equal(ax.xaxis.get_ticklocs(), expected)
tm.close()
# GH 9905
expected = np.array([1.0e-03, 1.0e-02, 1.0e-01, 1.0e+00])
if not self.mpl_le_1_2_1:
expected = np.hstack((1.0e-04, expected, 1.0e+01))
if self.mpl_ge_2_0_0:
expected = np.hstack((1.0e-05, expected))
_, ax = self.plt.subplots()
ax = Series([0.1, 0.01, 0.001]).plot(log=True, kind='bar', ax=ax)
ymin = 0.0007943282347242822 if self.mpl_ge_2_0_0 else 0.001
ymax = 0.12589254117941673 if self.mpl_ge_2_0_0 else .10000000000000001
res = ax.get_ylim()
tm.assert_almost_equal(res[0], ymin)
tm.assert_almost_equal(res[1], ymax)
tm.assert_numpy_array_equal(ax.yaxis.get_ticklocs(), expected)
tm.close()
_, ax = self.plt.subplots()
ax = Series([0.1, 0.01, 0.001]).plot(log=True, kind='barh', ax=ax)
res = ax.get_xlim()
tm.assert_almost_equal(res[0], ymin)
tm.assert_almost_equal(res[1], ymax)
tm.assert_numpy_array_equal(ax.xaxis.get_ticklocs(), expected)
@pytest.mark.slow
def test_bar_ignore_index(self):
df = Series([1, 2, 3, 4], index=['a', 'b', 'c', 'd'])
_, ax = self.plt.subplots()
ax = df.plot.bar(use_index=False, ax=ax)
self._check_text_labels(ax.get_xticklabels(), ['0', '1', '2', '3'])
def test_rotation(self):
df = DataFrame(randn(5, 5))
# Default rot 0
_, ax = self.plt.subplots()
axes = df.plot(ax=ax)
self._check_ticks_props(axes, xrot=0)
_, ax = self.plt.subplots()
axes = df.plot(rot=30, ax=ax)
self._check_ticks_props(axes, xrot=30)
def test_irregular_datetime(self):
rng = date_range('1/1/2000', '3/1/2000')
rng = rng[[0, 1, 2, 3, 5, 9, 10, 11, 12]]
ser = Series(randn(len(rng)), rng)
_, ax = self.plt.subplots()
ax = ser.plot(ax=ax)
xp = datetime(1999, 1, 1).toordinal()
ax.set_xlim('1/1/1999', '1/1/2001')
assert xp == ax.get_xlim()[0]
@pytest.mark.slow
def test_pie_series(self):
        # if the sum of values is less than 1.0, pie handles them as rates and draws a
        # semicircle.
series = Series(np.random.randint(1, 5),
index=['a', 'b', 'c', 'd', 'e'], name='YLABEL')
ax = _check_plot_works(series.plot.pie)
self._check_text_labels(ax.texts, series.index)
assert ax.get_ylabel() == 'YLABEL'
# without wedge labels
ax = _check_plot_works(series.plot.pie, labels=None)
self._check_text_labels(ax.texts, [''] * 5)
# with less colors than elements
color_args = ['r', 'g', 'b']
ax = _check_plot_works(series.plot.pie, colors=color_args)
color_expected = ['r', 'g', 'b', 'r', 'g']
self._check_colors(ax.patches, facecolors=color_expected)
# with labels and colors
labels = ['A', 'B', 'C', 'D', 'E']
color_args = ['r', 'g', 'b', 'c', 'm']
ax = _check_plot_works(series.plot.pie, labels=labels,
colors=color_args)
self._check_text_labels(ax.texts, labels)
self._check_colors(ax.patches, facecolors=color_args)
# with autopct and fontsize
ax = _check_plot_works(series.plot.pie, colors=color_args,
autopct='%.2f', fontsize=7)
pcts = ['{0:.2f}'.format(s * 100)
for s in series.values / float(series.sum())]
iters = [iter(series.index), iter(pcts)]
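        # interleave the index labels with their autopct strings: [label0, pct0, label1, pct1, ...]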
expected_texts = list(next(it) for it in itertools.cycle(iters))
self._check_text_labels(ax.texts, expected_texts)
for t in ax.texts:
assert t.get_fontsize() == 7
# includes negative value
with pytest.raises(ValueError):
series = Series([1, 2, 0, 4, -1], index=['a', 'b', 'c', 'd', 'e'])
series.plot.pie()
# includes nan
series = Series([1, 2, np.nan, 4], index=['a', 'b', 'c', 'd'],
name='YLABEL')
ax = _check_plot_works(series.plot.pie)
self._check_text_labels(ax.texts, ['a', 'b', '', 'd'])
def test_pie_nan(self):
s = Series([1, np.nan, 1, 1])
_, ax = self.plt.subplots()
ax = s.plot.pie(legend=True, ax=ax)
expected = ['0', '', '2', '3']
result = [x.get_text() for x in ax.texts]
assert result == expected
@pytest.mark.slow
def test_hist_df_kwargs(self):
df = DataFrame(np.random.randn(10, 2))
_, ax = self.plt.subplots()
ax = df.plot.hist(bins=5, ax=ax)
assert len(ax.patches) == 10
@pytest.mark.slow
def test_hist_df_with_nonnumerics(self):
# GH 9853
with tm.RNGContext(1):
df = DataFrame(
np.random.randn(10, 4), columns=['A', 'B', 'C', 'D'])
df['E'] = ['x', 'y'] * 5
_, ax = self.plt.subplots()
ax = df.plot.hist(bins=5, ax=ax)
assert len(ax.patches) == 20
_, ax = self.plt.subplots()
ax = df.plot.hist(ax=ax) # bins=10
assert len(ax.patches) == 40
@pytest.mark.slow
def test_hist_legacy(self):
_check_plot_works(self.ts.hist)
_check_plot_works(self.ts.hist, grid=False)
_check_plot_works(self.ts.hist, figsize=(8, 10))
# _check_plot_works adds an ax so catch warning. see GH #13188
with tm.assert_produces_warning(UserWarning):
_check_plot_works(self.ts.hist,
by=self.ts.index.month)
with tm.assert_produces_warning(UserWarning):
_check_plot_works(self.ts.hist,
by=self.ts.index.month, bins=5)
fig, ax = self.plt.subplots(1, 1)
_check_plot_works(self.ts.hist, ax=ax)
_check_plot_works(self.ts.hist, ax=ax, figure=fig)
_check_plot_works(self.ts.hist, figure=fig)
tm.close()
fig, (ax1, ax2) = self.plt.subplots(1, 2)
_check_plot_works(self.ts.hist, figure=fig, ax=ax1)
_check_plot_works(self.ts.hist, figure=fig, ax=ax2)
with pytest.raises(ValueError):
self.ts.hist(by=self.ts.index, figure=fig)
@pytest.mark.slow
def test_hist_bins_legacy(self):
df = DataFrame(np.random.randn(10, 2))
ax = df.hist(bins=2)[0][0]
assert len(ax.patches) == 2
@pytest.mark.slow
def test_hist_layout(self):
df = self.hist_df
with pytest.raises(ValueError):
df.height.hist(layout=(1, 1))
with pytest.raises(ValueError):
df.height.hist(layout=[1, 1])
@pytest.mark.slow
def test_hist_layout_with_by(self):
df = self.hist_df
# _check_plot_works adds an ax so catch warning. see GH #13188
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(df.height.hist,
by=df.gender, layout=(2, 1))
self._check_axes_shape(axes, axes_num=2, layout=(2, 1))
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(df.height.hist,
by=df.gender, layout=(3, -1))
self._check_axes_shape(axes, axes_num=2, layout=(3, 1))
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(df.height.hist,
by=df.category, layout=(4, 1))
self._check_axes_shape(axes, axes_num=4, layout=(4, 1))
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(df.height.hist,
by=df.category, layout=(2, -1))
self._check_axes_shape(axes, axes_num=4, layout=(2, 2))
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(df.height.hist,
by=df.category, layout=(3, -1))
self._check_axes_shape(axes, axes_num=4, layout=(3, 2))
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(df.height.hist,
by=df.category, layout=(-1, 4))
self._check_axes_shape(axes, axes_num=4, layout=(1, 4))
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(df.height.hist,
by=df.classroom, layout=(2, 2))
self._check_axes_shape(axes, axes_num=3, layout=(2, 2))
axes = df.height.hist(by=df.category, layout=(4, 2), figsize=(12, 7))
self._check_axes_shape(axes, axes_num=4, layout=(4, 2),
figsize=(12, 7))
@pytest.mark.slow
def test_hist_no_overlap(self):
from matplotlib.pyplot import subplot, gcf
x = Series(randn(2))
y = Series(randn(2))
subplot(121)
x.hist()
subplot(122)
y.hist()
fig = gcf()
axes = fig.axes if self.mpl_ge_1_5_0 else fig.get_axes()
assert len(axes) == 2
@pytest.mark.slow
def test_hist_secondary_legend(self):
# GH 9610
df = DataFrame(np.random.randn(30, 4), columns=list('abcd'))
# primary -> secondary
_, ax = self.plt.subplots()
ax = df['a'].plot.hist(legend=True, ax=ax)
df['b'].plot.hist(ax=ax, legend=True, secondary_y=True)
        # both legends are drawn on left ax
# left and right axis must be visible
self._check_legend_labels(ax, labels=['a', 'b (right)'])
assert ax.get_yaxis().get_visible()
assert ax.right_ax.get_yaxis().get_visible()
tm.close()
# secondary -> secondary
_, ax = self.plt.subplots()
ax = df['a'].plot.hist(legend=True, secondary_y=True, ax=ax)
df['b'].plot.hist(ax=ax, legend=True, secondary_y=True)
        # both legends are drawn on left ax
# left axis must be invisible, right axis must be visible
self._check_legend_labels(ax.left_ax,
labels=['a (right)', 'b (right)'])
assert not ax.left_ax.get_yaxis().get_visible()
assert ax.get_yaxis().get_visible()
tm.close()
# secondary -> primary
_, ax = self.plt.subplots()
ax = df['a'].plot.hist(legend=True, secondary_y=True, ax=ax)
# right axes is returned
df['b'].plot.hist(ax=ax, legend=True)
        # both legends are drawn on left ax
# left and right axis must be visible
self._check_legend_labels(ax.left_ax, labels=['a (right)', 'b'])
assert ax.left_ax.get_yaxis().get_visible()
assert ax.get_yaxis().get_visible()
tm.close()
@pytest.mark.slow
def test_df_series_secondary_legend(self):
# GH 9779
df = DataFrame(np.random.randn(30, 3), columns=list('abc'))
s = Series(np.random.randn(30), name='x')
# primary -> secondary (without passing ax)
_, ax = self.plt.subplots()
ax = df.plot(ax=ax)
s.plot(legend=True, secondary_y=True, ax=ax)
        # both legends are drawn on left ax
# left and right axis must be visible
self._check_legend_labels(ax, labels=['a', 'b', 'c', 'x (right)'])
assert ax.get_yaxis().get_visible()
assert ax.right_ax.get_yaxis().get_visible()
tm.close()
# primary -> secondary (with passing ax)
_, ax = self.plt.subplots()
ax = df.plot(ax=ax)
s.plot(ax=ax, legend=True, secondary_y=True)
        # both legends are drawn on left ax
# left and right axis must be visible
self._check_legend_labels(ax, labels=['a', 'b', 'c', 'x (right)'])
assert ax.get_yaxis().get_visible()
assert ax.right_ax.get_yaxis().get_visible()
tm.close()
        # secondary -> secondary (without passing ax)
_, ax = self.plt.subplots()
ax = df.plot(secondary_y=True, ax=ax)
s.plot(legend=True, secondary_y=True, ax=ax)
        # both legends are drawn on left ax
# left axis must be invisible and right axis must be visible
expected = ['a (right)', 'b (right)', 'c (right)', 'x (right)']
self._check_legend_labels(ax.left_ax, labels=expected)
assert not ax.left_ax.get_yaxis().get_visible()
assert ax.get_yaxis().get_visible()
tm.close()
# secondary -> secondary (with passing ax)
_, ax = self.plt.subplots()
ax = df.plot(secondary_y=True, ax=ax)
s.plot(ax=ax, legend=True, secondary_y=True)
        # both legends are drawn on left ax
# left axis must be invisible and right axis must be visible
expected = ['a (right)', 'b (right)', 'c (right)', 'x (right)']
self._check_legend_labels(ax.left_ax, expected)
assert not ax.left_ax.get_yaxis().get_visible()
assert ax.get_yaxis().get_visible()
tm.close()
# secondary -> secondary (with passing ax)
_, ax = self.plt.subplots()
ax = df.plot(secondary_y=True, mark_right=False, ax=ax)
s.plot(ax=ax, legend=True, secondary_y=True)
        # both legends are drawn on left ax
# left axis must be invisible and right axis must be visible
expected = ['a', 'b', 'c', 'x (right)']
self._check_legend_labels(ax.left_ax, expected)
assert not ax.left_ax.get_yaxis().get_visible()
assert ax.get_yaxis().get_visible()
tm.close()
@pytest.mark.slow
def test_plot_fails_with_dupe_color_and_style(self):
x = Series(randn(2))
with pytest.raises(ValueError):
_, ax = self.plt.subplots()
x.plot(style='k--', color='k', ax=ax)
@pytest.mark.slow
def test_hist_kde(self):
_, ax = self.plt.subplots()
ax = self.ts.plot.hist(logy=True, ax=ax)
self._check_ax_scales(ax, yaxis='log')
xlabels = ax.get_xticklabels()
# ticks are values, thus ticklabels are blank
self._check_text_labels(xlabels, [''] * len(xlabels))
ylabels = ax.get_yticklabels()
self._check_text_labels(ylabels, [''] * len(ylabels))
tm._skip_if_no_scipy()
_skip_if_no_scipy_gaussian_kde()
_check_plot_works(self.ts.plot.kde)
_check_plot_works(self.ts.plot.density)
_, ax = self.plt.subplots()
ax = self.ts.plot.kde(logy=True, ax=ax)
self._check_ax_scales(ax, yaxis='log')
xlabels = ax.get_xticklabels()
self._check_text_labels(xlabels, [''] * len(xlabels))
ylabels = ax.get_yticklabels()
self._check_text_labels(ylabels, [''] * len(ylabels))
@pytest.mark.slow
def test_kde_kwargs(self):
tm._skip_if_no_scipy()
_skip_if_no_scipy_gaussian_kde()
from numpy import linspace
_check_plot_works(self.ts.plot.kde, bw_method=.5,
ind=linspace(-100, 100, 20))
_check_plot_works(self.ts.plot.density, bw_method=.5,
ind=linspace(-100, 100, 20))
_, ax = self.plt.subplots()
ax = self.ts.plot.kde(logy=True, bw_method=.5,
ind=linspace(-100, 100, 20), ax=ax)
self._check_ax_scales(ax, yaxis='log')
self._check_text_labels(ax.yaxis.get_label(), 'Density')
@pytest.mark.slow
def test_kde_missing_vals(self):
tm._skip_if_no_scipy()
_skip_if_no_scipy_gaussian_kde()
s = Series(np.random.uniform(size=50))
s[0] = np.nan
axes = _check_plot_works(s.plot.kde)
# gh-14821: check if the values have any missing values
assert any(~np.isnan(axes.lines[0].get_xdata()))
@pytest.mark.slow
def test_hist_kwargs(self):
_, ax = self.plt.subplots()
ax = self.ts.plot.hist(bins=5, ax=ax)
assert len(ax.patches) == 5
self._check_text_labels(ax.yaxis.get_label(), 'Frequency')
tm.close()
if self.mpl_ge_1_3_1:
_, ax = self.plt.subplots()
ax = self.ts.plot.hist(orientation='horizontal', ax=ax)
self._check_text_labels(ax.xaxis.get_label(), 'Frequency')
tm.close()
_, ax = self.plt.subplots()
ax = self.ts.plot.hist(align='left', stacked=True, ax=ax)
tm.close()
@pytest.mark.slow
def test_hist_kde_color(self):
_, ax = self.plt.subplots()
ax = self.ts.plot.hist(logy=True, bins=10, color='b', ax=ax)
self._check_ax_scales(ax, yaxis='log')
assert len(ax.patches) == 10
self._check_colors(ax.patches, facecolors=['b'] * 10)
tm._skip_if_no_scipy()
_skip_if_no_scipy_gaussian_kde()
_, ax = self.plt.subplots()
ax = self.ts.plot.kde(logy=True, color='r', ax=ax)
self._check_ax_scales(ax, yaxis='log')
lines = ax.get_lines()
assert len(lines) == 1
self._check_colors(lines, ['r'])
@pytest.mark.slow
def test_boxplot_series(self):
_, ax = self.plt.subplots()
ax = self.ts.plot.box(logy=True, ax=ax)
self._check_ax_scales(ax, yaxis='log')
xlabels = ax.get_xticklabels()
self._check_text_labels(xlabels, [self.ts.name])
ylabels = ax.get_yticklabels()
self._check_text_labels(ylabels, [''] * len(ylabels))
@pytest.mark.slow
def test_kind_both_ways(self):
s = Series(range(3))
kinds = (plotting._core._common_kinds +
plotting._core._series_kinds)
_, ax = self.plt.subplots()
for kind in kinds:
if not _ok_for_gaussian_kde(kind):
continue
s.plot(kind=kind, ax=ax)
getattr(s.plot, kind)()
@pytest.mark.slow
def test_invalid_plot_data(self):
s = Series(list('abcd'))
_, ax = self.plt.subplots()
for kind in plotting._core._common_kinds:
if not _ok_for_gaussian_kde(kind):
continue
with pytest.raises(TypeError):
s.plot(kind=kind, ax=ax)
@pytest.mark.slow
def test_valid_object_plot(self):
s = Series(lrange(10), dtype=object)
for kind in plotting._core._common_kinds:
if not _ok_for_gaussian_kde(kind):
continue
_check_plot_works(s.plot, kind=kind)
def test_partially_invalid_plot_data(self):
s = Series(['a', 'b', 1.0, 2])
_, ax = self.plt.subplots()
for kind in plotting._core._common_kinds:
if not _ok_for_gaussian_kde(kind):
continue
with pytest.raises(TypeError):
s.plot(kind=kind, ax=ax)
def test_invalid_kind(self):
s = Series([1, 2])
with pytest.raises(ValueError):
s.plot(kind='aasdf')
@pytest.mark.slow
def test_dup_datetime_index_plot(self):
dr1 = date_range('1/1/2009', periods=4)
dr2 = date_range('1/2/2009', periods=4)
index = dr1.append(dr2)
values = randn(index.size)
s = Series(values, index=index)
_check_plot_works(s.plot)
@pytest.mark.slow
def test_errorbar_plot(self):
s = Series(np.arange(10), name='x')
s_err = np.random.randn(10)
d_err = DataFrame(randn(10, 2), index=s.index, columns=['x', 'y'])
# test line and bar plots
kinds = ['line', 'bar']
for kind in kinds:
ax = _check_plot_works(s.plot, yerr=Series(s_err), kind=kind)
self._check_has_errorbars(ax, xerr=0, yerr=1)
ax = _check_plot_works(s.plot, yerr=s_err, kind=kind)
self._check_has_errorbars(ax, xerr=0, yerr=1)
ax = _check_plot_works(s.plot, yerr=s_err.tolist(), kind=kind)
self._check_has_errorbars(ax, xerr=0, yerr=1)
ax = _check_plot_works(s.plot, yerr=d_err, kind=kind)
self._check_has_errorbars(ax, xerr=0, yerr=1)
ax = _check_plot_works(s.plot, xerr=0.2, yerr=0.2, kind=kind)
self._check_has_errorbars(ax, xerr=1, yerr=1)
ax = _check_plot_works(s.plot, xerr=s_err)
self._check_has_errorbars(ax, xerr=1, yerr=0)
# test time series plotting
ix = date_range('1/1/2000', '1/1/2001', freq='M')
ts = Series(np.arange(12), index=ix, name='x')
ts_err = Series(np.random.randn(12), index=ix)
td_err = DataFrame(randn(12, 2), index=ix, columns=['x', 'y'])
ax = _check_plot_works(ts.plot, yerr=ts_err)
self._check_has_errorbars(ax, xerr=0, yerr=1)
ax = _check_plot_works(ts.plot, yerr=td_err)
self._check_has_errorbars(ax, xerr=0, yerr=1)
# check incorrect lengths and types
with pytest.raises(ValueError):
s.plot(yerr=np.arange(11))
s_err = ['zzz'] * 10
# in mpl 1.5+ this is a TypeError
with pytest.raises((ValueError, TypeError)):
s.plot(yerr=s_err)
def test_table(self):
_check_plot_works(self.series.plot, table=True)
_check_plot_works(self.series.plot, table=self.series)
@pytest.mark.slow
def test_series_grid_settings(self):
# Make sure plot defaults to rcParams['axes.grid'] setting, GH 9792
self._check_grid_settings(Series([1, 2, 3]),
plotting._core._series_kinds +
plotting._core._common_kinds)
@pytest.mark.slow
def test_standard_colors(self):
from pandas.plotting._style import _get_standard_colors
for c in ['r', 'red', 'green', '#FF0000']:
result = _get_standard_colors(1, color=c)
assert result == [c]
result = _get_standard_colors(1, color=[c])
assert result == [c]
result = _get_standard_colors(3, color=c)
assert result == [c] * 3
result = _get_standard_colors(3, color=[c])
assert result == [c] * 3
@pytest.mark.slow
def test_standard_colors_all(self):
import matplotlib.colors as colors
from pandas.plotting._style import _get_standard_colors
# multiple colors like mediumaquamarine
for c in colors.cnames:
result = _get_standard_colors(num_colors=1, color=c)
assert result == [c]
result = _get_standard_colors(num_colors=1, color=[c])
assert result == [c]
result = _get_standard_colors(num_colors=3, color=c)
assert result == [c] * 3
result = _get_standard_colors(num_colors=3, color=[c])
assert result == [c] * 3
# single letter colors like k
for c in colors.ColorConverter.colors:
result = _get_standard_colors(num_colors=1, color=c)
assert result == [c]
result = _get_standard_colors(num_colors=1, color=[c])
assert result == [c]
result = _get_standard_colors(num_colors=3, color=c)
assert result == [c] * 3
result = _get_standard_colors(num_colors=3, color=[c])
assert result == [c] * 3
def test_series_plot_color_kwargs(self):
# GH1890
_, ax = self.plt.subplots()
ax = Series(np.arange(12) + 1).plot(color='green', ax=ax)
self._check_colors(ax.get_lines(), linecolors=['green'])
def test_time_series_plot_color_kwargs(self):
# #1890
_, ax = self.plt.subplots()
ax = Series(np.arange(12) + 1, index=date_range(
'1/1/2000', periods=12)).plot(color='green', ax=ax)
self._check_colors(ax.get_lines(), linecolors=['green'])
def test_time_series_plot_color_with_empty_kwargs(self):
import matplotlib as mpl
if self.mpl_ge_1_5_0:
def_colors = self._maybe_unpack_cycler(mpl.rcParams)
else:
def_colors = mpl.rcParams['axes.color_cycle']
index = date_range('1/1/2000', periods=12)
s = Series(np.arange(1, 13), index=index)
ncolors = 3
_, ax = self.plt.subplots()
for i in range(ncolors):
ax = s.plot(ax=ax)
self._check_colors(ax.get_lines(), linecolors=def_colors[:ncolors])
def test_xticklabels(self):
# GH11529
s = Series(np.arange(10), index=['P%02d' % i for i in range(10)])
_, ax = self.plt.subplots()
ax = s.plot(xticks=[0, 3, 5, 9], ax=ax)
exp = ['P%02d' % i for i in [0, 3, 5, 9]]
self._check_text_labels(ax.get_xticklabels(), exp)
def test_custom_business_day_freq(self):
# GH7222
from pandas.tseries.offsets import CustomBusinessDay
s = Series(range(100, 121), index=pd.bdate_range(
start='2014-05-01', end='2014-06-01',
freq=CustomBusinessDay(holidays=['2014-05-26'])))
_check_plot_works(s.plot)
| gpl-2.0 |
cdr-stats/cdr-stats | cdr_stats/voip_billing/views.py | 2 | 10687 | #
# CDR-Stats License
# http://www.cdr-stats.org
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (C) 2011-2015 Star2Billing S.L.
#
# The Initial Developer of the Original Code is
# Arezqui Belaid <[email protected]>
#
from django.contrib.auth.decorators import login_required, permission_required
from django.http import HttpResponse
from django.shortcuts import render_to_response
from django.template.context import RequestContext
from voip_billing.models import VoIPRetailRate
from voip_billing.forms import PrefixRetailRateForm, SimulatorForm, BillingReportForm
from voip_billing.function_def import prefix_allowed_to_call
from voip_billing.rate_engine import rate_engine
from voip_billing.constants import RATE_COLUMN_NAME
from aggregator.pandas_cdr import get_report_cdr_per_switch
from aggregator.aggregate_cdr import custom_sql_aggr_top_country
from cdr.decorators import check_user_detail
from cdr.functions_def import get_switch_ip_addr, calculate_act_acd
from cdr.constants import Export_choice
from common.helpers import trunc_date_start, trunc_date_end
from django_lets_go.common_functions import getvar, get_pagination_vars
from datetime import datetime
import logging
import requests
import ast
import tablib
from apirest.view_voip_rate import find_rates
def rest_api_call(request, api_url):
final_rate_list = []
response = False
try:
response = requests.get(api_url, auth=(request.user, request.user), timeout=1.0)
        logging.debug('API requests.get response: ' + str(response))
except requests.exceptions.Timeout:
        # TODO: we may want to handle the error more nicely
logging.debug('API Timeout Error : ' + api_url)
except:
logging.debug('API Error : ' + api_url)
if response and response.status_code == 200:
        # the API returns a string, so we need to convert the response into a list of dicts
rate_list = response.content.replace('[', '').replace(']', '').replace('}, {', '}|{').split('|')
for i in rate_list:
if i:
# convert string into dict
final_rate_list.append(ast.literal_eval(i))
return final_rate_list
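# Illustrative sketch (hypothetical values) of the conversion done in rest_api_call(): if the
# API body is "[{'prefix': 34, 'prefix__destination': 'Spain', 'retail_rate': '0.036'}, {...}]"
# then final_rate_list becomes a Python list of those dicts, one per rate entry.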
@permission_required('user_profile.call_rate', login_url='/')
@login_required
@check_user_detail('voipplan')
def voip_rates(request):
"""List voip call rates according to country prefix
**Attributes**:
* ``template`` - voip_billing/rates.html
* ``form`` - PrefixRetailRateForm
**Logic Description**:
get all call rates from voip rate API and list them in template
with pagination & sorting column
"""
form = PrefixRetailRateForm(request.POST or None)
final_rate_list = []
# Get pagination data
sort_col_field_list = ['prefix', 'retail_rate', 'destination']
page_data = get_pagination_vars(request, sort_col_field_list, default_sort_field='prefix')
sort_order = page_data['sort_order']
order = 'ASC'
if "-" in sort_order:
order = 'DESC'
sort_order = sort_order[1:]
dialcode = ''
if form.is_valid():
dialcode = request.POST.get('prefix')
request.session['dialcode'] = dialcode
else:
# pagination with prefix code
if (request.session.get('dialcode') and (request.GET.get('page') or request.GET.get('sort_by'))):
dialcode = request.session.get('dialcode')
form = PrefixRetailRateForm(initial={'prefix': dialcode})
else:
# Reset variables
request.session['dialcode'] = ''
dialcode = ''
if hasattr(request.user, 'userprofile'):
voipplan_id = request.user.userprofile.voipplan_id
if dialcode:
final_rate_list = find_rates(voipplan_id, dialcode=dialcode, sort_field=sort_order, order=order)
else:
final_rate_list = find_rates(voipplan_id, dialcode=None, sort_field=sort_order, order=order)
else:
final_rate_list = []
variables = {
'form': form,
'rate_list': final_rate_list,
'rate_list_count': len(final_rate_list),
'col_name_with_order': page_data['col_name_with_order'],
'RATE_COLUMN_NAME': RATE_COLUMN_NAME,
'sort_order': sort_order,
'up_icon': '<i class="glyphicon glyphicon-chevron-up"></i>',
'down_icon': '<i class="glyphicon glyphicon-chevron-down"></i>'
}
return render_to_response('voip_billing/rates.html',
variables,
context_instance=RequestContext(request))
@permission_required('user_profile.export_call_rate', login_url='/')
@login_required
def export_rate(request):
"""
**Logic Description**:
        get the prefix rates from the voip rate API
        according to the search parameters & store them in a CSV file
"""
format_type = request.GET['format']
response = HttpResponse(content_type='text/%s' % format_type)
response['Content-Disposition'] = 'attachment;filename=call_rate.%s' % format_type
headers = ('prefix', 'destination', 'retail_rate')
final_result = []
if request.session.get('session_api_url'):
api_url = request.session['session_api_url']
final_result = rest_api_call(request, api_url)
list_val = []
for row in final_result:
list_val.append((row['prefix'], row['prefix__destination'], row['retail_rate']))
data = tablib.Dataset(*list_val, headers=headers)
if format_type == Export_choice.XLS:
response.write(data.xls)
elif format_type == Export_choice.CSV:
response.write(data.csv)
elif format_type == Export_choice.JSON:
response.write(data.json)
return response
@permission_required('user_profile.simulator', login_url='/')
@check_user_detail('voipplan')
@login_required
def simulator(request):
"""Client Simulator
To view rate according to VoIP Plan & Destination No.
**Attributes**:
* ``template`` - voip_billing/simulator.html
* ``form`` - SimulatorForm
**Logic Description**:
get min call rates for destination from rate_engine and display them in template
"""
data = []
form = SimulatorForm(request.user, request.POST or None)
# Get Voip Plan ID according to USER
if form.is_valid():
        # Is recipient_phone_no/destination no a valid prefix
        # (i.e., not a banned prefix)?
destination_no = request.POST.get("destination_no")
if hasattr(request.user, 'userprofile'):
voipplan_id = request.user.userprofile.voipplan_id
allowed = prefix_allowed_to_call(destination_no, voipplan_id)
if allowed:
rates = rate_engine(voipplan_id=voipplan_id, dest_number=destination_no)
for rate in rates:
r_r_plan = VoIPRetailRate.objects.get(id=rate.rrid)
data.append((voipplan_id,
r_r_plan.voip_retail_plan_id.name,
rate.retail_rate))
data = {
'form': form,
'data': data,
}
return render_to_response('voip_billing/simulator.html',
data,
context_instance=RequestContext(request))
@permission_required('user_profile.billing_report', login_url='/')
@check_user_detail('accountcode,voipplan')
@login_required
def billing_report(request):
"""CDR billing graph by daily basis
**Attributes**:
* ``template`` - voip_billing/billing_report.html
* ``form`` - BillingReportForm
**Logic Description**:
Retrieve call records from PostgreSQL and build the
daily billing analytics for given date range
"""
switch_id = 0
tday = datetime.today()
total_data = []
charttype = "lineWithFocusChart"
hourly_chartdata = {"x": []}
form = BillingReportForm(request.POST or None,
initial={'from_date': tday.strftime('%Y-%m-%d 00:00'),
'to_date': tday.strftime('%Y-%m-%d 23:55'),
'switch_id': switch_id})
start_date = trunc_date_start(tday)
end_date = trunc_date_end(tday)
if form.is_valid():
from_date = getvar(request, 'from_date')
to_date = getvar(request, 'to_date')
start_date = trunc_date_start(from_date)
end_date = trunc_date_end(to_date)
switch_id = getvar(request, 'switch_id')
metrics = ['buy_cost', 'sell_cost']
hourly_data = get_report_cdr_per_switch(request.user, 'hour', start_date, end_date, switch_id)
hourly_chartdata['x'] = hourly_data["nbcalls"]["x_timestamp"]
i = 0
for metric in metrics:
extra_serie = {
"tooltip": {"y_start": "", "y_end": " " + metric},
"date_format": "%d %b %y %H:%M%p"
}
for switch in hourly_data[metric]["columns"]:
i = i + 1
hourly_chartdata['name' + str(i)] = get_switch_ip_addr(switch) + "_" + metric
hourly_chartdata['y' + str(i)] = hourly_data[metric]["values"][str(switch)]
hourly_chartdata['extra' + str(i)] = extra_serie
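    # Sketch of the structure built above for the nvd3 lineWithFocusChart (assumed shape):
    # hourly_chartdata = {"x": [timestamps], "name1": "<switch_ip>_buy_cost", "y1": [values],
    #                     "extra1": {tooltip/date_format}, "name2": ..., "y2": ..., ...}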
total_calls = hourly_data["nbcalls"]["total"]
total_duration = hourly_data["duration"]["total"]
total_billsec = hourly_data["billsec"]["total"]
total_buy_cost = hourly_data["buy_cost"]["total"]
total_sell_cost = hourly_data["sell_cost"]["total"]
# Calculate the Average Time of Call
metric_aggr = calculate_act_acd(total_calls, total_duration)
# Get top 10 of country calls
country_data = custom_sql_aggr_top_country(request.user, switch_id, 10, start_date, end_date)
data = {
'form': form,
'total_data': total_data,
'start_date': start_date,
'end_date': end_date,
'charttype': charttype,
'chartdata': hourly_chartdata,
'chartcontainer': 'chart_container',
'extra': {
'x_is_date': True,
'x_axis_format': '%d %b %Y',
'tag_script_js': True,
'jquery_on_ready': True,
},
'total_calls': total_calls,
'total_duration': total_duration,
'total_billsec': total_billsec,
'total_buy_cost': total_buy_cost,
'total_sell_cost': total_sell_cost,
'metric_aggr': metric_aggr,
'country_data': country_data,
}
return render_to_response('voip_billing/billing_report.html',
data,
context_instance=RequestContext(request))
| mpl-2.0 |
classicboyir/BuildingMachineLearningSystemsWithPython | ch09/01_fft_based_classifier.py | 24 | 3740 | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
import numpy as np
from collections import defaultdict
from sklearn.metrics import precision_recall_curve, roc_curve
from sklearn.metrics import auc
from sklearn.cross_validation import ShuffleSplit
from sklearn.metrics import confusion_matrix
from utils import plot_pr, plot_roc, plot_confusion_matrix, GENRE_LIST
from fft import read_fft
genre_list = GENRE_LIST
def train_model(clf_factory, X, Y, name, plot=False):
labels = np.unique(Y)
cv = ShuffleSplit(
n=len(X), n_iter=1, test_size=0.3, indices=True, random_state=0)
train_errors = []
test_errors = []
scores = []
pr_scores = defaultdict(list)
precisions, recalls, thresholds = defaultdict(
list), defaultdict(list), defaultdict(list)
roc_scores = defaultdict(list)
tprs = defaultdict(list)
fprs = defaultdict(list)
clfs = [] # just to later get the median
cms = []
for train, test in cv:
X_train, y_train = X[train], Y[train]
X_test, y_test = X[test], Y[test]
clf = clf_factory()
clf.fit(X_train, y_train)
clfs.append(clf)
train_score = clf.score(X_train, y_train)
test_score = clf.score(X_test, y_test)
scores.append(test_score)
train_errors.append(1 - train_score)
test_errors.append(1 - test_score)
y_pred = clf.predict(X_test)
cm = confusion_matrix(y_test, y_pred)
cms.append(cm)
for label in labels:
y_label_test = np.asarray(y_test == label, dtype=int)
proba = clf.predict_proba(X_test)
proba_label = proba[:, label]
precision, recall, pr_thresholds = precision_recall_curve(
y_label_test, proba_label)
pr_scores[label].append(auc(recall, precision))
precisions[label].append(precision)
recalls[label].append(recall)
thresholds[label].append(pr_thresholds)
fpr, tpr, roc_thresholds = roc_curve(y_label_test, proba_label)
roc_scores[label].append(auc(fpr, tpr))
tprs[label].append(tpr)
fprs[label].append(fpr)
if plot:
for label in labels:
print("Plotting %s" % genre_list[label])
scores_to_sort = roc_scores[label]
median = np.argsort(scores_to_sort)[len(scores_to_sort) / 2]
desc = "%s %s" % (name, genre_list[label])
plot_pr(pr_scores[label][median], desc, precisions[label][median],
recalls[label][median], label='%s vs rest' % genre_list[label])
plot_roc(roc_scores[label][median], desc, tprs[label][median],
fprs[label][median], label='%s vs rest' % genre_list[label])
all_pr_scores = np.asarray(pr_scores.values()).flatten()
summary = (np.mean(scores), np.std(scores),
np.mean(all_pr_scores), np.std(all_pr_scores))
print("%.3f\t%.3f\t%.3f\t%.3f\t" % summary)
return np.mean(train_errors), np.mean(test_errors), np.asarray(cms)
def create_model():
from sklearn.linear_model.logistic import LogisticRegression
clf = LogisticRegression()
return clf
if __name__ == "__main__":
X, y = read_fft(genre_list)
train_avg, test_avg, cms = train_model(
create_model, X, y, "Log Reg FFT", plot=True)
cm_avg = np.mean(cms, axis=0)
cm_norm = cm_avg / np.sum(cm_avg, axis=0)
plot_confusion_matrix(cm_norm, genre_list, "fft",
"Confusion matrix of an FFT based classifier")
| mit |
Lab603/PicEncyclopedias | jni-build/jni-build/jni/include/tensorflow/contrib/learn/__init__.py | 8 | 1912 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# TODO(ptucker,ipolosukhin): Improve descriptions.
"""High level API for learning with TensorFlow.
## Estimators
Train and evaluate TensorFlow models.
@@BaseEstimator
@@Estimator
@@ModeKeys
@@TensorFlowClassifier
@@DNNClassifier
@@DNNRegressor
@@TensorFlowDNNClassifier
@@TensorFlowDNNRegressor
@@TensorFlowEstimator
@@LinearClassifier
@@LinearRegressor
@@TensorFlowLinearClassifier
@@TensorFlowLinearRegressor
@@TensorFlowRNNClassifier
@@TensorFlowRNNRegressor
@@TensorFlowRegressor
## Graph actions
Perform various training, evaluation, and inference actions on a graph.
@@NanLossDuringTrainingError
@@RunConfig
@@evaluate
@@infer
@@run_feeds
@@run_n
@@train
## Input processing
Queue and read batched input data.
@@extract_dask_data
@@extract_dask_labels
@@extract_pandas_data
@@extract_pandas_labels
@@extract_pandas_matrix
@@read_batch_examples
@@read_batch_features
@@read_batch_record_features
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import
from tensorflow.contrib.learn.python.learn import *
from tensorflow.python.util.all_util import make_all
__all__ = make_all(__name__)
__all__.append('datasets')
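# A minimal usage sketch, assuming the classic skflow-style fit/predict API
# exposed by this module; the exact constructor arguments vary between
# TensorFlow releases, and sklearn is used here only to supply example data.
# This helper is never called at import time.
def _dnn_usage_sketch():
    from sklearn import datasets, metrics
    iris = datasets.load_iris()
    # Three dense hidden layers; n_classes matches the three iris species.
    classifier = TensorFlowDNNClassifier(hidden_units=[10, 20, 10], n_classes=3)
    classifier.fit(iris.data, iris.target)
    predictions = classifier.predict(iris.data)
    return metrics.accuracy_score(iris.target, predictions)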
| mit |
asnorkin/sentiment_analysis | site/lib/python2.7/site-packages/sklearn/neighbors/tests/test_dist_metrics.py | 36 | 6957 | import itertools
import pickle
import numpy as np
from numpy.testing import assert_array_almost_equal
import scipy
from scipy.spatial.distance import cdist
from sklearn.neighbors.dist_metrics import DistanceMetric
from sklearn.neighbors import BallTree
from sklearn.utils.testing import SkipTest, assert_raises_regex
def dist_func(x1, x2, p):
return np.sum((x1 - x2) ** p) ** (1. / p)
def cmp_version(version1, version2):
version1 = tuple(map(int, version1.split('.')[:2]))
version2 = tuple(map(int, version2.split('.')[:2]))
if version1 < version2:
return -1
elif version1 > version2:
return 1
else:
return 0
class TestMetrics:
def __init__(self, n1=20, n2=25, d=4, zero_frac=0.5,
rseed=0, dtype=np.float64):
np.random.seed(rseed)
self.X1 = np.random.random((n1, d)).astype(dtype)
self.X2 = np.random.random((n2, d)).astype(dtype)
# make boolean arrays: ones and zeros
self.X1_bool = self.X1.round(0)
self.X2_bool = self.X2.round(0)
V = np.random.random((d, d))
VI = np.dot(V, V.T)
self.metrics = {'euclidean': {},
'cityblock': {},
'minkowski': dict(p=(1, 1.5, 2, 3)),
'chebyshev': {},
'seuclidean': dict(V=(np.random.random(d),)),
'wminkowski': dict(p=(1, 1.5, 3),
w=(np.random.random(d),)),
'mahalanobis': dict(VI=(VI,)),
'hamming': {},
'canberra': {},
'braycurtis': {}}
self.bool_metrics = ['matching', 'jaccard', 'dice',
'kulsinski', 'rogerstanimoto', 'russellrao',
'sokalmichener', 'sokalsneath']
def test_cdist(self):
for metric, argdict in self.metrics.items():
keys = argdict.keys()
for vals in itertools.product(*argdict.values()):
kwargs = dict(zip(keys, vals))
D_true = cdist(self.X1, self.X2, metric, **kwargs)
yield self.check_cdist, metric, kwargs, D_true
for metric in self.bool_metrics:
D_true = cdist(self.X1_bool, self.X2_bool, metric)
yield self.check_cdist_bool, metric, D_true
def check_cdist(self, metric, kwargs, D_true):
if metric == 'canberra' and cmp_version(scipy.__version__, '0.9') <= 0:
raise SkipTest("Canberra distance incorrect in scipy < 0.9")
dm = DistanceMetric.get_metric(metric, **kwargs)
D12 = dm.pairwise(self.X1, self.X2)
assert_array_almost_equal(D12, D_true)
def check_cdist_bool(self, metric, D_true):
dm = DistanceMetric.get_metric(metric)
D12 = dm.pairwise(self.X1_bool, self.X2_bool)
assert_array_almost_equal(D12, D_true)
def test_pdist(self):
for metric, argdict in self.metrics.items():
keys = argdict.keys()
for vals in itertools.product(*argdict.values()):
kwargs = dict(zip(keys, vals))
D_true = cdist(self.X1, self.X1, metric, **kwargs)
yield self.check_pdist, metric, kwargs, D_true
for metric in self.bool_metrics:
D_true = cdist(self.X1_bool, self.X1_bool, metric)
yield self.check_pdist_bool, metric, D_true
def check_pdist(self, metric, kwargs, D_true):
if metric == 'canberra' and cmp_version(scipy.__version__, '0.9') <= 0:
raise SkipTest("Canberra distance incorrect in scipy < 0.9")
dm = DistanceMetric.get_metric(metric, **kwargs)
D12 = dm.pairwise(self.X1)
assert_array_almost_equal(D12, D_true)
def check_pdist_bool(self, metric, D_true):
dm = DistanceMetric.get_metric(metric)
D12 = dm.pairwise(self.X1_bool)
assert_array_almost_equal(D12, D_true)
def test_pickle(self):
for metric, argdict in self.metrics.items():
keys = argdict.keys()
for vals in itertools.product(*argdict.values()):
kwargs = dict(zip(keys, vals))
yield self.check_pickle, metric, kwargs
for metric in self.bool_metrics:
yield self.check_pickle_bool, metric
def check_pickle_bool(self, metric):
dm = DistanceMetric.get_metric(metric)
D1 = dm.pairwise(self.X1_bool)
dm2 = pickle.loads(pickle.dumps(dm))
D2 = dm2.pairwise(self.X1_bool)
assert_array_almost_equal(D1, D2)
def check_pickle(self, metric, kwargs):
dm = DistanceMetric.get_metric(metric, **kwargs)
D1 = dm.pairwise(self.X1)
dm2 = pickle.loads(pickle.dumps(dm))
D2 = dm2.pairwise(self.X1)
assert_array_almost_equal(D1, D2)
def test_haversine_metric():
def haversine_slow(x1, x2):
return 2 * np.arcsin(np.sqrt(np.sin(0.5 * (x1[0] - x2[0])) ** 2
+ np.cos(x1[0]) * np.cos(x2[0]) *
np.sin(0.5 * (x1[1] - x2[1])) ** 2))
X = np.random.random((10, 2))
haversine = DistanceMetric.get_metric("haversine")
D1 = haversine.pairwise(X)
D2 = np.zeros_like(D1)
for i, x1 in enumerate(X):
for j, x2 in enumerate(X):
D2[i, j] = haversine_slow(x1, x2)
assert_array_almost_equal(D1, D2)
assert_array_almost_equal(haversine.dist_to_rdist(D1),
np.sin(0.5 * D2) ** 2)
def test_pyfunc_metric():
X = np.random.random((10, 3))
euclidean = DistanceMetric.get_metric("euclidean")
pyfunc = DistanceMetric.get_metric("pyfunc", func=dist_func, p=2)
# Check if both callable metric and predefined metric initialized
# DistanceMetric object is picklable
euclidean_pkl = pickle.loads(pickle.dumps(euclidean))
pyfunc_pkl = pickle.loads(pickle.dumps(pyfunc))
D1 = euclidean.pairwise(X)
D2 = pyfunc.pairwise(X)
D1_pkl = euclidean_pkl.pairwise(X)
D2_pkl = pyfunc_pkl.pairwise(X)
assert_array_almost_equal(D1, D2)
assert_array_almost_equal(D1_pkl, D2_pkl)
def test_bad_pyfunc_metric():
def wrong_distance(x, y):
return "1"
X = np.ones((5, 2))
assert_raises_regex(TypeError,
"Custom distance function must accept two vectors",
BallTree, X, metric=wrong_distance)
def test_input_data_size():
# Regression test for #6288
    # Previously, a metric requiring a particular input dimension would fail
def custom_metric(x, y):
assert x.shape[0] == 3
return np.sum((x - y) ** 2)
rng = np.random.RandomState(0)
X = rng.rand(10, 3)
pyfunc = DistanceMetric.get_metric("pyfunc", func=dist_func, p=2)
eucl = DistanceMetric.get_metric("euclidean")
assert_array_almost_equal(pyfunc.pairwise(X), eucl.pairwise(X))
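def test_minimal_usage_sketch():
    # A small usage sketch mirroring the calls exercised above: get_metric()
    # returns a DistanceMetric whose pairwise() computes the full distance
    # matrix, which should agree with scipy's cdist for the same metric.
    X = np.arange(6, dtype=np.float64).reshape(3, 2)
    dm = DistanceMetric.get_metric("minkowski", p=2)
    assert_array_almost_equal(dm.pairwise(X), cdist(X, X, "minkowski", p=2))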
| mit |
terrycojones/dark-matter | dark/mutations.py | 1 | 16454 | import os
from collections import defaultdict
import numpy as np
try:
import matplotlib
if not os.environ.get('DISPLAY'):
# Use non-interactive Agg backend
matplotlib.use('Agg')
import matplotlib.pyplot as plt
except ImportError:
import platform
if platform.python_implementation() == 'PyPy':
# PyPy doesn't have a version of matplotlib. Make a fake
# class that raises if it is used. This allows us to use other
# 'dark' code that happens to import dark.mutations but not use the
# functions that rely on matplotlib.
class plt(object):
def __getattr__(self, _):
raise NotImplementedError(
'matplotlib is not supported under pypy')
else:
raise
from random import choice, uniform
from dark import ncbidb
def basePlotter(blastHits, title):
"""
    Plot the reads against the subject so that bases in a read that differ
    from the subject are shown explicitly, while matching bases are shown
    as '.', like so:
subject_gi ATGCGTACGTACGACACC
read_1 A......TTC..T
@param blastHits: A L{dark.blast.BlastHits} instance.
@param title: A C{str} sequence title that was matched by BLAST. We plot
the reads that matched this title.
"""
result = []
params = blastHits.plotParams
assert params is not None, ('Oops, it looks like you forgot to run '
'computePlotInfo.')
sequence = ncbidb.getSequence(title, blastHits.records.blastDb)
subject = sequence.seq
gi = title.split('|')[1]
sub = '%s\t \t \t%s' % (gi, subject)
result.append(sub)
plotInfo = blastHits.titles[title]['plotInfo']
assert plotInfo is not None, ('Oops, it looks like you forgot to run '
'computePlotInfo.')
items = plotInfo['items']
count = 0
for item in items:
count += 1
hsp = item['hsp']
queryTitle = blastHits.fasta[item['readNum']].id
# If the product of the subject and query frame values is +ve,
# then they're either both +ve or both -ve, so we just use the
# query as is. Otherwise, we need to reverse complement it.
if item['frame']['subject'] * item['frame']['query'] > 0:
query = blastHits.fasta[item['readNum']].seq
reverse = False
else:
# One of the subject or query has negative sense.
query = blastHits.fasta[
item['readNum']].reverse_complement().seq
reverse = True
query = query.upper()
queryStart = hsp['queryStart']
subjectStart = hsp['subjectStart']
queryEnd = hsp['queryEnd']
subjectEnd = hsp['subjectEnd']
# Before comparing the read to the subject, make a string of the
# same length as the subject, which contains the read and
# has ' ' where the read does not match.
# 3 parts need to be taken into account:
# 1) the left offset (if the query doesn't stick out to the left)
# 2) the query. if the frame is -1, it has to be reversed.
# The query consists of 3 parts: left, middle (control for gaps)
# 3) the right offset
# Do part 1) and 2).
if queryStart < 0:
# The query is sticking out to the left.
leftQuery = ''
if subjectStart == 0:
# The match starts at the first base of the subject.
middleLeftQuery = ''
else:
# The match starts into the subject.
                # Determine the length of the non-matching part of the
                # query to the left.
leftOffset = -1 * queryStart
rightOffset = subjectStart + leftOffset
middleLeftQuery = query[leftOffset:rightOffset]
else:
# The query is not sticking out to the left
# make the left offset.
leftQuery = queryStart * ' '
leftQueryOffset = subjectStart - queryStart
middleLeftQuery = query[:leftQueryOffset]
# Do part 3).
# Disregard gaps in subject while adding.
matchQuery = item['origHsp'].query
matchSubject = item['origHsp'].sbjct
index = 0
mid = ''
for item in range(len(matchQuery)):
if matchSubject[index] != ' ':
mid += matchQuery[index]
index += 1
# if the query has been reversed, turn the matched part around
if reverse:
rev = ''
toReverse = mid
reverseDict = {' ': ' ', '-': '-', 'A': 'T', 'T': 'A',
'C': 'G', 'G': 'C', '.': '.', 'N': 'N'}
for item in toReverse:
newItem = reverseDict[item]
rev += newItem
mid = rev[::-1]
middleQuery = middleLeftQuery + mid
# add right not-matching part of the query
rightQueryOffset = queryEnd - subjectEnd
rightQuery = query[-rightQueryOffset:]
middleQuery += rightQuery
read = leftQuery + middleQuery
# do part 3)
offset = len(subject) - len(read)
# if the read is sticking out to the right
# chop it off
if offset < 0:
read = read[:offset]
# if it's not sticking out, fill the space with ' '
elif offset > 0:
read += offset * ' '
# compare the subject and the read, make a string
# called 'comparison', which contains a '.' if the bases
# are equal and the letter of the read if they are not.
comparison = ''
for readBase, subjectBase in zip(read, subject):
if readBase == ' ':
comparison += ' '
elif readBase == subjectBase:
comparison += '.'
elif readBase != subjectBase:
comparison += readBase
index += 1
que = '%s \t %s' % (queryTitle, comparison)
result.append(que)
# sanity checks
assert (len(comparison) == len(subject)), (
'%d != %d' % (len(comparison), len(subject)))
index = 0
if comparison[index] == ' ':
index += 1
else:
start = index - 1
assert (start == queryStart or start == -1), (
'%s != %s or %s != -1' % (start, queryStart, start))
return result
def getAPOBECFrequencies(dotAlignment, orig, new, pattern):
"""
Gets mutation frequencies if they are in a certain pattern.
@param dotAlignment: result from calling basePlotter
@param orig: A C{str}, naming the original base
@param new: A C{str}, what orig was mutated to
    @param pattern: A C{str}, which pattern we're looking for
(must be one of 'cPattern', 'tPattern')
"""
cPattern = ['ACA', 'ACC', 'ACG', 'ACT', 'CCA', 'CCC', 'CCG', 'CCT',
'GCA', 'GCC', 'GCG', 'GCT', 'TCA', 'TCC', 'TCG', 'TCT']
tPattern = ['ATA', 'ATC', 'ATG', 'ATT', 'CTA', 'CTC', 'CTG', 'CTT',
'GTA', 'GTC', 'GTG', 'GTT', 'TTA', 'TTC', 'TTG', 'TTT']
# choose the right pattern
if pattern == 'cPattern':
patterns = cPattern
middleBase = 'C'
else:
patterns = tPattern
middleBase = 'T'
# generate the freqs dict with the right pattern
freqs = defaultdict(int)
for pattern in patterns:
freqs[pattern] = 0
# get the subject sequence from dotAlignment
subject = dotAlignment[0].split('\t')[3]
# exclude the subject from the dotAlignment, so just the queries
# are left over
queries = dotAlignment[1:]
for item in queries:
query = item.split('\t')[1]
index = 0
for queryBase in query:
qBase = query[index]
sBase = subject[index]
if qBase == new and sBase == orig:
try:
plusSb = subject[index + 1]
minusSb = subject[index - 1]
except IndexError:
plusSb = 'end'
motif = '%s%s%s' % (minusSb, middleBase, plusSb)
if motif in freqs:
freqs[motif] += 1
index += 1
return freqs
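# A minimal usage sketch, assuming `blastHits` and `title` come from the usual
# dark.blast pipeline (both names are placeholders here):
#
#     dotAlignment = basePlotter(blastHits, title)
#     freqs = getAPOBECFrequencies(dotAlignment, 'C', 'T', 'cPattern')
#
# would count C->T changes seen in the reads, broken down by the motif
# surrounding the mutated base in the subject.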
def getCompleteFreqs(blastHits):
"""
Make a dictionary which collects all mutation frequencies from
all reads.
Calls basePlotter to get dotAlignment, which is passed to
getAPOBECFrequencies with the respective parameter, to collect
the frequencies.
@param blastHits: A L{dark.blast.BlastHits} instance.
"""
allFreqs = {}
for title in blastHits.titles:
allFreqs[title] = {
'C>A': {},
'C>G': {},
'C>T': {},
'T>A': {},
'T>C': {},
'T>G': {},
}
basesPlotted = basePlotter(blastHits, title)
for mutation in allFreqs[title]:
orig = mutation[0]
new = mutation[2]
if orig == 'C':
pattern = 'cPattern'
else:
pattern = 'tPattern'
freqs = getAPOBECFrequencies(basesPlotted, orig, new, pattern)
allFreqs[title][mutation] = freqs
numberOfReads = len(blastHits.titles[title]['plotInfo']['items'])
allFreqs[title]['numberOfReads'] = numberOfReads
allFreqs[title]['bitScoreMax'] = blastHits.titles[
title]['plotInfo']['bitScoreMax']
return allFreqs
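# The dictionary returned above maps each title to per-substitution motif
# counts ('C>A' ... 'T>G'), plus the bookkeeping keys 'numberOfReads' and
# 'bitScoreMax' that the plotting functions below rely on.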
def makeFrequencyGraph(allFreqs, title, substitution, pattern,
color='blue', createFigure=True, showFigure=True,
readsAx=False):
"""
For a title, make a graph showing the frequencies.
@param allFreqs: result from getCompleteFreqs
@param title: A C{str}, title of virus of which frequencies should be
plotted.
@param substitution: A C{str}, which substitution should be plotted;
must be one of 'C>A', 'C>G', 'C>T', 'T>A', 'T>C', 'T>G'.
    @param pattern: A C{str}, which pattern we're looking for (must be
one of 'cPattern', 'tPattern')
@param color: A C{str}, color of bars.
@param createFigure: If C{True}, create a figure.
@param showFigure: If C{True}, show the created figure.
@param readsAx: If not None, use this as the subplot for displaying reads.
"""
cPattern = ['ACA', 'ACC', 'ACG', 'ACT', 'CCA', 'CCC', 'CCG', 'CCT',
'GCA', 'GCC', 'GCG', 'GCT', 'TCA', 'TCC', 'TCG', 'TCT']
tPattern = ['ATA', 'ATC', 'ATG', 'ATT', 'CTA', 'CTC', 'CTG', 'CTT',
'GTA', 'GTC', 'GTG', 'GTT', 'TTA', 'TTC', 'TTG', 'TTT']
# choose the right pattern
if pattern == 'cPattern':
patterns = cPattern
else:
patterns = tPattern
fig = plt.figure(figsize=(10, 10))
ax = readsAx or fig.add_subplot(111)
# how many bars
N = 16
ind = np.arange(N)
width = 0.4
# make a list in the right order, so that it can be plotted easily
divisor = allFreqs[title]['numberOfReads']
toPlot = allFreqs[title][substitution]
index = 0
data = []
for item in patterns:
newData = toPlot[patterns[index]] / divisor
data.append(newData)
index += 1
# create the bars
ax.bar(ind, data, width, color=color)
maxY = np.max(data) + 5
# axes and labels
if createFigure:
title = title.split('|')[4][:50]
ax.set_title('%s \n %s' % (title, substitution), fontsize=20)
ax.set_ylim(0, maxY)
ax.set_ylabel('Absolute Number of Mutations', fontsize=16)
ax.set_xticks(ind + width)
ax.set_xticklabels(patterns, rotation=45, fontsize=8)
if createFigure is False:
ax.set_xticks(ind + width)
ax.set_xticklabels(patterns, rotation=45, fontsize=0)
else:
if showFigure:
plt.show()
return maxY
def makeFrequencyPanel(allFreqs, patientName):
"""
    Make a panel of graphs showing the mutation frequencies for every title.
@param allFreqs: result from getCompleteFreqs
@param patientName: A C{str}, title for the panel
"""
titles = sorted(
iter(allFreqs.keys()),
key=lambda title: (allFreqs[title]['bitScoreMax'], title))
origMaxY = 0
cols = 6
rows = len(allFreqs)
figure, ax = plt.subplots(rows, cols, squeeze=False)
substitutions = ['C>A', 'C>G', 'C>T', 'T>A', 'T>C', 'T>G']
colors = ['blue', 'black', 'red', 'yellow', 'green', 'orange']
for i, title in enumerate(titles):
for index in range(6):
for subst in allFreqs[str(title)]:
substitution = substitutions[index]
print(i, index, title, 'substitution', substitutions[index])
if substitution[0] == 'C':
pattern = 'cPattern'
else:
pattern = 'tPattern'
maxY = makeFrequencyGraph(allFreqs, title, substitution,
pattern, color=colors[index],
createFigure=False, showFigure=False,
readsAx=ax[i][index])
if maxY > origMaxY:
origMaxY = maxY
# add title for individual plot.
# if used for other viruses, this will have to be adapted.
if index == 0:
gi = title.split('|')[1]
titles = title.split(' ')
try:
typeIndex = titles.index('type')
except ValueError:
typeNumber = 'gi: %s' % gi
else:
typeNumber = titles[typeIndex + 1]
ax[i][index].set_ylabel(('Type %s \n maxBitScore: %s' % (
typeNumber, allFreqs[title]['bitScoreMax'])), fontsize=10)
# add xAxis tick labels
if i == 0:
ax[i][index].set_title(substitution, fontsize=13)
if i == len(allFreqs) - 1 or i == (len(allFreqs) - 1) / 2:
if index < 3:
pat = ['ACA', 'ACC', 'ACG', 'ACT', 'CCA', 'CCC', 'CCG',
'CCT', 'GCA', 'GCC', 'GCG', 'GCT', 'TCA', 'TCC',
'TCG', 'TCT']
else:
pat = ['ATA', 'ATC', 'ATG', 'ATT', 'CTA', 'CTC', 'CTG',
'CTT', 'GTA', 'GTC', 'GTG', 'GTT', 'TTA', 'TTC',
'TTG', 'TTT']
ax[i][index].set_xticklabels(pat, rotation=45, fontsize=8)
# make Y-axis equal
for i, title in enumerate(allFreqs):
for index in range(6):
a = ax[i][index]
a.set_ylim([0, origMaxY])
# add title of whole panel
figure.suptitle('Mutation Signatures in %s' % patientName, fontsize=20)
figure.set_size_inches(5 * cols, 3 * rows, forward=True)
figure.show()
return allFreqs
def mutateString(original, n, replacements='acgt'):
"""
Mutate C{original} in C{n} places with chars chosen from C{replacements}.
@param original: The original C{str} to mutate.
@param n: The C{int} number of locations to mutate.
@param replacements: The C{str} of replacement letters.
@return: A new C{str} with C{n} places of C{original} mutated.
@raises ValueError: if C{n} is too high, or C{replacement} contains
duplicates, or if no replacement can be made at a certain locus
because C{replacements} is of length one, or if C{original} is of
zero length.
"""
if not original:
raise ValueError('Empty original string passed.')
if n > len(original):
raise ValueError('Cannot make %d mutations in a string of length %d' %
(n, len(original)))
if len(replacements) != len(set(replacements)):
raise ValueError('Replacement string contains duplicates')
if len(replacements) == 1 and original.find(replacements) != -1:
raise ValueError('Impossible replacement')
result = list(original)
length = len(original)
for offset in range(length):
if uniform(0.0, 1.0) < float(n) / (length - offset):
# Mutate.
while True:
new = choice(replacements)
if new != result[offset]:
result[offset] = new
break
n -= 1
if n == 0:
break
return ''.join(result)
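# Minimal usage sketch; the result differs between runs because the mutated
# positions and replacement bases are chosen at random:
#
#     mutated = mutateString('ACGTACGT', 2)
#     # exactly two positions differ from the original
#     assert sum(a != b for a, b in zip('ACGTACGT', mutated)) == 2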
| mit |