repo_name | path | copies | size | content | license
---|---|---|---|---|---
chengduoZH/Paddle | python/paddle/dataset/uci_housing.py | 2 | 4214 |
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
UCI Housing dataset.
This module will download the dataset from
https://archive.ics.uci.edu/ml/machine-learning-databases/housing/ and
parse the training set and test set into paddle reader creators.
"""
from __future__ import print_function
import numpy as np
import six
import tempfile
import tarfile
import os
import paddle.dataset.common
__all__ = ['train', 'test']
URL = 'http://paddlemodels.bj.bcebos.com/uci_housing/housing.data'
MD5 = 'd4accdce7a25600298819f8e28e8d593'
feature_names = [
'CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE', 'DIS', 'RAD', 'TAX',
'PTRATIO', 'B', 'LSTAT'
]
UCI_TRAIN_DATA = None
UCI_TEST_DATA = None
FLUID_URL_MODEL = 'https://github.com/PaddlePaddle/book/raw/develop/01.fit_a_line/fluid/fit_a_line.fluid.tar'
FLUID_MD5_MODEL = '6e6dd637ccd5993961f68bfbde46090b'
def feature_range(maximums, minimums):
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
feature_num = len(maximums)
ax.bar(list(range(feature_num)),
maximums - minimums,
color='r',
align='center')
ax.set_title('feature scale')
plt.xticks(list(range(feature_num)), feature_names)
plt.xlim([-1, feature_num])
fig.set_figheight(6)
fig.set_figwidth(10)
if not os.path.exists('./image'):
os.makedirs('./image')
fig.savefig('image/ranges.png', dpi=48)
plt.close(fig)
def load_data(filename, feature_num=14, ratio=0.8):
global UCI_TRAIN_DATA, UCI_TEST_DATA
if UCI_TRAIN_DATA is not None and UCI_TEST_DATA is not None:
return
data = np.fromfile(filename, sep=' ')
data = data.reshape(data.shape[0] // feature_num, feature_num)
maximums, minimums, avgs = data.max(axis=0), data.min(axis=0), data.sum(
axis=0) / data.shape[0]
feature_range(maximums[:-1], minimums[:-1])
for i in six.moves.range(feature_num - 1):
data[:, i] = (data[:, i] - avgs[i]) / (maximums[i] - minimums[i])
offset = int(data.shape[0] * ratio)
UCI_TRAIN_DATA = data[:offset]
UCI_TEST_DATA = data[offset:]
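# Note on the normalization applied in load_data above: each of the 13
# feature columns is centred on its mean and scaled by its range,
#
#     x_norm = (x - mean(x)) / (max(x) - min(x))
#
# while the last column (the price) is left unscaled.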
def train():
"""
UCI_HOUSING training set creator.
It returns a reader creator; each sample in the reader is a tuple of
normalized features and the house price.
:return: Training reader creator
:rtype: callable
"""
global UCI_TRAIN_DATA
load_data(paddle.dataset.common.download(URL, 'uci_housing', MD5))
def reader():
for d in UCI_TRAIN_DATA:
yield d[:-1], d[-1:]
return reader
def test():
"""
UCI_HOUSING test set creator.
It returns a reader creator; each sample in the reader is a tuple of
normalized features and the house price.
:return: Test reader creator
:rtype: callable
"""
global UCI_TEST_DATA
load_data(paddle.dataset.common.download(URL, 'uci_housing', MD5))
def reader():
for d in UCI_TEST_DATA:
yield d[:-1], d[-1:]
return reader
def fluid_model():
parameter_tar = paddle.dataset.common.download(
FLUID_URL_MODEL, 'uci_housing', FLUID_MD5_MODEL, 'fit_a_line.fluid.tar')
tar = tarfile.TarFile(parameter_tar, mode='r')
dirpath = tempfile.mkdtemp()
tar.extractall(path=dirpath)
return dirpath
def predict_reader():
"""
It returns just one tuple of data for inference.
:return: one tuple of data
:rtype: tuple
"""
global UCI_TEST_DATA
load_data(paddle.dataset.common.download(URL, 'uci_housing', MD5))
return (UCI_TEST_DATA[0][:-1], )
def fetch():
paddle.dataset.common.download(URL, 'uci_housing', MD5)
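# A minimal usage sketch (not part of the original module); it assumes the
# download URL above is reachable and that matplotlib is installed, since
# load_data plots the feature ranges on first use.
if __name__ == '__main__':
    reader = train()
    features, price = next(reader())
    print(len(features), price)  # 13 normalized features and one price value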
| apache-2.0
trankmichael/scipy | scipy/interpolate/interpolate.py | 25 | 80287 |
""" Classes for interpolating values.
"""
from __future__ import division, print_function, absolute_import
__all__ = ['interp1d', 'interp2d', 'spline', 'spleval', 'splmake', 'spltopp',
'ppform', 'lagrange', 'PPoly', 'BPoly', 'RegularGridInterpolator',
'interpn']
import itertools
from numpy import (shape, sometrue, array, transpose, searchsorted,
ones, logical_or, atleast_1d, atleast_2d, ravel,
dot, poly1d, asarray, intp)
import numpy as np
import scipy.linalg
import scipy.special as spec
from scipy.special import comb
import math
import warnings
import functools
import operator
from scipy._lib.six import xrange, integer_types
from . import fitpack
from . import dfitpack
from . import _fitpack
from .polyint import _Interpolator1D
from . import _ppoly
from .fitpack2 import RectBivariateSpline
from .interpnd import _ndim_coords_from_arrays
def reduce_sometrue(a):
all = a
while len(shape(all)) > 1:
all = sometrue(all, axis=0)
return all
def prod(x):
"""Product of a list of numbers; ~40x faster vs np.prod for Python tuples"""
if len(x) == 0:
return 1
return functools.reduce(operator.mul, x)
def lagrange(x, w):
"""
Return a Lagrange interpolating polynomial.
Given two 1-D arrays `x` and `w`, returns the Lagrange interpolating
polynomial through the points ``(x, w)``.
Warning: This implementation is numerically unstable. Do not expect to
be able to use more than about 20 points even if they are chosen optimally.
Parameters
----------
x : array_like
`x` represents the x-coordinates of a set of datapoints.
w : array_like
`w` represents the y-coordinates of a set of datapoints, i.e. f(`x`).
Returns
-------
lagrange : numpy.poly1d instance
The Lagrange interpolating polynomial.
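Examples
--------
A minimal sketch: interpolating ``f(x) = x**2`` through three points
recovers the quadratic exactly:

>>> from scipy.interpolate import lagrange
>>> poly = lagrange([0, 1, 2], [0, 1, 4])
>>> poly(3)  # evaluates to 9.0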
"""
M = len(x)
p = poly1d(0.0)
for j in xrange(M):
pt = poly1d(w[j])
for k in xrange(M):
if k == j:
continue
fac = x[j]-x[k]
pt *= poly1d([1.0, -x[k]])/fac
p += pt
return p
# !! Need to find argument for keeping initialize. If it isn't
# !! found, get rid of it!
class interp2d(object):
"""
interp2d(x, y, z, kind='linear', copy=True, bounds_error=False,
fill_value=nan)
Interpolate over a 2-D grid.
`x`, `y` and `z` are arrays of values used to approximate some function
f: ``z = f(x, y)``. This class returns a function whose call method uses
spline interpolation to find the value of new points.
If `x` and `y` represent a regular grid, consider using
RectBivariateSpline.
Methods
-------
__call__
Parameters
----------
x, y : array_like
Arrays defining the data point coordinates.
If the points lie on a regular grid, `x` can specify the column
coordinates and `y` the row coordinates, for example::
>>> x = [0,1,2]; y = [0,3]; z = [[1,2,3], [4,5,6]]
Otherwise, `x` and `y` must specify the full coordinates for each
point, for example::
>>> x = [0,1,2,0,1,2]; y = [0,0,0,3,3,3]; z = [1,2,3,4,5,6]
If `x` and `y` are multi-dimensional, they are flattened before use.
z : array_like
The values of the function to interpolate at the data points. If
`z` is a multi-dimensional array, it is flattened before use. The
length of a flattened `z` array is either
len(`x`)*len(`y`) if `x` and `y` specify the column and row coordinates
or ``len(z) == len(x) == len(y)`` if `x` and `y` specify coordinates
for each point.
kind : {'linear', 'cubic', 'quintic'}, optional
The kind of spline interpolation to use. Default is 'linear'.
copy : bool, optional
If True, the class makes internal copies of x, y and z.
If False, references may be used. The default is to copy.
bounds_error : bool, optional
If True, when interpolated values are requested outside of the
domain of the input data (x,y), a ValueError is raised.
If False, then `fill_value` is used.
fill_value : number, optional
If provided, the value to use for points outside of the
interpolation domain. If omitted (None), values outside
the domain are extrapolated.
Returns
-------
values_x : ndarray, shape xi.shape[:-1] + values.shape[ndim:]
Interpolated values at input coordinates.
See Also
--------
RectBivariateSpline :
Much faster 2D interpolation if your input data is on a grid
bisplrep, bisplev :
Spline interpolation based on FITPACK
BivariateSpline : a more recent wrapper of the FITPACK routines
interp1d : one-dimensional version of this function
Notes
-----
The minimum number of data points required along the interpolation
axis is ``(k+1)**2``, with k=1 for linear, k=3 for cubic and k=5 for
quintic interpolation.
The interpolator is constructed by `bisplrep`, with a smoothing factor
of 0. If more control over smoothing is needed, `bisplrep` should be
used directly.
Examples
--------
Construct a 2-D grid and interpolate on it:
>>> from scipy import interpolate
>>> x = np.arange(-5.01, 5.01, 0.25)
>>> y = np.arange(-5.01, 5.01, 0.25)
>>> xx, yy = np.meshgrid(x, y)
>>> z = np.sin(xx**2+yy**2)
>>> f = interpolate.interp2d(x, y, z, kind='cubic')
Now use the obtained interpolation function and plot the result:
>>> import matplotlib.pyplot as plt
>>> xnew = np.arange(-5.01, 5.01, 1e-2)
>>> ynew = np.arange(-5.01, 5.01, 1e-2)
>>> znew = f(xnew, ynew)
>>> plt.plot(x, z[0, :], 'ro-', xnew, znew[0, :], 'b-')
>>> plt.show()
"""
def __init__(self, x, y, z, kind='linear', copy=True, bounds_error=False,
fill_value=None):
x = ravel(x)
y = ravel(y)
z = asarray(z)
rectangular_grid = (z.size == len(x) * len(y))
if rectangular_grid:
if z.ndim == 2:
if z.shape != (len(y), len(x)):
raise ValueError("When on a regular grid with x.size = m "
"and y.size = n, if z.ndim == 2, then z "
"must have shape (n, m)")
if not np.all(x[1:] >= x[:-1]):
j = np.argsort(x)
x = x[j]
z = z[:, j]
if not np.all(y[1:] >= y[:-1]):
j = np.argsort(y)
y = y[j]
z = z[j, :]
z = ravel(z.T)
else:
z = ravel(z)
if len(x) != len(y):
raise ValueError(
"x and y must have equal lengths for non rectangular grid")
if len(z) != len(x):
raise ValueError(
"Invalid length for input z for non rectangular grid")
try:
kx = ky = {'linear': 1,
'cubic': 3,
'quintic': 5}[kind]
except KeyError:
raise ValueError("Unsupported interpolation type.")
if not rectangular_grid:
# TODO: surfit is really not meant for interpolation!
self.tck = fitpack.bisplrep(x, y, z, kx=kx, ky=ky, s=0.0)
else:
nx, tx, ny, ty, c, fp, ier = dfitpack.regrid_smth(
x, y, z, None, None, None, None,
kx=kx, ky=ky, s=0.0)
self.tck = (tx[:nx], ty[:ny], c[:(nx - kx - 1) * (ny - ky - 1)],
kx, ky)
self.bounds_error = bounds_error
self.fill_value = fill_value
self.x, self.y, self.z = [array(a, copy=copy) for a in (x, y, z)]
self.x_min, self.x_max = np.amin(x), np.amax(x)
self.y_min, self.y_max = np.amin(y), np.amax(y)
def __call__(self, x, y, dx=0, dy=0, assume_sorted=False):
"""Interpolate the function.
Parameters
----------
x : 1D array
x-coordinates of the mesh on which to interpolate.
y : 1D array
y-coordinates of the mesh on which to interpolate.
dx : int >= 0, < kx
Order of partial derivatives in x.
dy : int >= 0, < ky
Order of partial derivatives in y.
assume_sorted : bool, optional
If False, values of `x` and `y` can be in any order and they are
sorted first.
If True, `x` and `y` have to be arrays of monotonically
increasing values.
Returns
-------
z : 2D array with shape (len(y), len(x))
The interpolated values.
"""
x = atleast_1d(x)
y = atleast_1d(y)
if x.ndim != 1 or y.ndim != 1:
raise ValueError("x and y should both be 1-D arrays")
if not assume_sorted:
x = np.sort(x)
y = np.sort(y)
if self.bounds_error or self.fill_value is not None:
out_of_bounds_x = (x < self.x_min) | (x > self.x_max)
out_of_bounds_y = (y < self.y_min) | (y > self.y_max)
any_out_of_bounds_x = np.any(out_of_bounds_x)
any_out_of_bounds_y = np.any(out_of_bounds_y)
if self.bounds_error and (any_out_of_bounds_x or any_out_of_bounds_y):
raise ValueError("Values out of range; x must be in %r, y in %r"
% ((self.x_min, self.x_max),
(self.y_min, self.y_max)))
z = fitpack.bisplev(x, y, self.tck, dx, dy)
z = atleast_2d(z)
z = transpose(z)
if self.fill_value is not None:
if any_out_of_bounds_x:
z[:, out_of_bounds_x] = self.fill_value
if any_out_of_bounds_y:
z[out_of_bounds_y, :] = self.fill_value
if len(z) == 1:
z = z[0]
return array(z)
class interp1d(_Interpolator1D):
"""
Interpolate a 1-D function.
`x` and `y` are arrays of values used to approximate some function f:
``y = f(x)``. This class returns a function whose call method uses
interpolation to find the value of new points.
Parameters
----------
x : (N,) array_like
A 1-D array of real values.
y : (...,N,...) array_like
A N-D array of real values. The length of `y` along the interpolation
axis must be equal to the length of `x`.
kind : str or int, optional
Specifies the kind of interpolation as a string
('linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic'
where 'slinear', 'quadratic' and 'cubic' refer to a spline
interpolation of first, second or third order) or as an integer
specifying the order of the spline interpolator to use.
Default is 'linear'.
axis : int, optional
Specifies the axis of `y` along which to interpolate.
Interpolation defaults to the last axis of `y`.
copy : bool, optional
If True, the class makes internal copies of x and y.
If False, references to `x` and `y` are used. The default is to copy.
bounds_error : bool, optional
If True, a ValueError is raised any time interpolation is attempted on
a value outside of the range of x (where extrapolation is
necessary). If False, out of bounds values are assigned `fill_value`.
By default, an error is raised.
fill_value : float, optional
If provided, then this value will be used to fill in for requested
points outside of the data range. If not provided, then the default
is NaN.
assume_sorted : bool, optional
If False, values of `x` can be in any order and they are sorted first.
If True, `x` has to be an array of monotonically increasing values.
Methods
-------
__call__
See Also
--------
splrep, splev
Spline interpolation/smoothing based on FITPACK.
UnivariateSpline : An object-oriented wrapper of the FITPACK routines.
interp2d : 2-D interpolation
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy import interpolate
>>> x = np.arange(0, 10)
>>> y = np.exp(-x/3.0)
>>> f = interpolate.interp1d(x, y)
>>> xnew = np.arange(0, 9, 0.1)
>>> ynew = f(xnew) # use interpolation function returned by `interp1d`
>>> plt.plot(x, y, 'o', xnew, ynew, '-')
>>> plt.show()
"""
def __init__(self, x, y, kind='linear', axis=-1,
copy=True, bounds_error=True, fill_value=np.nan,
assume_sorted=False):
""" Initialize a 1D linear interpolation class."""
_Interpolator1D.__init__(self, x, y, axis=axis)
self.copy = copy
self.bounds_error = bounds_error
self.fill_value = fill_value
if kind in ['zero', 'slinear', 'quadratic', 'cubic']:
order = {'nearest': 0, 'zero': 0, 'slinear': 1,
'quadratic': 2, 'cubic': 3}[kind]
kind = 'spline'
elif isinstance(kind, int):
order = kind
kind = 'spline'
elif kind not in ('linear', 'nearest'):
raise NotImplementedError("%s is unsupported: Use fitpack "
"routines for other types." % kind)
x = array(x, copy=self.copy)
y = array(y, copy=self.copy)
if not assume_sorted:
ind = np.argsort(x)
x = x[ind]
y = np.take(y, ind, axis=axis)
if x.ndim != 1:
raise ValueError("the x array must have exactly one dimension.")
if y.ndim == 0:
raise ValueError("the y array must have at least one dimension.")
# Force-cast y to a floating-point type, if it's not yet one
if not issubclass(y.dtype.type, np.inexact):
y = y.astype(np.float_)
# Backward compatibility
self.axis = axis % y.ndim
# Interpolation goes internally along the first axis
self.y = y
y = self._reshape_yi(y)
# Adjust to interpolation kind; store reference to *unbound*
# interpolation methods, in order to avoid circular references to self
# stored in the bound instance methods, and therefore delayed garbage
# collection. See: http://docs.python.org/2/reference/datamodel.html
if kind in ('linear', 'nearest'):
# Make a "view" of the y array that is rotated to the interpolation
# axis.
minval = 2
if kind == 'nearest':
self.x_bds = (x[1:] + x[:-1]) / 2.0
self._call = self.__class__._call_nearest
else:
self._call = self.__class__._call_linear
else:
minval = order + 1
self._spline = splmake(x, y, order=order)
self._call = self.__class__._call_spline
if len(x) < minval:
raise ValueError("x and y arrays must have at "
"least %d entries" % minval)
self._kind = kind
self.x = x
self._y = y
def _call_linear(self, x_new):
# 2. Find where in the original data the values to interpolate
# would be inserted.
# Note: If x_new[n] == x[m], then m is returned by searchsorted.
x_new_indices = searchsorted(self.x, x_new)
# 3. Clip x_new_indices so that they are within the range of
# self.x indices and at least 1. Removes mis-interpolation
# of x_new[n] = x[0]
x_new_indices = x_new_indices.clip(1, len(self.x)-1).astype(int)
# 4. Calculate the slope of regions that each x_new value falls in.
lo = x_new_indices - 1
hi = x_new_indices
x_lo = self.x[lo]
x_hi = self.x[hi]
y_lo = self._y[lo]
y_hi = self._y[hi]
# Note that the following two expressions rely on the specifics of the
# broadcasting semantics.
slope = (y_hi - y_lo) / (x_hi - x_lo)[:, None]
# 5. Calculate the actual value for each entry in x_new.
y_new = slope*(x_new - x_lo)[:, None] + y_lo
return y_new
def _call_nearest(self, x_new):
""" Find nearest neighbour interpolated y_new = f(x_new)."""
# 2. Find where in the averaged data the values to interpolate
# would be inserted.
# Note: use side='left' (right) to searchsorted() to define the
# halfway point to be nearest to the left (right) neighbour
x_new_indices = searchsorted(self.x_bds, x_new, side='left')
# 3. Clip x_new_indices so that they are within the range of x indices.
x_new_indices = x_new_indices.clip(0, len(self.x)-1).astype(intp)
# 4. Calculate the actual value for each entry in x_new.
y_new = self._y[x_new_indices]
return y_new
def _call_spline(self, x_new):
return spleval(self._spline, x_new)
def _evaluate(self, x_new):
# 1. Handle values in x_new that are outside of x. Throw error,
# or return a mask array indicating the out-of-bounds values.
# The behavior is set by the bounds_error variable.
x_new = asarray(x_new)
out_of_bounds = self._check_bounds(x_new)
y_new = self._call(self, x_new)
if len(y_new) > 0:
y_new[out_of_bounds] = self.fill_value
return y_new
def _check_bounds(self, x_new):
"""Check the inputs for being in the bounds of the interpolated data.
Parameters
----------
x_new : array
Returns
-------
out_of_bounds : bool array
The mask on x_new of values that are out of the bounds.
"""
# If self.bounds_error is True, we raise an error if any x_new values
# fall outside the range of x. Otherwise, we return an array indicating
# which values are outside the boundary region.
below_bounds = x_new < self.x[0]
above_bounds = x_new > self.x[-1]
# !! Could provide more information about which values are out of bounds
if self.bounds_error and below_bounds.any():
raise ValueError("A value in x_new is below the interpolation "
"range.")
if self.bounds_error and above_bounds.any():
raise ValueError("A value in x_new is above the interpolation "
"range.")
# !! Should we emit a warning if some values are out of bounds?
# !! matlab does not.
out_of_bounds = logical_or(below_bounds, above_bounds)
return out_of_bounds
class _PPolyBase(object):
"""
Base class for piecewise polynomials.
"""
__slots__ = ('c', 'x', 'extrapolate', 'axis')
def __init__(self, c, x, extrapolate=None, axis=0):
self.c = np.asarray(c)
self.x = np.ascontiguousarray(x, dtype=np.float64)
if extrapolate is None:
extrapolate = True
self.extrapolate = bool(extrapolate)
if not (0 <= axis < self.c.ndim - 1):
raise ValueError("%s must be between 0 and %s" % (axis, c.ndim-1))
self.axis = axis
if axis != 0:
# roll the interpolation axis to be the first one in self.c
# More specifically, the target shape for self.c is (k, m, ...),
# and axis !=0 means that we have c.shape (..., k, m, ...)
# ^
# axis
# So we roll two of them.
self.c = np.rollaxis(self.c, axis+1)
self.c = np.rollaxis(self.c, axis+1)
if self.x.ndim != 1:
raise ValueError("x must be 1-dimensional")
if self.x.size < 2:
raise ValueError("at least 2 breakpoints are needed")
if self.c.ndim < 2:
raise ValueError("c must have at least 2 dimensions")
if self.c.shape[0] == 0:
raise ValueError("polynomial must be at least of order 0")
if self.c.shape[1] != self.x.size-1:
raise ValueError("number of coefficients != len(x)-1")
if np.any(self.x[1:] - self.x[:-1] < 0):
raise ValueError("x-coordinates are not in increasing order")
dtype = self._get_dtype(self.c.dtype)
self.c = np.ascontiguousarray(self.c, dtype=dtype)
def _get_dtype(self, dtype):
if np.issubdtype(dtype, np.complexfloating) \
or np.issubdtype(self.c.dtype, np.complexfloating):
return np.complex_
else:
return np.float_
@classmethod
def construct_fast(cls, c, x, extrapolate=None, axis=0):
"""
Construct the piecewise polynomial without making checks.
Takes the same parameters as the constructor. Input arguments
`c` and `x` must be arrays of the correct shape and type. The
`c` array can only be of dtypes float and complex, and `x`
array must have dtype float.
"""
self = object.__new__(cls)
self.c = c
self.x = x
self.axis = axis
if extrapolate is None:
extrapolate = True
self.extrapolate = extrapolate
return self
def _ensure_c_contiguous(self):
"""
c and x may be modified by the user. The Cython code expects
that they are C contiguous.
"""
if not self.x.flags.c_contiguous:
self.x = self.x.copy()
if not self.c.flags.c_contiguous:
self.c = self.c.copy()
def extend(self, c, x, right=True):
"""
Add additional breakpoints and coefficients to the polynomial.
Parameters
----------
c : ndarray, size (k, m, ...)
Additional coefficients for polynomials in intervals
``self.x[-1] <= x < x_right[0]``, ``x_right[0] <= x < x_right[1]``,
..., ``x_right[m-2] <= x < x_right[m-1]``
x : ndarray, size (m,)
Additional breakpoints. Must be sorted and either to
the right or to the left of the current breakpoints.
right : bool, optional
Whether the new intervals are to the right or to the left
of the current intervals.
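Examples
--------
A minimal sketch: extend ``f(x) = x`` on ``[0, 1]`` with the constant 1
on ``[1, 2]``:

>>> from scipy.interpolate import PPoly
>>> import numpy as np
>>> p = PPoly(np.array([[1.0], [0.0]]), np.array([0.0, 1.0]))
>>> p.extend(np.array([[0.0], [1.0]]), np.array([2.0]))
>>> p.x  # breakpoints are now [0., 1., 2.]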
"""
c = np.asarray(c)
x = np.asarray(x)
if c.ndim < 2:
raise ValueError("invalid dimensions for c")
if x.ndim != 1:
raise ValueError("invalid dimensions for x")
if x.shape[0] != c.shape[1]:
raise ValueError("x and c have incompatible sizes")
if c.shape[2:] != self.c.shape[2:] or c.ndim != self.c.ndim:
raise ValueError("c and self.c have incompatible shapes")
if right:
if x[0] < self.x[-1]:
raise ValueError("new x are not to the right of current ones")
else:
if x[-1] > self.x[0]:
raise ValueError("new x are not to the left of current ones")
if c.size == 0:
return
dtype = self._get_dtype(c.dtype)
k2 = max(c.shape[0], self.c.shape[0])
c2 = np.zeros((k2, self.c.shape[1] + c.shape[1]) + self.c.shape[2:],
dtype=dtype)
if right:
c2[k2-self.c.shape[0]:, :self.c.shape[1]] = self.c
c2[k2-c.shape[0]:, self.c.shape[1]:] = c
self.x = np.r_[self.x, x]
else:
c2[k2-self.c.shape[0]:, :c.shape[1]] = c
c2[k2-c.shape[0]:, c.shape[1]:] = self.c
self.x = np.r_[x, self.x]
self.c = c2
def __call__(self, x, nu=0, extrapolate=None):
"""
Evaluate the piecewise polynomial or its derivative
Parameters
----------
x : array_like
Points to evaluate the interpolant at.
nu : int, optional
Order of derivative to evaluate. Must be non-negative.
extrapolate : bool, optional
Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs.
Returns
-------
y : array_like
Interpolated values. Shape is determined by replacing
the interpolation axis in the original array with the shape of x.
Notes
-----
Derivatives are evaluated piecewise for each polynomial
segment, even if the polynomial is not differentiable at the
breakpoints. The polynomial intervals are considered half-open,
``[a, b)``, except for the last interval which is closed
``[a, b]``.
"""
if extrapolate is None:
extrapolate = self.extrapolate
x = np.asarray(x)
x_shape, x_ndim = x.shape, x.ndim
x = np.ascontiguousarray(x.ravel(), dtype=np.float_)
out = np.empty((len(x), prod(self.c.shape[2:])), dtype=self.c.dtype)
self._ensure_c_contiguous()
self._evaluate(x, nu, extrapolate, out)
out = out.reshape(x_shape + self.c.shape[2:])
if self.axis != 0:
# transpose to move the calculated values to the interpolation axis
l = list(range(out.ndim))
l = l[x_ndim:x_ndim+self.axis] + l[:x_ndim] + l[x_ndim+self.axis:]
out = out.transpose(l)
return out
class PPoly(_PPolyBase):
"""
Piecewise polynomial in terms of coefficients and breakpoints
The polynomial in the ``i``-th interval, ``x[i] <= xp < x[i+1]``, is::
S = sum(c[m, i] * (xp - x[i])**(k-m) for m in range(k+1))
where ``k`` is the degree of the polynomial. This representation
is the local power basis.
Parameters
----------
c : ndarray, shape (k, m, ...)
Polynomial coefficients, order `k` and `m` intervals
x : ndarray, shape (m+1,)
Polynomial breakpoints. These must be sorted in
increasing order.
extrapolate : bool, optional
Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs. Default: True.
axis : int, optional
Interpolation axis. Default is zero.
Attributes
----------
x : ndarray
Breakpoints.
c : ndarray
Coefficients of the polynomials. They are reshaped
to a 3-dimensional array with the last dimension representing
the trailing dimensions of the original coefficient array.
axis : int
Interpolation axis.
Methods
-------
__call__
derivative
antiderivative
integrate
roots
extend
from_spline
from_bernstein_basis
construct_fast
See also
--------
BPoly : piecewise polynomials in the Bernstein basis
Notes
-----
High-order polynomials in the power basis can be numerically
unstable. Precision problems can start to appear for orders
larger than 20-30.
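Examples
--------
A minimal sketch: ``f(x) = x**2`` on the single interval ``[0, 2]``
(coefficients in the local power basis, highest degree first):

>>> from scipy.interpolate import PPoly
>>> import numpy as np
>>> p = PPoly(np.array([[1.0], [0.0], [0.0]]), np.array([0.0, 2.0]))
>>> p(1.5)  # evaluates to 2.25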
"""
def _evaluate(self, x, nu, extrapolate, out):
_ppoly.evaluate(self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, x, nu, bool(extrapolate), out)
def derivative(self, nu=1):
"""
Construct a new piecewise polynomial representing the derivative.
Parameters
----------
nu : int, optional
Order of derivative to evaluate. (Default: 1)
If negative, the antiderivative is returned.
Returns
-------
pp : PPoly
Piecewise polynomial of order k2 = k - nu representing the derivative
of this polynomial.
Notes
-----
Derivatives are evaluated piecewise for each polynomial
segment, even if the polynomial is not differentiable at the
breakpoints. The polynomial intervals are considered half-open,
``[a, b)``, except for the last interval which is closed
``[a, b]``.
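Examples
--------
A minimal sketch, reusing the single-interval quadratic ``x**2``:

>>> from scipy.interpolate import PPoly
>>> import numpy as np
>>> p = PPoly(np.array([[1.0], [0.0], [0.0]]), np.array([0.0, 2.0]))
>>> p.derivative()(1.0)  # d/dx x**2 at x = 1, i.e. 2.0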
"""
if nu < 0:
return self.antiderivative(-nu)
# reduce order
if nu == 0:
c2 = self.c.copy()
else:
c2 = self.c[:-nu,:].copy()
if c2.shape[0] == 0:
# derivative of order 0 is zero
c2 = np.zeros((1,) + c2.shape[1:], dtype=c2.dtype)
# multiply by the correct rising factorials
factor = spec.poch(np.arange(c2.shape[0], 0, -1), nu)
c2 *= factor[(slice(None),) + (None,)*(c2.ndim-1)]
# construct a compatible polynomial
return self.construct_fast(c2, self.x, self.extrapolate, self.axis)
def antiderivative(self, nu=1):
"""
Construct a new piecewise polynomial representing the antiderivative.
The antiderivative is also the indefinite integral of the function,
and differentiation is its inverse operation.
Parameters
----------
nu : int, optional
Order of antiderivative to evaluate. (Default: 1)
If negative, the derivative is returned.
Returns
-------
pp : PPoly
Piecewise polynomial of order k2 = k + nu representing
the antiderivative of this polynomial.
Notes
-----
The antiderivative returned by this function is continuous and
continuously differentiable to order nu-1, up to floating point
rounding error.
"""
if nu <= 0:
return self.derivative(-nu)
c = np.zeros((self.c.shape[0] + nu, self.c.shape[1]) + self.c.shape[2:],
dtype=self.c.dtype)
c[:-nu] = self.c
# divide by the correct rising factorials
factor = spec.poch(np.arange(self.c.shape[0], 0, -1), nu)
c[:-nu] /= factor[(slice(None),) + (None,)*(c.ndim-1)]
# fix continuity of added degrees of freedom
self._ensure_c_contiguous()
_ppoly.fix_continuity(c.reshape(c.shape[0], c.shape[1], -1),
self.x, nu - 1)
# construct a compatible polynomial
return self.construct_fast(c, self.x, self.extrapolate, self.axis)
def integrate(self, a, b, extrapolate=None):
"""
Compute a definite integral over a piecewise polynomial.
Parameters
----------
a : float
Lower integration bound
b : float
Upper integration bound
extrapolate : bool, optional
Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs.
Returns
-------
ig : array_like
Definite integral of the piecewise polynomial over [a, b]
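Examples
--------
A minimal sketch: integrating ``x**2`` over ``[0, 2]`` gives ``8/3``:

>>> from scipy.interpolate import PPoly
>>> import numpy as np
>>> p = PPoly(np.array([[1.0], [0.0], [0.0]]), np.array([0.0, 2.0]))
>>> p.integrate(0, 2)  # approximately 2.6667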
"""
if extrapolate is None:
extrapolate = self.extrapolate
# Swap integration bounds if needed
sign = 1
if b < a:
a, b = b, a
sign = -1
# Compute the integral
range_int = np.empty((prod(self.c.shape[2:]),), dtype=self.c.dtype)
self._ensure_c_contiguous()
_ppoly.integrate(self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, a, b, bool(extrapolate),
out=range_int)
# Return
range_int *= sign
return range_int.reshape(self.c.shape[2:])
def roots(self, discontinuity=True, extrapolate=None):
"""
Find real roots of the piecewise polynomial.
Parameters
----------
discontinuity : bool, optional
Whether to report sign changes across discontinuities at
breakpoints as roots.
extrapolate : bool, optional
Whether to return roots from the polynomial extrapolated
based on first and last intervals.
Returns
-------
roots : ndarray
Roots of the polynomial(s).
If the PPoly object describes multiple polynomials, the
return value is an object array, each element of which is an
ndarray containing the roots.
Notes
-----
This routine works only on real-valued polynomials.
If the piecewise polynomial contains sections that are
identically zero, the root list will contain the start point
of the corresponding interval, followed by a ``nan`` value.
If the polynomial is discontinuous across a breakpoint, and
there is a sign change across the breakpoint, this is reported
if the `discontinuity` parameter is True.
Examples
--------
Finding roots of ``[x**2 - 1, (x - 1)**2]`` defined on intervals
``[-2, 1], [1, 2]``:
>>> from scipy.interpolate import PPoly
>>> pp = PPoly(np.array([[1, -4, 3], [1, 0, 0]]).T, [-2, 1, 2])
>>> pp.roots()
array([-1., 1.])
"""
if extrapolate is None:
extrapolate = self.extrapolate
self._ensure_c_contiguous()
if np.issubdtype(self.c.dtype, np.complexfloating):
raise ValueError("Root finding is only for "
"real-valued polynomials")
r = _ppoly.real_roots(self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, bool(discontinuity),
bool(extrapolate))
if self.c.ndim == 2:
return r[0]
else:
r2 = np.empty(prod(self.c.shape[2:]), dtype=object)
# this for-loop is equivalent to ``r2[...] = r``, but that's broken
# in numpy 1.6.0
for ii, root in enumerate(r):
r2[ii] = root
return r2.reshape(self.c.shape[2:])
@classmethod
def from_spline(cls, tck, extrapolate=None):
"""
Construct a piecewise polynomial from a spline
Parameters
----------
tck
A spline, as returned by `splrep`
extrapolate : bool, optional
Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs. Default: True.
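Examples
--------
A minimal sketch, assuming a default (interpolating) `splrep` fit;
the tolerance below is indicative only:

>>> from scipy.interpolate import splrep, PPoly
>>> import numpy as np
>>> x = np.linspace(0, 10, 41)
>>> tck = splrep(x, np.sin(x))
>>> pp = PPoly.from_spline(tck)
>>> np.allclose(pp(5.3), np.sin(5.3), atol=1e-3)  # True for this data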
"""
t, c, k = tck
cvals = np.empty((k + 1, len(t)-1), dtype=c.dtype)
for m in xrange(k, -1, -1):
y = fitpack.splev(t[:-1], tck, der=m)
cvals[k - m, :] = y/spec.gamma(m+1)
return cls.construct_fast(cvals, t, extrapolate)
@classmethod
def from_bernstein_basis(cls, bp, extrapolate=None):
"""
Construct a piecewise polynomial in the power basis
from a polynomial in Bernstein basis.
Parameters
----------
bp : BPoly
A Bernstein basis polynomial, as created by BPoly
extrapolate : bool, optional
Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs. Default: True.
"""
dx = np.diff(bp.x)
k = bp.c.shape[0] - 1 # polynomial order
rest = (None,)*(bp.c.ndim-2)
c = np.zeros_like(bp.c)
for a in range(k+1):
factor = (-1)**(a) * comb(k, a) * bp.c[a]
for s in range(a, k+1):
val = comb(k-a, s-a) * (-1)**s
c[k-s] += factor * val / dx[(slice(None),)+rest]**s
if extrapolate is None:
extrapolate = bp.extrapolate
return cls.construct_fast(c, bp.x, extrapolate, bp.axis)
class BPoly(_PPolyBase):
"""
Piecewise polynomial in terms of coefficients and breakpoints
The polynomial in the ``i``-th interval ``x[i] <= xp < x[i+1]``
is written in the Bernstein polynomial basis::
S = sum(c[a, i] * b(a, k; x) for a in range(k+1))
where ``k`` is the degree of the polynomial, and::
b(a, k; x) = comb(k, a) * t**a * (1 - t)**(k - a)
with ``t = (x - x[i]) / (x[i+1] - x[i])``.
Parameters
----------
c : ndarray, shape (k, m, ...)
Polynomial coefficients, order `k` and `m` intervals
x : ndarray, shape (m+1,)
Polynomial breakpoints. These must be sorted in
increasing order.
extrapolate : bool, optional
Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs. Default: True.
axis : int, optional
Interpolation axis. Default is zero.
Attributes
----------
x : ndarray
Breakpoints.
c : ndarray
Coefficients of the polynomials. They are reshaped
to a 3-dimensional array with the last dimension representing
the trailing dimensions of the original coefficient array.
axis : int
Interpolation axis.
Methods
-------
__call__
extend
derivative
antiderivative
integrate
construct_fast
from_power_basis
from_derivatives
See also
--------
PPoly : piecewise polynomials in the power basis
Notes
-----
Properties of Bernstein polynomials are well documented in the literature.
Here's a non-exhaustive list:
.. [1] http://en.wikipedia.org/wiki/Bernstein_polynomial
.. [2] Kenneth I. Joy, Bernstein polynomials,
http://www.idav.ucdavis.edu/education/CAGDNotes/Bernstein-Polynomials.pdf
.. [3] E. H. Doha, A. H. Bhrawy, and M. A. Saker, Boundary Value Problems,
vol 2011, article ID 829543, doi:10.1155/2011/829543
Examples
--------
>>> from scipy.interpolate import BPoly
>>> x = [0, 1]
>>> c = [[1], [2], [3]]
>>> bp = BPoly(c, x)
This creates a 2nd order polynomial
.. math::
B(x) = 1 \\times b_{0, 2}(x) + 2 \\times b_{1, 2}(x) + 3 \\times b_{2, 2}(x) \\\\
= 1 \\times (1-x)^2 + 2 \\times 2 x (1 - x) + 3 \\times x^2
"""
def _evaluate(self, x, nu, extrapolate, out):
_ppoly.evaluate_bernstein(
self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, x, nu, bool(extrapolate), out)
def derivative(self, nu=1):
"""
Construct a new piecewise polynomial representing the derivative.
Parameters
----------
nu : int, optional
Order of derivative to evaluate. (Default: 1)
If negative, the antiderivative is returned.
Returns
-------
bp : BPoly
Piecewise polynomial of order k2 = k - nu representing the derivative
of this polynomial.
"""
if nu < 0:
return self.antiderivative(-nu)
if nu > 1:
bp = self
for k in range(nu):
bp = bp.derivative()
return bp
# reduce order
if nu == 0:
c2 = self.c.copy()
else:
# For a polynomial
# B(x) = \sum_{a=0}^{k} c_a b_{a, k}(x),
# we use the fact that
# b'_{a, k} = k ( b_{a-1, k-1} - b_{a, k-1} ),
# which leads to
# B'(x) = \sum_{a=0}^{k-1} (c_{a+1} - c_a) b_{a, k-1}
#
# finally, for an interval [y, y + dy] with dy != 1,
# we need to correct for an extra power of dy
rest = (None,)*(self.c.ndim-2)
k = self.c.shape[0] - 1
dx = np.diff(self.x)[(None, slice(None))+rest]
c2 = k * np.diff(self.c, axis=0) / dx
if c2.shape[0] == 0:
# derivative of order 0 is zero
c2 = np.zeros((1,) + c2.shape[1:], dtype=c2.dtype)
# construct a compatible polynomial
return self.construct_fast(c2, self.x, self.extrapolate, self.axis)
def antiderivative(self, nu=1):
"""
Construct a new piecewise polynomial representing the antiderivative.
Parameters
----------
nu : int, optional
Order of derivative to evaluate. (Default: 1)
If negative, the derivative is returned.
Returns
-------
bp : BPoly
Piecewise polynomial of order k2 = k + nu representing the
antiderivative of this polynomial.
"""
if nu <= 0:
return self.derivative(-nu)
if nu > 1:
bp = self
for k in range(nu):
bp = bp.antiderivative()
return bp
# Construct the indefinite integrals on individual intervals
c, x = self.c, self.x
k = c.shape[0]
c2 = np.zeros((k+1,) + c.shape[1:], dtype=c.dtype)
c2[1:, ...] = np.cumsum(c, axis=0) / k
delta = x[1:] - x[:-1]
c2 *= delta[(None, slice(None)) + (None,)*(c.ndim-2)]
# Now fix continuity: on the very first interval, take the integration
# constant to be zero; on an interval [x_j, x_{j+1}) with j>0,
# the integration constant is then equal to the jump of the `bp` at x_j.
# The latter is given by the coefficient of B_{n+1, n+1}
# *on the previous interval* (other B. polynomials are zero at the breakpoint)
# Finally, use the fact that BPs form a partition of unity.
c2[:,1:] += np.cumsum(c2[k,:], axis=0)[:-1]
return self.construct_fast(c2, x, self.extrapolate, axis=self.axis)
def integrate(self, a, b, extrapolate=None):
"""
Compute a definite integral over a piecewise polynomial.
Parameters
----------
a : float
Lower integration bound
b : float
Upper integration bound
extrapolate : bool, optional
Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs.
Defaults to ``self.extrapolate``.
Returns
-------
array_like
Definite integral of the piecewise polynomial over [a, b]
"""
# XXX: can probably use instead the fact that
# \int_0^{1} B_{j, n}(x) \dx = 1/(n+1)
ib = self.antiderivative()
if extrapolate is not None:
ib.extrapolate = extrapolate
return ib(b) - ib(a)
def extend(self, c, x, right=True):
k = max(self.c.shape[0], c.shape[0])
self.c = self._raise_degree(self.c, k - self.c.shape[0])
c = self._raise_degree(c, k - c.shape[0])
return _PPolyBase.extend(self, c, x, right)
extend.__doc__ = _PPolyBase.extend.__doc__
@classmethod
def from_power_basis(cls, pp, extrapolate=None):
"""
Construct a piecewise polynomial in Bernstein basis
from a power basis polynomial.
Parameters
----------
pp : PPoly
A piecewise polynomial in the power basis
extrapolate : bool, optional
Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs. Default: True.
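Examples
--------
A minimal round-trip sketch on a single interval:

>>> from scipy.interpolate import PPoly, BPoly
>>> import numpy as np
>>> pp = PPoly(np.array([[1.0], [0.0]]), np.array([0.0, 1.0]))  # f(x) = x
>>> bp = BPoly.from_power_basis(pp)
>>> np.allclose(bp(0.3), pp(0.3))  # the two representations agree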
"""
dx = np.diff(pp.x)
k = pp.c.shape[0] - 1 # polynomial order
rest = (None,)*(pp.c.ndim-2)
c = np.zeros_like(pp.c)
for a in range(k+1):
factor = pp.c[a] / comb(k, k-a) * dx[(slice(None),)+rest]**(k-a)
for j in range(k-a, k+1):
c[j] += factor * comb(j, k-a)
if extrapolate is None:
extrapolate = pp.extrapolate
return cls.construct_fast(c, pp.x, extrapolate, pp.axis)
@classmethod
def from_derivatives(cls, xi, yi, orders=None, extrapolate=None):
"""Construct a piecewise polynomial in the Bernstein basis,
compatible with the specified values and derivatives at breakpoints.
Parameters
----------
xi : array_like
sorted 1D array of x-coordinates
yi : array_like or list of array_likes
``yi[i][j]`` is the ``j``-th derivative known at ``xi[i]``
orders : None or int or array_like of ints. Default: None.
Specifies the degree of local polynomials. If not None, some
derivatives are ignored.
extrapolate : bool, optional
Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs. Default: True.
Notes
-----
If ``k`` derivatives are specified at a breakpoint ``x``, the
constructed polynomial is exactly ``k`` times continuously
differentiable at ``x``, unless ``orders`` is provided explicitly.
In the latter case, the smoothness of the polynomial at
the breakpoint is controlled by ``orders``.
Deduces the number of derivatives to match at each end
from ``order`` and the number of derivatives available. If
possible it uses the same number of derivatives from
each end; if the number is odd it tries to take the
extra one from y2. In any case if not enough derivatives
are available at one end or another it draws enough to
make up the total from the other end.
If the order is too high and not enough derivatives are available,
an exception is raised.
Examples
--------
>>> from scipy.interpolate import BPoly
>>> BPoly.from_derivatives([0, 1], [[1, 2], [3, 4]])
Creates a polynomial `f(x)` of degree 3, defined on `[0, 1]`
such that `f(0) = 1, df/dx(0) = 2, f(1) = 3, df/dx(1) = 4`
>>> BPoly.from_derivatives([0, 1, 2], [[0, 1], [0], [2]])
Creates a piecewise polynomial `f(x)`, such that
`f(0) = f(1) = 0`, `f(2) = 2`, and `df/dx(0) = 1`.
Based on the number of derivatives provided, the order of the
local polynomials is 2 on `[0, 1]` and 1 on `[1, 2]`.
Notice that no restriction is imposed on the derivatives at
`x = 1` and `x = 2`.
Indeed, the explicit form of the polynomial is::
f(x) = | x * (1 - x), 0 <= x < 1
| 2 * (x - 1), 1 <= x <= 2
So that f'(1-0) = -1 and f'(1+0) = 2
"""
xi = np.asarray(xi)
if len(xi) != len(yi):
raise ValueError("xi and yi need to have the same length")
if np.any(xi[1:] - xi[:-1] <= 0):
raise ValueError("x coordinates are not in increasing order")
# number of intervals
m = len(xi) - 1
# global poly order is k-1, local orders are <=k and can vary
try:
k = max(len(yi[i]) + len(yi[i+1]) for i in range(m))
except TypeError:
raise ValueError("Using a 1D array for y? Please .reshape(-1, 1).")
if orders is None:
orders = [None] * m
else:
if isinstance(orders, integer_types):
orders = [orders] * m
k = max(k, max(orders))
if any(o <= 0 for o in orders):
raise ValueError("Orders must be positive.")
c = []
for i in range(m):
y1, y2 = yi[i], yi[i+1]
if orders[i] is None:
n1, n2 = len(y1), len(y2)
else:
n = orders[i]+1
n1 = min(n//2, len(y1))
n2 = min(n - n1, len(y2))
n1 = min(n - n2, len(y1))
if n1+n2 != n:
raise ValueError("Point %g has %d derivatives, point %g"
" has %d derivatives, but order %d requested" %
(xi[i], len(y1), xi[i+1], len(y2), orders[i]))
if not (n1 <= len(y1) and n2 <= len(y2)):
raise ValueError("`order` input incompatible with"
" length y1 or y2.")
b = BPoly._construct_from_derivatives(xi[i], xi[i+1], y1[:n1], y2[:n2])
if len(b) < k:
b = BPoly._raise_degree(b, k - len(b))
c.append(b)
c = np.asarray(c)
return cls(c.swapaxes(0, 1), xi, extrapolate)
@staticmethod
def _construct_from_derivatives(xa, xb, ya, yb):
"""Compute the coefficients of a polynomial in the Bernstein basis
given the values and derivatives at the edges.
Return the coefficients of a polynomial in the Bernstein basis
defined on `[xa, xb]` and having the values and derivatives at the
endpoints ``xa`` and ``xb`` as specified by ``ya`` and ``yb``.
The polynomial constructed is of the minimal possible degree, i.e.,
if the lengths of ``ya`` and ``yb`` are ``na`` and ``nb``, the degree
of the polynomial is ``na + nb - 1``.
Parameters
----------
xa : float
Left-hand end point of the interval
xb : float
Right-hand end point of the interval
ya : array_like
Derivatives at ``xa``. ``ya[0]`` is the value of the function, and
``ya[i]`` for ``i > 0`` is the value of the ``i``-th derivative.
yb : array_like
Derivatives at ``xb``.
Returns
-------
array
coefficient array of a polynomial having specified derivatives
Notes
-----
This uses several facts about Bernstein basis functions.
First of all,
.. math:: b'_{a, n} = n (b_{a-1, n-1} - b_{a, n-1})
If B(x) is a linear combination of the form
.. math:: B(x) = \sum_{a=0}^{n} c_a b_{a, n},
then :math:`B'(x) = n \sum_{a=0}^{n-1} (c_{a+1} - c_{a}) b_{a, n-1}`.
Iterating the latter, one finds for the q-th derivative
.. math:: B^{q}(x) = n!/(n-q)! \sum_{a=0}^{n-q} Q_a b_{a, n-q},
with
.. math:: Q_a = \sum_{j=0}^{q} (-1)^{j+q} comb(q, j) c_{j+a}
This way, only `a=0` contributes to :math:`B^{q}(x = xa)`, and
`c_q` are found one by one by iterating `q = 0, ..., na`.
At `x = xb` it's the same with `a = n - q`.
"""
ya, yb = np.asarray(ya), np.asarray(yb)
if ya.shape[1:] != yb.shape[1:]:
raise ValueError('ya and yb have incompatible dimensions.')
dta, dtb = ya.dtype, yb.dtype
if (np.issubdtype(dta, np.complexfloating)
or np.issubdtype(dtb, np.complexfloating)):
dt = np.complex_
else:
dt = np.float_
na, nb = len(ya), len(yb)
n = na + nb
c = np.empty((na+nb,) + ya.shape[1:], dtype=dt)
# compute coefficients of a polynomial degree na+nb-1
# walk left-to-right
for q in range(0, na):
c[q] = ya[q] / spec.poch(n - q, q) * (xb - xa)**q
for j in range(0, q):
c[q] -= (-1)**(j+q) * comb(q, j) * c[j]
# now walk right-to-left
for q in range(0, nb):
c[-q-1] = yb[q] / spec.poch(n - q, q) * (-1)**q * (xb - xa)**q
for j in range(0, q):
c[-q-1] -= (-1)**(j+1) * comb(q, j+1) * c[-q+j]
return c
@staticmethod
def _raise_degree(c, d):
"""Raise a degree of a polynomial in the Bernstein basis.
Given the coefficients of a polynomial degree `k`, return (the
coefficients of) the equivalent polynomial of degree `k+d`.
Parameters
----------
c : array_like
coefficient array, 1D
d : integer
Returns
-------
array
coefficient array, 1D array of length `c.shape[0] + d`
Notes
-----
This uses the fact that a Bernstein polynomial `b_{a, k}` can be
identically represented as a linear combination of polynomials of
a higher degree `k+d`:
.. math:: b_{a, k} = comb(k, a) \sum_{j=0}^{d} b_{a+j, k+d} \
comb(d, j) / comb(k+d, a+j)
"""
if d == 0:
return c
k = c.shape[0] - 1
out = np.zeros((c.shape[0] + d,) + c.shape[1:], dtype=c.dtype)
for a in range(c.shape[0]):
f = c[a] * comb(k, a)
for j in range(d+1):
out[a+j] += f * comb(d, j) / comb(k+d, a+j)
return out
class RegularGridInterpolator(object):
"""
Interpolation on a regular grid in arbitrary dimensions
The data must be defined on a regular grid; the grid spacing however may be
uneven. Linear and nearest-neighbour interpolation are supported. After
setting up the interpolator object, the interpolation method (*linear* or
*nearest*) may be chosen at each evaluation.
Parameters
----------
points : tuple of ndarray of float, with shapes (m1, ), ..., (mn, )
The points defining the regular grid in n dimensions.
values : array_like, shape (m1, ..., mn, ...)
The data on the regular grid in n dimensions.
method : str, optional
The method of interpolation to perform. Supported are "linear" and
"nearest". This parameter will become the default for the object's
``__call__`` method. Default is "linear".
bounds_error : bool, optional
If True, when interpolated values are requested outside of the
domain of the input data, a ValueError is raised.
If False, then `fill_value` is used.
fill_value : number, optional
If provided, the value to use for points outside of the
interpolation domain. If None, values outside
the domain are extrapolated.
Methods
-------
__call__
Notes
-----
Contrary to LinearNDInterpolator and NearestNDInterpolator, this class
avoids expensive triangulation of the input data by taking advantage of the
regular grid structure.
.. versionadded:: 0.14
Examples
--------
Evaluate a simple example function on the points of a 3D grid:
>>> from scipy.interpolate import RegularGridInterpolator
>>> def f(x,y,z):
... return 2 * x**3 + 3 * y**2 - z
>>> x = np.linspace(1, 4, 11)
>>> y = np.linspace(4, 7, 22)
>>> z = np.linspace(7, 9, 33)
>>> data = f(*np.meshgrid(x, y, z, indexing='ij', sparse=True))
``data`` is now a 3D array with ``data[i,j,k] = f(x[i], y[j], z[k])``.
Next, define an interpolating function from this data:
>>> my_interpolating_function = RegularGridInterpolator((x, y, z), data)
Evaluate the interpolating function at the two points
``(x,y,z) = (2.1, 6.2, 8.3)`` and ``(3.3, 5.2, 7.1)``:
>>> pts = np.array([[2.1, 6.2, 8.3], [3.3, 5.2, 7.1]])
>>> my_interpolating_function(pts)
array([ 125.80469388, 146.30069388])
which is indeed a close approximation to
``[f(2.1, 6.2, 8.3), f(3.3, 5.2, 7.1)]``.
See also
--------
NearestNDInterpolator : Nearest neighbour interpolation on unstructured
data in N dimensions
LinearNDInterpolator : Piecewise linear interpolant on unstructured data
in N dimensions
References
----------
.. [1] Python package *regulargrid* by Johannes Buchner, see
https://pypi.python.org/pypi/regulargrid/
.. [2] Trilinear interpolation. (2013, January 17). In Wikipedia, The Free
Encyclopedia. Retrieved 27 Feb 2013 01:28.
http://en.wikipedia.org/w/index.php?title=Trilinear_interpolation&oldid=533448871
.. [3] Weiser, Alan, and Sergio E. Zarantonello. "A note on piecewise linear
and multilinear table interpolation in many dimensions." MATH.
COMPUT. 50.181 (1988): 189-196.
http://www.ams.org/journals/mcom/1988-50-181/S0025-5718-1988-0917826-0/S0025-5718-1988-0917826-0.pdf
"""
# this class is based on code originally programmed by Johannes Buchner,
# see https://github.com/JohannesBuchner/regulargrid
def __init__(self, points, values, method="linear", bounds_error=True,
fill_value=np.nan):
if method not in ["linear", "nearest"]:
raise ValueError("Method '%s' is not defined" % method)
self.method = method
self.bounds_error = bounds_error
if not hasattr(values, 'ndim'):
# allow reasonable duck-typed values
values = np.asarray(values)
if len(points) > values.ndim:
raise ValueError("There are %d point arrays, but values has %d "
"dimensions" % (len(points), values.ndim))
if hasattr(values, 'dtype') and hasattr(values, 'astype'):
if not np.issubdtype(values.dtype, np.inexact):
values = values.astype(float)
self.fill_value = fill_value
if fill_value is not None:
fill_value_dtype = np.asarray(fill_value).dtype
if (hasattr(values, 'dtype')
and not np.can_cast(fill_value_dtype, values.dtype,
casting='same_kind')):
raise ValueError("fill_value must be either 'None' or "
"of a type compatible with values")
for i, p in enumerate(points):
if not np.all(np.diff(p) > 0.):
raise ValueError("The points in dimension %d must be strictly "
"ascending" % i)
if not np.asarray(p).ndim == 1:
raise ValueError("The points in dimension %d must be "
"1-dimensional" % i)
if not values.shape[i] == len(p):
raise ValueError("There are %d points and %d values in "
"dimension %d" % (len(p), values.shape[i], i))
self.grid = tuple([np.asarray(p) for p in points])
self.values = values
def __call__(self, xi, method=None):
"""
Interpolation at coordinates
Parameters
----------
xi : ndarray of shape (..., ndim)
The coordinates to sample the gridded data at
method : str
The method of interpolation to perform. Supported are "linear" and
"nearest".
"""
method = self.method if method is None else method
if method not in ["linear", "nearest"]:
raise ValueError("Method '%s' is not defined" % method)
ndim = len(self.grid)
xi = _ndim_coords_from_arrays(xi, ndim=ndim)
if xi.shape[-1] != len(self.grid):
raise ValueError("The requested sample points xi have dimension "
"%d, but this RegularGridInterpolator has "
"dimension %d" % (xi.shape[1], ndim))
xi_shape = xi.shape
xi = xi.reshape(-1, xi_shape[-1])
if self.bounds_error:
for i, p in enumerate(xi.T):
if not np.logical_and(np.all(self.grid[i][0] <= p),
np.all(p <= self.grid[i][-1])):
raise ValueError("One of the requested xi is out of bounds "
"in dimension %d" % i)
indices, norm_distances, out_of_bounds = self._find_indices(xi.T)
if method == "linear":
result = self._evaluate_linear(indices, norm_distances, out_of_bounds)
elif method == "nearest":
result = self._evaluate_nearest(indices, norm_distances, out_of_bounds)
if not self.bounds_error and self.fill_value is not None:
result[out_of_bounds] = self.fill_value
return result.reshape(xi_shape[:-1] + self.values.shape[ndim:])
def _evaluate_linear(self, indices, norm_distances, out_of_bounds):
# slice for broadcasting over trailing dimensions in self.values
vslice = (slice(None),) + (None,)*(self.values.ndim - len(indices))
# find relevant values
# each i and i+1 represents an edge
edges = itertools.product(*[[i, i + 1] for i in indices])
values = 0.
for edge_indices in edges:
weight = 1.
for ei, i, yi in zip(edge_indices, indices, norm_distances):
weight *= np.where(ei == i, 1 - yi, yi)
values += np.asarray(self.values[edge_indices]) * weight[vslice]
return values
def _evaluate_nearest(self, indices, norm_distances, out_of_bounds):
idx_res = []
for i, yi in zip(indices, norm_distances):
idx_res.append(np.where(yi <= .5, i, i + 1))
return self.values[idx_res]
def _find_indices(self, xi):
# find relevant edges between which xi are situated
indices = []
# compute distance to lower edge in unity units
norm_distances = []
# check for out of bounds xi
out_of_bounds = np.zeros((xi.shape[1]), dtype=bool)
# iterate through dimensions
for x, grid in zip(xi, self.grid):
i = np.searchsorted(grid, x) - 1
i[i < 0] = 0
i[i > grid.size - 2] = grid.size - 2
indices.append(i)
norm_distances.append((x - grid[i]) /
(grid[i + 1] - grid[i]))
if not self.bounds_error:
out_of_bounds += x < grid[0]
out_of_bounds += x > grid[-1]
return indices, norm_distances, out_of_bounds
def interpn(points, values, xi, method="linear", bounds_error=True,
fill_value=np.nan):
"""
Multidimensional interpolation on regular grids.
Parameters
----------
points : tuple of ndarray of float, with shapes (m1, ), ..., (mn, )
The points defining the regular grid in n dimensions.
values : array_like, shape (m1, ..., mn, ...)
The data on the regular grid in n dimensions.
xi : ndarray of shape (..., ndim)
The coordinates to sample the gridded data at
method : str, optional
The method of interpolation to perform. Supported are "linear" and
"nearest", and "splinef2d". "splinef2d" is only supported for
2-dimensional data.
bounds_error : bool, optional
If True, when interpolated values are requested outside of the
domain of the input data, a ValueError is raised.
If False, then `fill_value` is used.
fill_value : number, optional
If provided, the value to use for points outside of the
interpolation domain. If None, values outside
the domain are extrapolated. Extrapolation is not supported by method
"splinef2d".
Returns
-------
values_x : ndarray, shape xi.shape[:-1] + values.shape[ndim:]
Interpolated values at input coordinates.
Notes
-----
.. versionadded:: 0.14
See also
--------
NearestNDInterpolator : Nearest neighbour interpolation on unstructured
data in N dimensions
LinearNDInterpolator : Piecewise linear interpolant on unstructured data
in N dimensions
RegularGridInterpolator : Linear and nearest-neighbor Interpolation on a
regular grid in arbitrary dimensions
RectBivariateSpline : Bivariate spline approximation over a rectangular mesh
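Examples
--------
A minimal sketch on a small 2-D grid; the sampled function is chosen so
linear interpolation reproduces it exactly:

>>> from scipy.interpolate import interpn
>>> import numpy as np
>>> x = np.linspace(0, 1, 5)
>>> y = np.linspace(0, 1, 5)
>>> values = np.outer(x, y)  # f(x, y) = x * y on the grid
>>> interpn((x, y), values, [[0.5, 0.5]])  # approximately [0.25]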
"""
# sanity check 'method' kwarg
if method not in ["linear", "nearest", "splinef2d"]:
raise ValueError("interpn only understands the methods 'linear', "
"'nearest', and 'splinef2d'. You provided %s." %
method)
if not hasattr(values, 'ndim'):
values = np.asarray(values)
ndim = values.ndim
if ndim > 2 and method == "splinef2d":
raise ValueError("The method spline2fd can only be used for "
"2-dimensional input data")
if not bounds_error and fill_value is None and method == "splinef2d":
raise ValueError("The method spline2fd does not support extrapolation.")
# sanity check consistency of input dimensions
if len(points) > ndim:
raise ValueError("There are %d point arrays, but values has %d "
"dimensions" % (len(points), ndim))
if len(points) != ndim and method == 'splinef2d':
raise ValueError("The method spline2fd can only be used for "
"scalar data with one point per coordinate")
# sanity check input grid
for i, p in enumerate(points):
if not np.all(np.diff(p) > 0.):
raise ValueError("The points in dimension %d must be strictly "
"ascending" % i)
if not np.asarray(p).ndim == 1:
raise ValueError("The points in dimension %d must be "
"1-dimensional" % i)
if not values.shape[i] == len(p):
raise ValueError("There are %d points and %d values in "
"dimension %d" % (len(p), values.shape[i], i))
grid = tuple([np.asarray(p) for p in points])
# sanity check requested xi
xi = _ndim_coords_from_arrays(xi, ndim=len(grid))
if xi.shape[-1] != len(grid):
raise ValueError("The requested sample points xi have dimension "
"%d, but this RegularGridInterpolator has "
"dimension %d" % (xi.shape[1], len(grid)))
for i, p in enumerate(xi.T):
if bounds_error and not np.logical_and(np.all(grid[i][0] <= p),
np.all(p <= grid[i][-1])):
raise ValueError("One of the requested xi is out of bounds "
"in dimension %d" % i)
# perform interpolation
if method == "linear":
interp = RegularGridInterpolator(points, values, method="linear",
bounds_error=bounds_error,
fill_value=fill_value)
return interp(xi)
elif method == "nearest":
interp = RegularGridInterpolator(points, values, method="nearest",
bounds_error=bounds_error,
fill_value=fill_value)
return interp(xi)
elif method == "splinef2d":
xi_shape = xi.shape
xi = xi.reshape(-1, xi.shape[-1])
# RectBivariateSpline doesn't support fill_value; we need to wrap here
idx_valid = np.all((grid[0][0] <= xi[:, 0], xi[:, 0] <= grid[0][-1],
grid[1][0] <= xi[:, 1], xi[:, 1] <= grid[1][-1]),
axis=0)
result = np.empty_like(xi[:, 0])
# make a copy of values for RectBivariateSpline
interp = RectBivariateSpline(points[0], points[1], values[:])
result[idx_valid] = interp.ev(xi[idx_valid, 0], xi[idx_valid, 1])
result[np.logical_not(idx_valid)] = fill_value
return result.reshape(xi_shape[:-1])
# backward compatibility wrapper
class ppform(PPoly):
"""
Deprecated piecewise polynomial class.
New code should use the `PPoly` class instead.
"""
def __init__(self, coeffs, breaks, fill=0.0, sort=False):
warnings.warn("ppform is deprecated -- use PPoly instead",
category=DeprecationWarning)
if sort:
breaks = np.sort(breaks)
else:
breaks = np.asarray(breaks)
PPoly.__init__(self, coeffs, breaks)
self.coeffs = self.c
self.breaks = self.x
self.K = self.coeffs.shape[0]
self.fill = fill
self.a = self.breaks[0]
self.b = self.breaks[-1]
def __call__(self, x):
return PPoly.__call__(self, x, 0, False)
def _evaluate(self, x, nu, extrapolate, out):
PPoly._evaluate(self, x, nu, extrapolate, out)
out[~((x >= self.a) & (x <= self.b))] = self.fill
return out
@classmethod
def fromspline(cls, xk, cvals, order, fill=0.0):
# Note: this spline representation is incompatible with FITPACK
N = len(xk)-1
sivals = np.empty((order+1, N), dtype=float)
        for m in range(order, -1, -1):
fact = spec.gamma(m+1)
res = _fitpack._bspleval(xk[:-1], xk, cvals, order, m)
res /= fact
sivals[order-m, :] = res
return cls(sivals, xk, fill=fill)
def _dot0(a, b):
"""Similar to numpy.dot, but sum over last axis of a and 1st axis of b"""
if b.ndim <= 2:
return dot(a, b)
else:
axes = list(range(b.ndim))
axes.insert(-1, 0)
axes.pop(0)
return dot(a, b.transpose(axes))
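# For example, with a.shape == (2, 3) and b.shape == (3, 4, 5), _dot0(a, b)
# contracts a's last axis with b's first axis and returns shape (2, 4, 5).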
def _find_smoothest(xk, yk, order, conds=None, B=None):
# construct Bmatrix, and Jmatrix
# e = J*c
# minimize norm(e,2) given B*c=yk
# if desired B can be given
# conds is ignored
N = len(xk)-1
K = order
if B is None:
B = _fitpack._bsplmat(order, xk)
J = _fitpack._bspldismat(order, xk)
u, s, vh = scipy.linalg.svd(B)
ind = K-1
V2 = vh[-ind:,:].T
V1 = vh[:-ind,:].T
A = dot(J.T,J)
tmp = dot(V2.T,A)
Q = dot(tmp,V2)
p = scipy.linalg.solve(Q, tmp)
tmp = dot(V2,p)
tmp = np.eye(N+K) - tmp
tmp = dot(tmp,V1)
tmp = dot(tmp,np.diag(1.0/s))
tmp = dot(tmp,u.T)
return _dot0(tmp, yk)
def _setdiag(a, k, v):
if not a.ndim == 2:
raise ValueError("Input array should be 2-D.")
M,N = a.shape
if k > 0:
start = k
num = N - k
else:
num = M + k
start = abs(k)*N
end = start + num*(N+1)-1
a.flat[start:end:(N+1)] = v
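# For example, _setdiag(a, 0, v) fills a's main diagonal in place, k=1 fills
# the first superdiagonal, and k=-1 the first subdiagonal.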
# Return the spline that minimizes the discontinuity of the
# "order-th" derivative; for order >= 2.
def _find_smoothest2(xk, yk):
N = len(xk) - 1
Np1 = N + 1
# find pseudo-inverse of B directly.
Bd = np.empty((Np1, N))
for k in range(-N,N):
if (k < 0):
l = np.arange(-k, Np1)
v = (l+k+1)
if ((k+1) % 2):
v = -v
else:
l = np.arange(k,N)
v = N - l
if ((k % 2)):
v = -v
_setdiag(Bd, k, v)
Bd /= (Np1)
V2 = np.ones((Np1,))
V2[1::2] = -1
V2 /= math.sqrt(Np1)
dk = np.diff(xk)
b = 2*np.diff(yk, axis=0)/dk
J = np.zeros((N-1,N+1))
idk = 1.0/dk
_setdiag(J,0,idk[:-1])
_setdiag(J,1,-idk[1:]-idk[:-1])
_setdiag(J,2,idk[1:])
A = dot(J.T,J)
val = dot(V2,dot(A,V2))
res1 = dot(np.outer(V2,V2)/val,A)
mk = dot(np.eye(Np1)-res1, _dot0(Bd,b))
return mk
def _get_spline2_Bb(xk, yk, kind, conds):
Np1 = len(xk)
dk = xk[1:]-xk[:-1]
if kind == 'not-a-knot':
# use banded-solver
nlu = (1,1)
B = ones((3,Np1))
alpha = 2*(yk[1:]-yk[:-1])/dk
zrs = np.zeros((1,)+yk.shape[1:])
row = (Np1-1)//2
b = np.concatenate((alpha[:row],zrs,alpha[row:]),axis=0)
B[0,row+2:] = 0
B[2,:(row-1)] = 0
B[0,row+1] = dk[row-1]
B[1,row] = -dk[row]-dk[row-1]
B[2,row-1] = dk[row]
return B, b, None, nlu
else:
raise NotImplementedError("quadratic %s is not available" % kind)
def _get_spline3_Bb(xk, yk, kind, conds):
# internal function to compute different tri-diagonal system
# depending on the kind of spline requested.
# conds is only used for 'second' and 'first'
Np1 = len(xk)
if kind in ['natural', 'second']:
if kind == 'natural':
m0, mN = 0.0, 0.0
else:
m0, mN = conds
# the matrix to invert is (N-1,N-1)
# use banded solver
beta = 2*(xk[2:]-xk[:-2])
alpha = xk[1:]-xk[:-1]
nlu = (1,1)
B = np.empty((3,Np1-2))
B[0,1:] = alpha[2:]
B[1,:] = beta
B[2,:-1] = alpha[1:-1]
dyk = yk[1:]-yk[:-1]
b = (dyk[1:]/alpha[1:] - dyk[:-1]/alpha[:-1])
b *= 6
b[0] -= m0
b[-1] -= mN
def append_func(mk):
# put m0 and mN into the correct shape for
# concatenation
ma = array(m0,copy=0,ndmin=yk.ndim)
mb = array(mN,copy=0,ndmin=yk.ndim)
if ma.shape[1:] != yk.shape[1:]:
ma = ma*(ones(yk.shape[1:])[np.newaxis,...])
if mb.shape[1:] != yk.shape[1:]:
mb = mb*(ones(yk.shape[1:])[np.newaxis,...])
mk = np.concatenate((ma,mk),axis=0)
mk = np.concatenate((mk,mb),axis=0)
return mk
return B, b, append_func, nlu
elif kind in ['clamped', 'endslope', 'first', 'not-a-knot', 'runout',
'parabolic']:
if kind == 'endslope':
# match slope of lagrange interpolating polynomial of
# order 3 at end-points.
x0,x1,x2,x3 = xk[:4]
sl_0 = (1./(x0-x1)+1./(x0-x2)+1./(x0-x3))*yk[0]
sl_0 += (x0-x2)*(x0-x3)/((x1-x0)*(x1-x2)*(x1-x3))*yk[1]
            sl_0 += (x0-x1)*(x0-x3)/((x2-x0)*(x2-x1)*(x2-x3))*yk[2]
sl_0 += (x0-x1)*(x0-x2)/((x3-x0)*(x3-x1)*(x3-x2))*yk[3]
xN3,xN2,xN1,xN0 = xk[-4:]
sl_N = (1./(xN0-xN1)+1./(xN0-xN2)+1./(xN0-xN3))*yk[-1]
sl_N += (xN0-xN2)*(xN0-xN3)/((xN1-xN0)*(xN1-xN2)*(xN1-xN3))*yk[-2]
        sl_N += (xN0-xN1)*(xN0-xN3)/((xN2-xN0)*(xN2-xN1)*(xN2-xN3))*yk[-3]
sl_N += (xN0-xN1)*(xN0-xN2)/((xN3-xN0)*(xN3-xN1)*(xN3-xN2))*yk[-4]
elif kind == 'clamped':
sl_0, sl_N = 0.0, 0.0
elif kind == 'first':
sl_0, sl_N = conds
# Now set up the (N+1)x(N+1) system of equations
beta = np.r_[0,2*(xk[2:]-xk[:-2]),0]
alpha = xk[1:]-xk[:-1]
gamma = np.r_[0,alpha[1:]]
B = np.diag(alpha,k=-1) + np.diag(beta) + np.diag(gamma,k=1)
d1 = alpha[0]
dN = alpha[-1]
if kind == 'not-a-knot':
d2 = alpha[1]
dN1 = alpha[-2]
B[0,:3] = [d2,-d1-d2,d1]
B[-1,-3:] = [dN,-dN1-dN,dN1]
elif kind == 'runout':
B[0,:3] = [1,-2,1]
B[-1,-3:] = [1,-2,1]
elif kind == 'parabolic':
B[0,:2] = [1,-1]
B[-1,-2:] = [-1,1]
elif kind == 'periodic':
raise NotImplementedError
elif kind == 'symmetric':
raise NotImplementedError
else:
B[0,:2] = [2*d1,d1]
B[-1,-2:] = [dN,2*dN]
# Set up RHS (b)
b = np.empty((Np1,)+yk.shape[1:])
dyk = (yk[1:]-yk[:-1])*1.0
if kind in ['not-a-knot', 'runout', 'parabolic']:
b[0] = b[-1] = 0.0
elif kind == 'periodic':
raise NotImplementedError
elif kind == 'symmetric':
raise NotImplementedError
else:
b[0] = (dyk[0]/d1 - sl_0)
b[-1] = -(dyk[-1]/dN - sl_N)
b[1:-1,...] = (dyk[1:]/alpha[1:]-dyk[:-1]/alpha[:-1])
b *= 6.0
return B, b, None, None
else:
raise ValueError("%s not supported" % kind)
# conds is a tuple of an array and a vector
# giving the left-hand and the right-hand side
# of the additional equations to add to B
def _find_user(xk, yk, order, conds, B):
lh = conds[0]
rh = conds[1]
B = np.concatenate((B, lh), axis=0)
w = np.concatenate((yk, rh), axis=0)
M, N = B.shape
if (M > N):
raise ValueError("over-specification of conditions")
elif (M < N):
return _find_smoothest(xk, yk, order, None, B)
else:
return scipy.linalg.solve(B, w)
# If conds is None, then use the not_a_knot condition
# at K-1 farthest separated points in the interval
def _find_not_a_knot(xk, yk, order, conds, B):
raise NotImplementedError
return _find_user(xk, yk, order, conds, B)
# If conds is None, then ensure zero-valued second
# derivative at K-1 farthest separated points
def _find_natural(xk, yk, order, conds, B):
raise NotImplementedError
return _find_user(xk, yk, order, conds, B)
# If conds is None, then ensure zero-valued first
# derivative at K-1 farthest separated points
def _find_clamped(xk, yk, order, conds, B):
raise NotImplementedError
return _find_user(xk, yk, order, conds, B)
def _find_fixed(xk, yk, order, conds, B):
raise NotImplementedError
return _find_user(xk, yk, order, conds, B)
# If conds is None, then use coefficient periodicity
# If conds is 'function' then use function periodicity
def _find_periodic(xk, yk, order, conds, B):
raise NotImplementedError
return _find_user(xk, yk, order, conds, B)
# Doesn't use conds
def _find_symmetric(xk, yk, order, conds, B):
raise NotImplementedError
return _find_user(xk, yk, order, conds, B)
# conds is a dictionary with multiple values
def _find_mixed(xk, yk, order, conds, B):
raise NotImplementedError
return _find_user(xk, yk, order, conds, B)
def splmake(xk, yk, order=3, kind='smoothest', conds=None):
"""
Return a representation of a spline given data-points at internal knots
Parameters
----------
xk : array_like
The input array of x values of rank 1
yk : array_like
The input array of y values of rank N. `yk` can be an N-d array to
represent more than one curve, through the same `xk` points. The first
dimension is assumed to be the interpolating dimension and is the same
length of `xk`.
order : int, optional
Order of the spline
kind : str, optional
Can be 'smoothest', 'not_a_knot', 'fixed', 'clamped', 'natural',
'periodic', 'symmetric', 'user', 'mixed' and it is ignored if order < 2
    conds : optional
        Extra conditions passed to the solver selected by `kind`; ignored by
        the default 'smoothest' kind.
Returns
-------
splmake : tuple
Return a (`xk`, `cvals`, `k`) representation of a spline given
data-points where the (internal) knots are at the data-points.
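    Examples
    --------
    A minimal usage sketch (cubic spline through noise-free samples of sin):
    >>> import numpy as np
    >>> xk = np.linspace(0, 2*np.pi, 10)
    >>> spl = splmake(xk, np.sin(xk), order=3)
    >>> ynew = spleval(spl, np.linspace(0, 2*np.pi, 100))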
"""
yk = np.asanyarray(yk)
order = int(order)
if order < 0:
raise ValueError("order must not be negative")
if order == 0:
return xk, yk[:-1], order
elif order == 1:
return xk, yk, order
    try:
        func = globals()['_find_%s' % kind]
    except KeyError:
        raise NotImplementedError
# the constraint matrix
B = _fitpack._bsplmat(order, xk)
coefs = func(xk, yk, order, conds, B)
return xk, coefs, order
def spleval(xck, xnew, deriv=0):
"""
Evaluate a fixed spline represented by the given tuple at the new x-values
The `xj` values are the interior knot points. The approximation
region is `xj[0]` to `xj[-1]`. If N+1 is the length of `xj`, then `cvals`
should have length N+k where `k` is the order of the spline.
Parameters
----------
    xck : tuple
        The (`xj`, `cvals`, `k`) parameters that define the fixed spline
    xj : array_like
        Interior knot points
    cvals : array_like
        Spline coefficients
    k : int
        Order of the spline
xnew : array_like
Locations to calculate spline
    deriv : int
        Order of the derivative to evaluate (default 0)
Returns
-------
spleval : ndarray
If `cvals` represents more than one curve (`cvals.ndim` > 1) and/or
`xnew` is N-d, then the result is `xnew.shape` + `cvals.shape[1:]`
providing the interpolation of multiple curves.
Notes
-----
Internally, an additional `k`-1 knot points are added on either side of
the spline.
"""
(xj,cvals,k) = xck
oldshape = np.shape(xnew)
xx = np.ravel(xnew)
sh = cvals.shape[1:]
res = np.empty(xx.shape + sh, dtype=cvals.dtype)
for index in np.ndindex(*sh):
sl = (slice(None),)+index
if issubclass(cvals.dtype.type, np.complexfloating):
res[sl].real = _fitpack._bspleval(xx,xj,cvals.real[sl],k,deriv)
res[sl].imag = _fitpack._bspleval(xx,xj,cvals.imag[sl],k,deriv)
else:
res[sl] = _fitpack._bspleval(xx,xj,cvals[sl],k,deriv)
res.shape = oldshape + sh
return res
def spltopp(xk, cvals, k):
"""Return a piece-wise polynomial object from a fixed-spline tuple.
"""
return ppform.fromspline(xk, cvals, k)
def spline(xk, yk, xnew, order=3, kind='smoothest', conds=None):
"""
Interpolate a curve at new points using a spline fit
Parameters
----------
xk, yk : array_like
The x and y values that define the curve.
xnew : array_like
The x values where spline should estimate the y values.
order : int
Default is 3.
    kind : string
        One of {'smoothest'}
    conds : optional
        Extra conditions passed through to `splmake`
Returns
-------
spline : ndarray
An array of y values; the spline evaluated at the positions `xnew`.
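    Examples
    --------
    A minimal usage sketch:
    >>> import numpy as np
    >>> xk = np.linspace(0, 10, 11)
    >>> yk = xk**2
    >>> ynew = spline(xk, yk, np.linspace(0, 10, 101))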
"""
return spleval(splmake(xk,yk,order=order,kind=kind,conds=conds),xnew)
| bsd-3-clause |
| AstroVPK/kali | python/kali/mbhbcarma.py | 2 | 72863 |
#!/usr/bin/env python
""" Module to perform modulated C-ARMA modelling where the presence of a MBHB causes beaming.
"""
import numpy as np
import math as math
import scipy.stats as spstats
import cmath as cmath
import operator as operator
import sys as sys
import abc as abc
import psutil as psutil
import types as types
import os as os
import reprlib as reprlib
import copy as copy
from scipy.interpolate import UnivariateSpline
import scipy.signal.spectral
import warnings as warnings
import matplotlib.pyplot as plt
import random
import pdb as pdb
from matplotlib import cm
import multi_key_dict
import gatspy.periodic
try:
import rand
import MBHBCARMATask_cython as MBHBCARMATask_cython
import kali.lc
from kali.util.mpl_settings import set_plot_params
import kali.util.classproperty
import kali.util.triangle
except ImportError:
print('kali is not setup. Setup kali by sourcing bin/setup.sh')
sys.exit(1)
fhgt = 10
fwid = 16
set_plot_params(useTex=True)
COLORX = r'#984ea3'
COLORY = r'#ff7f00'
COLORS = [r'#4daf4a', r'#ccebc5']
ln10 = math.log(10)
def pogsonFlux(mag, magErr):
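    # Pogson's relation with the AB zero point: flux = 3631 Jy * 10**(-mag/2.5);
    # the error propagates as fluxErr = (ln 10 / 2.5) * flux * magErr.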
flux = 3631.0*math.pow(10.0, (-1.0*mag)/2.5)
fluxErr = (ln10/2.5)*flux*magErr
return flux, fluxErr
def _f7(seq):
"""http://tinyurl.com/angxm5"""
seen = set()
seen_add = seen.add
return [x for x in seq if not (x in seen or seen_add(x))]
def MAD(a):
    # Median absolute deviation: median(|a - median(a)|).
    medianVal = np.median(a)
    b = np.copy(a)
    for i in range(a.shape[0]):
        b[i] = abs(b[i] - medianVal)
    return np.median(b)
def roots(p, q, Theta):
"""!
\brief Theta -> Roots
"""
r = MBHBCARMATask.r
ARPoly = np.zeros(p + 1)
ARPoly[0] = 1.0
for i in range(p):
ARPoly[i + 1] = Theta[r + i]
ARRoots = np.array(np.roots(ARPoly))
MAPoly = np.zeros(q + 1)
for i in range(q + 1):
MAPoly[i] = Theta[r + p + q - i]
MARoots = np.array(np.roots(MAPoly))
Rho = np.zeros(r + p + q + 1, dtype='complex128')
for i in range(r):
Rho[i] = Theta[i]
for i in range(p):
Rho[r + i] = ARRoots[i]
for i in range(q):
Rho[r + p + i] = MARoots[i]
Sigma = np.require(np.zeros(p*p), requirements=['F', 'A', 'W', 'O', 'E'])
ThetaC = np.require(np.array(Theta), requirements=['F', 'A', 'W', 'O', 'E'])
MBHBCARMATask_cython.get_Sigma(r, p, q, ThetaC, Sigma)
Rho[r + p + q] = math.sqrt(Sigma[0])
return Rho
def coeffs(p, q, Rho):
"""!
\brief Roots -> Coeffs
"""
r = MBHBCARMATask.r
ARRoots = np.zeros(p, dtype='complex128')
for i in range(p):
ARRoots[i] = Rho[r + i]
ARPoly = np.array(np.poly(ARRoots))
MARoots = np.zeros(q, dtype='complex128')
for i in range(q):
MARoots[i] = Rho[r + p + i]
if q == 0:
MAPoly = np.ones(1)
else:
MAPoly = np.array(np.poly(MARoots))
ThetaPrime = np.require(
np.array(Rho[0:r].tolist() + ARPoly[1:].tolist() + MAPoly.tolist()[::-1]),
requirements=['F', 'A', 'W', 'O', 'E'])
SigmaPrime = np.require(np.zeros(p*p), requirements=['F', 'A', 'W', 'O', 'E'])
MBHBCARMATask_cython.get_Sigma(r, p, q, ThetaPrime, SigmaPrime)
with warnings.catch_warnings():
warnings.simplefilter('ignore')
Sigma00 = math.pow(Rho[r + p + q], 2.0)
try:
bQ = math.sqrt(Sigma00/SigmaPrime[0])
except ValueError:
bQ = 1.0
with warnings.catch_warnings():
warnings.simplefilter('ignore')
for i in range(q + 1):
MAPoly[i] = bQ*MAPoly[i]
Theta = np.zeros(r + p + q + 1, dtype='float64')
for i in range(r):
Theta[i] = Rho[i]
for i in range(p):
Theta[r + i] = ARPoly[i + 1].real
for i in range(q + 1):
Theta[r + p + i] = MAPoly[q - i].real
return Theta
def timescales(p, q, Rho):
"""!
\brief Roots -> Timescales
"""
r = MBHBCARMATask.r
imagPairs = 0
for i in range(p):
if Rho[r + i].imag != 0.0:
imagPairs += 1
    numImag = imagPairs//2
    numReal = numImag + (p - imagPairs)
decayTimescales = np.zeros(numReal)
oscTimescales = np.zeros(numImag)
realRoots = set(Rho[r:r + p].real)
imagRoots = set(abs(Rho[r:r + p].imag)).difference(set([0.0]))
realAR = sorted([1.0/abs(x) for x in realRoots])
imagAR = sorted([(2.0*math.pi)/abs(x) for x in imagRoots])
imagPairs = 0
for i in range(q):
if Rho[r + p + i].imag != 0.0:
imagPairs += 1
    numImag = imagPairs//2
    numReal = numImag + (q - imagPairs)
decayTimescales = np.zeros(numReal)
oscTimescales = np.zeros(numImag)
realRoots = set(Rho[r + p:r + p + q].real)
imagRoots = set(abs(Rho[r + p:r + p + q].imag)).difference(set([0.0]))
realMA = sorted([1.0/abs(x) for x in realRoots])
imagMA = sorted([(2.0*math.pi)/abs(x) for x in imagRoots])
return np.array(Rho[0:r].tolist() + realAR + imagAR + realMA + imagMA + [Rho[r + p + q]])
class MBHBCARMATask(object):
_type = 'kali.mbhbcarma'
_r = 8
G = 6.67408e-11
c = 299792458.0
pi = 3.141592653589793238462643383279502884197169399375105820974944592307816406286208998628034825342117067
twoPi = 2.0*pi
fourPi = 4.0*pi
fourPiSq = 4.0*math.pow(pi, 2.0)
Parsec = 3.0857e16
Day = 86164.090530833
Year = 31557600.0
SolarMass = 1.98855e30
kms2ms = 1.0e3
SolarMassPerCubicParsec = SolarMass/math.pow(Parsec, 3.0)
sigmaStars = 200.0
rhoStars = 1000.0
H = 16.0
_dict = multi_key_dict.multi_key_dict()
_dict[r'$a_{1}$ (pc)', 0, r'0', r'a1', r'a_1', r'$a_{1}$', r'$a_{1}~\mathrm{(pc)}$',
'semi-major axis 1', 'semimajor axis 1'] = 0
_dict[r'$a_{2}$ (pc)', 1, r'1', r'a2', r'a_2', r'$a_{2}$', r'$a_{2}~\mathrm{(pc)}$',
'semi-major axis 2', 'semimajor axis 2'] = 1
_dict[r'$T$ (d)', 2, r'2', r'T', r'$T$', r'$T~\mathrm{(d)}$', 'period'] = 2
_dict[r'$e$', 3, r'3', r'e', 'ellipticity', 'eccentricity'] = 3
_dict[r'$\Omega$ (deg.)', 4, r'4', r'$\Omega~\mathrm{(deg.)}$', r'Omega', r'omega'] = 4
_dict[r'$i$ (deg.)', 5, r'5', r'$i~\mathrm{(deg.)}$', r'Inclination', r'inclination'] = 5
_dict[r'$\tau$ (d)', 6, r'6', r'$\tau~\mathrm{(d)}$', r'Tau', r'tau'] = 6
_dict[r'$F$', 7, r'7', r'Flux', r'flux'] = 7
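    # Any alias key above resolves to that parameter's index in Theta, e.g.
    # MBHBCARMATask._dict['period'] == 2 and MBHBCARMATask._dict[r'e'] == 3.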
def __init__(self, p, q, nthreads=psutil.cpu_count(logical=True), nburn=1000000,
nwalkers=25*psutil.cpu_count(logical=True), nsteps=250, maxEvals=10000, xTol=0.001,
mcmcA=2.0):
try:
assert p > q, r'p must be greater than q'
assert p >= 1, r'p must be greater than or equal to 1'
assert isinstance(p, int), r'p must be an integer'
assert q >= 0, r'q must be greater than or equal to 0'
assert isinstance(q, int), r'q must be an integer'
assert nthreads > 0, r'nthreads must be greater than 0'
assert isinstance(nthreads, int), r'nthreads must be an integer'
assert nburn >= 0, r'nburn must be greater than or equal to 0'
assert isinstance(nburn, int), r'nburn must be an integer'
assert nwalkers > 0, r'nwalkers must be greater than 0'
assert isinstance(nwalkers, int), r'nwalkers must be an integer'
assert nsteps > 0, r'nsteps must be greater than 0'
assert isinstance(nsteps, int), r'nsteps must be an integer'
assert maxEvals > 0, r'maxEvals must be greater than 0'
assert isinstance(maxEvals, int), r'maxEvals must be an integer'
assert xTol > 0.0, r'xTol must be greater than 0'
assert isinstance(xTol, float), r'xTol must be a float'
self._p = p
self._q = q
self._ndims = self._r + self._p + self._q + 1
self._nthreads = nthreads
self._nburn = nburn
self._nwalkers = nwalkers
self._nsteps = nsteps
self._maxEvals = maxEvals
self._xTol = xTol
self._mcmcA = mcmcA
self._Chain = np.require(
np.zeros(self._ndims*self._nwalkers*self._nsteps), requirements=['F', 'A', 'W', 'O', 'E'])
self._LnPrior = np.require(
np.zeros(self._nwalkers*self._nsteps), requirements=['F', 'A', 'W', 'O', 'E'])
self._LnLikelihood = np.require(
np.zeros(self._nwalkers*self._nsteps), requirements=['F', 'A', 'W', 'O', 'E'])
self._taskCython = MBHBCARMATask_cython.MBHBCARMATask_cython(self._p, self._q, self._nthreads,
self._nburn)
self._pDIC = None
self._dic = None
self._name = 'kali.MBHBCARMATask(%d, %d)'%(self.p, self.q)
except AssertionError as err:
raise AttributeError(str(err))
def __getstate__(self):
"""!
\brief Trim the c-pointer from the task and return the remaining __dict__ for pickling.
"""
state = copy.copy(self.__dict__)
del state['_taskCython']
return state
def __setstate__(self, state):
"""!
\brief Restore an un-pickled task completely by setting the c-pointer to the right Cython class.
"""
self.__dict__ = copy.copy(state)
self._taskCython = MBHBCARMATask_cython.MBHBCARMATask_cython(self._p, self._q, self._nthreads,
self._nburn)
@kali.util.classproperty.ClassProperty
@classmethod
def type(self):
return self._type
@property
def name(self):
return self._name
@kali.util.classproperty.ClassProperty
@classmethod
def r(self):
return self._r
@property
def p(self):
return self._p
@p.setter
def p(self, value):
try:
assert value > self._q, r'p must be greater than q'
assert value >= 1, r'p must be greater than or equal to 1'
assert isinstance(value, int), r'p must be an integer'
            self._taskCython.reset_MBHBCARMATask(value, self._q, self._nburn)
            self._p = value
            self._ndims = self._r + self._p + self._q + 1
self._Chain = np.require(np.zeros(self._ndims*self._nwalkers*self._nsteps),
requirements=['F', 'A', 'W', 'O', 'E'])
except AssertionError as err:
raise AttributeError(str(err))
@property
def q(self):
return self._q
@q.setter
def q(self, value):
try:
            assert self._p > value, r'p must be greater than q'
            assert value >= 0, r'q must be greater than or equal to 0'
            assert isinstance(value, int), r'q must be an integer'
            self._taskCython.reset_MBHBCARMATask(self._p, value, self._nburn)
            self._q = value
            self._ndims = self._r + self._p + self._q + 1
self._Chain = np.require(np.zeros(self._ndims*self._nwalkers*self._nsteps),
requirements=['F', 'A', 'W', 'O', 'E'])
except AssertionError as err:
raise AttributeError(str(err))
@property
def id(self):
return self.type + '.' + str(self.p) + '.' + str(self.q)
@property
def nthreads(self):
return self._nthreads
@property
def nburn(self):
return self._nburn
@nburn.setter
    def nburn(self, value):
        try:
            assert value >= 0, r'nburn must be greater than or equal to 0'
            assert isinstance(value, int), r'nburn must be an integer'
            self._taskCython.reset_MBHBCARMATask(self._p, self._q, value)
            self._nburn = value
except AssertionError as err:
raise AttributeError(str(err))
@property
def ndims(self):
return self._ndims
@property
def nwalkers(self):
return self._nwalkers
@nwalkers.setter
def nwalkers(self, value):
try:
assert value >= 0, r'nwalkers must be greater than or equal to 0'
assert isinstance(value, int), r'nwalkers must be an integer'
self._nwalkers = value
self._Chain = np.require(np.zeros(self._ndims*self._nwalkers*self._nsteps),
requirements=['F', 'A', 'W', 'O', 'E'])
self._LnPrior = np.require(np.zeros(self._nwalkers*self._nsteps),
requirements=['F', 'A', 'W', 'O', 'E'])
self._LnLikelihood = np.require(np.zeros(self._nwalkers*self._nsteps),
requirements=['F', 'A', 'W', 'O', 'E'])
except AssertionError as err:
raise AttributeError(str(err))
@property
def nsteps(self):
return self._nsteps
@nsteps.setter
def nsteps(self, value):
try:
assert value >= 0, r'nsteps must be greater than or equal to 0'
assert isinstance(value, int), r'nsteps must be an integer'
self._nsteps = value
self._Chain = np.require(np.zeros(self._ndims*self._nwalkers*self._nsteps),
requirements=['F', 'A', 'W', 'O', 'E'])
self._LnPrior = np.require(np.zeros(self._nwalkers*self._nsteps),
requirements=['F', 'A', 'W', 'O', 'E'])
self._LnLikelihood = np.require(np.zeros(self._nwalkers*self._nsteps),
requirements=['F', 'A', 'W', 'O', 'E'])
except AssertionError as err:
raise AttributeError(str(err))
@property
def maxEvals(self):
return self._maxEvals
@maxEvals.setter
def maxEvals(self, value):
try:
assert value >= 0, r'maxEvals must be greater than or equal to 0'
assert isinstance(value, int), r'maxEvals must be an integer'
self._maxEvals = value
except AssertionError as err:
raise AttributeError(str(err))
@property
def xTol(self):
return self._xTol
@xTol.setter
def xTol(self, value):
try:
assert value >= 0, r'xTol must be greater than or equal to 0'
assert isinstance(value, float), r'xTol must be a float'
self._xTol = value
except AssertionError as err:
raise AttributeError(str(err))
@property
def mcmcA(self):
return self._mcmcA
@mcmcA.setter
def mcmcA(self, value):
try:
assert value >= 0, r'mcmcA must be greater than or equal to 0.0'
assert isinstance(value, float), r'mcmcA must be a float'
self._mcmcA = value
except AssertionError as err:
raise AttributeError(str(err))
@property
def Chain(self):
return np.reshape(self._Chain, newshape=(self._ndims, self._nwalkers, self._nsteps), order='F')
@property
def rootChain(self):
if hasattr(self, '_rootChain'):
return self._rootChain
else:
Chain = self.Chain
self._rootChain = np.require(
np.zeros((self._ndims, self._nwalkers, self._nsteps), dtype='complex128'),
requirements=['F', 'A', 'W', 'O', 'E'])
for stepNum in range(self._nsteps):
for walkerNum in range(self._nwalkers):
self._rootChain[:, walkerNum, stepNum] = roots(
self._p, self._q, Chain[:, walkerNum, stepNum])
return self._rootChain
@property
def timescaleChain(self):
if hasattr(self, '_timescaleChain'):
return self._timescaleChain
else:
rootChain = self.rootChain
self._timescaleChain = np.require(
np.zeros((self._ndims, self._nwalkers, self._nsteps), dtype='float64'),
requirements=['F', 'A', 'W', 'O', 'E'])
with warnings.catch_warnings():
warnings.simplefilter('ignore')
for stepNum in range(self._nsteps):
for walkerNum in range(self._nwalkers):
self._timescaleChain[:, walkerNum, stepNum] = timescales(
self._p, self._q, rootChain[:, walkerNum, stepNum])
return self._timescaleChain
@property
def LnPrior(self):
return np.reshape(self._LnPrior, newshape=(self._nwalkers, self._nsteps), order='F')
@property
def LnLikelihood(self):
return np.reshape(self._LnLikelihood, newshape=(self._nwalkers, self._nsteps), order='F')
@property
def LnPosterior(self):
return self.LnPrior + self.LnLikelihood
@property
def pDIC(self):
return self._pDIC
@property
def dic(self):
return self._dic
def __repr__(self):
return "kali.mbhbcarma.MBHBCARMATask(%d, %d, %d, %d, %d, %d, %d, %f)"%(self._p, self._q,
self._nthreads, self._nburn,
self._nwalkers, self._nsteps,
self._maxEvals, self._xTol)
def __str__(self):
line = 'r: %d; p: %d; q: %d; ndims: %d\n'%(self._r, self._p, self._q, self._ndims)
line += 'nthreads (Number of hardware threads to use): %d\n'%(self._nthreads)
line += 'nburn (Number of light curve steps to burn): %d\n'%(self._nburn)
line += 'nwalkers (Number of MCMC walkers): %d\n'%(self._nwalkers)
line += 'nsteps (Number of MCMC steps): %d\n'%(self.nsteps)
line += 'maxEvals (Maximum number of evaluations when attempting to find starting location for MCMC):\
%d\n'%(self._maxEvals)
line += 'xTol (Fractional tolerance in optimized parameter value): %f'%(self._xTol)
return line
def __eq__(self, other):
if self.__class__ == other.__class__:
if ((self._r == other._r) and (self._p == other.p) and (self._q == other.q) and
(self._nthreads == other.nthreads) and (self._nburn == other.nburn) and
(self._nwalkers == other.nwalkers) and (self._nsteps == other.nsteps) and
(self._maxEvals == other.maxEvals) and (self.xTol == other.xTol)):
return True
else:
return False
else:
return False
    def __ne__(self, other):
        return not self == other
def reset(self, p=None, q=None, nburn=None, nwalkers=None, nsteps=None):
if p is None:
p = self._p
if q is None:
q = self._q
if nburn is None:
nburn = self._nburn
if nwalkers is None:
nwalkers = self._nwalkers
if nsteps is None:
nsteps = self._nsteps
try:
assert p > q, r'p must be greater than q'
assert p >= 1, r'p must be greater than or equal to 1'
assert isinstance(p, int), r'p must be an integer'
assert q >= 0, r'q must be greater than or equal to 0'
assert isinstance(q, int), r'q must be an integer'
assert nburn >= 0, r'nburn must be greater than or equal to 0'
assert isinstance(nburn, int), r'nburn must be an integer'
assert nwalkers > 0, r'nwalkers must be greater than 0'
assert isinstance(nwalkers, int), r'nwalkers must be an integer'
assert nsteps > 0, r'nsteps must be greater than 0'
assert isinstance(nsteps, int), r'nsteps must be an integer'
self._taskCython.reset_MBHBCARMATask(p, q, nburn)
self._p = p
self._q = q
            self._ndims = self._r + self._p + self._q + 1
self._nburn = nburn
self._nwalkers = nwalkers
self._nsteps = nsteps
self._Chain = np.require(np.zeros(self._ndims*self._nwalkers*self._nsteps),
requirements=['F', 'A', 'W', 'O', 'E'])
self._LnPrior = np.require(np.zeros(self._nwalkers*self._nsteps),
requirements=['F', 'A', 'W', 'O', 'E'])
self._LnLikelihood = np.require(np.zeros(self._nwalkers*self._nsteps),
requirements=['F', 'A', 'W', 'O', 'E'])
except AssertionError as err:
raise AttributeError(str(err))
def check(self, Theta, tnum=None):
if tnum is None:
tnum = 0
        assert Theta.shape == (self._ndims,), r'Theta must have shape (%d,)'%(self._ndims)
return bool(self._taskCython.check_Theta(Theta, tnum))
def set(self, dt, Theta, tnum=None):
if tnum is None:
tnum = 0
assert dt > 0.0, r'dt must be greater than 0.0'
assert isinstance(dt, float), r'dt must be a float'
        assert Theta.shape == (self._ndims,), r'Theta must have shape (%d,)'%(self._ndims)
return self._taskCython.set_System(dt, Theta, tnum)
def dt(self, tnum=None):
if tnum is None:
tnum = 0
return self._taskCython.get_dt(tnum)
def Theta(self, tnum=None):
if tnum is None:
tnum = 0
Theta = np.require(np.zeros(self._ndims), requirements=['F', 'A', 'W', 'O', 'E'])
self._taskCython.get_Theta(Theta, tnum)
return Theta
def list(self):
setSystems = np.require(
np.zeros(self._nthreads, dtype='int32'), requirements=['F', 'A', 'W', 'O', 'E'])
self._taskCython.get_setSystemsVec(setSystems)
return setSystems.astype(np.bool_)
def show(self, tnum=None):
if tnum is None:
tnum = 0
self._taskCython.print_System(tnum)
def Sigma(self, tnum=None):
if tnum is None:
tnum = 0
Sigma = np.require(np.zeros(self._p*self._p), requirements=['F', 'A', 'W', 'O', 'E'])
self._taskCython.get_Sigma(Sigma, tnum)
return np.reshape(Sigma, newshape=(self._p, self._p), order='F')
def X(self, newX=None, tnum=None):
if tnum is None:
tnum = 0
        if newX is None:
            X = np.zeros(self._p)
            self._taskCython.get_X(X, tnum)
            return np.reshape(X, newshape=(self._p), order='F')
        else:
            self._taskCython.set_X(np.reshape(newX, newshape=(self._p), order='F'), tnum)
            return newX
def P(self, newP=None, tnum=None):
if tnum is None:
tnum = 0
        if newP is None:
            P = np.zeros(self._p*self._p)
            self._taskCython.get_P(P, tnum)
            return np.reshape(P, newshape=(self._p, self._p), order='F')
        else:
            self._taskCython.set_P(np.reshape(newP, newshape=(self._p*self._p), order='F'), tnum)
            return newP
def epoch(self, tnum=None):
if tnum is None:
tnum = 0
return self._taskCython.get_Epoch(tnum)
def period(self, tnum=None):
if tnum is None:
tnum = 0
return self._taskCython.get_Period(tnum)
def a1(self, tnum=None):
if tnum is None:
tnum = 0
return self._taskCython.get_A1(tnum)
def a2(self, tnum=None):
if tnum is None:
tnum = 0
return self._taskCython.get_A2(tnum)
def m1(self, tnum=None):
if tnum is None:
tnum = 0
return self._taskCython.get_M1(tnum)
def m2(self, tnum=None):
if tnum is None:
tnum = 0
return self._taskCython.get_M2(tnum)
def m12(self, tnum=None):
if tnum is None:
tnum = 0
return self._taskCython.get_M12(tnum)
def mratio(self, tnum=None):
if tnum is None:
tnum = 0
return self._taskCython.get_M2OverM1(tnum)
def rPeribothron1(self, tnum=None):
if tnum is None:
tnum = 0
return self._taskCython.get_RPeribothron1(tnum)
def rPeribothron2(self, tnum=None):
if tnum is None:
tnum = 0
return self._taskCython.get_RPeribothron2(tnum)
def rApobothron1(self, tnum=None):
if tnum is None:
tnum = 0
return self._taskCython.get_RApobothron1(tnum)
def rApobothron2(self, tnum=None):
if tnum is None:
tnum = 0
return self._taskCython.get_RApobothron2(tnum)
def rPeribothron(self, tnum=None):
if tnum is None:
tnum = 0
return self._taskCython.get_RPeribothronTot(tnum)
def rApobothron(self, tnum=None):
if tnum is None:
tnum = 0
return self._taskCython.get_RApobothronTot(tnum)
def rS1(self, tnum=None):
if tnum is None:
tnum = 0
return self._taskCython.get_RS1(tnum)
def rS2(self, tnum=None):
if tnum is None:
tnum = 0
return self._taskCython.get_RS2(tnum)
def eccentricity(self, tnum=None):
if tnum is None:
tnum = 0
return self._taskCython.get_Eccentricity(tnum)
def omega1(self, tnum=None):
if tnum is None:
tnum = 0
return self._taskCython.get_Omega1(tnum)
def omega2(self, tnum=None):
if tnum is None:
tnum = 0
return self._taskCython.get_Omega2(tnum)
def inclination(self, tnum=None):
if tnum is None:
tnum = 0
return self._taskCython.get_Inclination(tnum)
def tau(self, tnum=None):
if tnum is None:
tnum = 0
return self._taskCython.get_Tau(tnum)
def M(self, tnum=None):
if tnum is None:
tnum = 0
return self._taskCython.get_MeanAnomoly(tnum)
def E(self, tnum=None):
if tnum is None:
tnum = 0
return self._taskCython.get_EccentricAnomoly(tnum)
def nu(self, tnum=None):
if tnum is None:
tnum = 0
return self._taskCython.get_TrueAnomoly(tnum)
def r1(self, tnum=None):
if tnum is None:
tnum = 0
return self._taskCython.get_R1(tnum)
def r2(self, tnum=None):
if tnum is None:
tnum = 0
return self._taskCython.get_R2(tnum)
def theta1(self, tnum=None):
if tnum is None:
tnum = 0
return self._taskCython.get_Theta1(tnum)
def theta2(self, tnum=None):
if tnum is None:
tnum = 0
return self._taskCython.get_Theta2(tnum)
def Beta1(self, tnum=None):
if tnum is None:
tnum = 0
return self._taskCython.get_Beta1(tnum)
def Beta2(self, tnum=None):
if tnum is None:
tnum = 0
return self._taskCython.get_Beta2(tnum)
def radialBeta1(self, tnum=None):
if tnum is None:
tnum = 0
return self._taskCython.get_RadialBeta1(tnum)
def radialBeta2(self, tnum=None):
if tnum is None:
tnum = 0
return self._taskCython.get_RadialBeta2(tnum)
def dopplerFactor1(self, tnum=None):
if tnum is None:
tnum = 0
return self._taskCython.get_DopplerFactor1(tnum)
def dopplerFactor2(self, tnum=None):
if tnum is None:
tnum = 0
return self._taskCython.get_DopplerFactor2(tnum)
def beamingFactor1(self, tnum=None):
if tnum is None:
tnum = 0
return self._taskCython.get_BeamingFactor1(tnum)
def beamingFactor2(self, tnum=None):
if tnum is None:
tnum = 0
return self._taskCython.get_BeamingFactor2(tnum)
def aH(self, sigmaStars=200.0, tnum=None):
if tnum is None:
tnum = 0
return self._taskCython.get_aH(sigmaStars, tnum)
def aGW(self, sigmaStars=200.0, rhoStars=1000.0, H=16, tnum=None):
if tnum is None:
tnum = 0
return self._taskCython.get_aGW(sigmaStars, rhoStars, H, tnum)
def durationInHardState(self, sigmaStars=200.0, rhoStars=1000.0, H=16, tnum=None):
if tnum is None:
tnum = 0
return self._taskCython.get_durationInHardState(sigmaStars, rhoStars, H, tnum)
def ejectedMass(self, sigmaStars=200.0, rhoStars=1000.0, H=16, tnum=None):
if tnum is None:
tnum = 0
return self._taskCython.get_ejectedMass(sigmaStars, rhoStars, H, tnum)
def beam(self, duration=None, tIn=None, tnum=None, tolIR=1.0e-3, startT=0.0,
fracIntrinsicVar=0.15, fracNoiseToSignal=0.001,
maxSigma=2.0, minTimescale=2.0, maxTimescale=0.5):
if tnum is None:
tnum = 0
if tIn is None and duration is not None:
numCadences = int(round(float(duration)/self._taskCython.get_dt(threadNum=tnum)))
intrinsicLC = kali.lc.mockLC(name='', band='', numCadences=numCadences,
deltaT=self._taskCython.get_dt(threadNum=tnum), tolIR=tolIR,
startT=startT, fracIntrinsicVar=fracIntrinsicVar,
fracNoiseToSignal=fracNoiseToSignal, maxSigma=maxSigma,
minTimescale=minTimescale, maxTimescale=maxTimescale,
pSim=self._p, qSim=self._q)
elif duration is None and tIn is not None:
numCadences = tIn.shape[0]
t = np.require(np.array(tIn), requirements=['F', 'A', 'W', 'O', 'E'])
intrinsicLC = kali.lc.mockLC(name='', band='', tIn=t, startT=startT,
fracIntrinsicVar=fracIntrinsicVar,
fracNoiseToSignal=fracNoiseToSignal, tolIR=tolIR, maxSigma=maxSigma,
minTimescale=minTimescale, maxTimescale=maxTimescale,
                                         pSim=self._p, qSim=self._q)
        else:
            raise ValueError(r'Specify exactly one of duration or tIn')
self._taskCython.make_BeamedLC(
intrinsicLC.numCadences, intrinsicLC.tolIR, intrinsicLC.startT, intrinsicLC.fracIntrinsicVar,
intrinsicLC.fracNoiseToSignal, intrinsicLC.t, intrinsicLC.x, intrinsicLC.y, intrinsicLC.yerr,
intrinsicLC.mask, intrinsicLC.XSim, intrinsicLC.PSim, threadNum=tnum)
intrinsicLC._simulatedCadenceNum = numCadences - 1
intrinsicLC._T = intrinsicLC.t[-1] - intrinsicLC.t[0]
return intrinsicLC
def simulate(self, duration=None, tIn=None, tolIR=1.0e-3, startT=0.0,
fracIntrinsicVar=0.15, fracNoiseToSignal=0.001,
maxSigma=2.0, minTimescale=2.0, maxTimescale=0.5, burnSeed=None, distSeed=None,
tnum=None):
if tnum is None:
tnum = 0
if tIn is None and duration is not None:
numCadences = int(round(float(duration)/self._taskCython.get_dt(threadNum=tnum)))
intrinsicLC = kali.lc.mockLC(name='', band='', numCadences=numCadences,
deltaT=self._taskCython.get_dt(threadNum=tnum), tolIR=tolIR,
startT=startT, fracIntrinsicVar=fracIntrinsicVar,
fracNoiseToSignal=fracNoiseToSignal, maxSigma=maxSigma,
minTimescale=minTimescale, maxTimescale=maxTimescale,
pSim=self._p, qSim=self._q)
elif duration is None and tIn is not None:
numCadences = tIn.shape[0]
t = np.require(np.array(tIn), requirements=['F', 'A', 'W', 'O', 'E'])
intrinsicLC = kali.lc.mockLC(name='', band='', tIn=t, startT=startT,
fracIntrinsicVar=fracIntrinsicVar,
fracNoiseToSignal=fracNoiseToSignal, tolIR=tolIR, maxSigma=maxSigma,
minTimescale=minTimescale, maxTimescale=maxTimescale,
                                         pSim=self._p, qSim=self._q)
        else:
            raise ValueError(r'Specify exactly one of duration or tIn')
for i in range(intrinsicLC.numCadences):
intrinsicLC.mask[i] = 1.0
randSeed = np.zeros(1, dtype='uint32')
if burnSeed is None:
rand.rdrand(randSeed)
burnSeed = randSeed[0]
if distSeed is None:
rand.rdrand(randSeed)
distSeed = randSeed[0]
self._taskCython.make_IntrinsicLC(
intrinsicLC.numCadences, intrinsicLC.tolIR, intrinsicLC.startT, intrinsicLC.fracIntrinsicVar,
intrinsicLC.fracNoiseToSignal, intrinsicLC.t, intrinsicLC.x, intrinsicLC.y, intrinsicLC.yerr,
intrinsicLC.mask, intrinsicLC.XSim, intrinsicLC.PSim, burnSeed, distSeed, threadNum=tnum)
intrinsicLC._simulatedCadenceNum = numCadences - 1
intrinsicLC._T = intrinsicLC.t[-1] - intrinsicLC.t[0]
return intrinsicLC
'''def extend(
self, intrinsicLC, duration=None, tIn=None, gap=None, distSeed=None, noiseSeed=None, tnum=None):
if tnum is None:
tnum = 0
randSeed = np.zeros(1, dtype='uint32')
if distSeed is None:
rand.rdrand(randSeed)
distSeed = randSeed[0]
if noiseSeed is None:
rand.rdrand(randSeed)
noiseSeed = randSeed[0]
if intrinsicLC.pSim != self.p:
intrinsicLC.pSim = self.p
if intrinsicLC.qSim != self.q:
intrinsicLC.qSim = self.q
if gap is None:
gapSize = 0.0
else:
gapSize = gap
oldNumCadences = intrinsicLC.numCadences
gapNumCadences = int(round(float(gapSize)/self._taskCython.get_dt(threadNum=tnum)))
if tIn is None and duration is not None:
extraNumCadences = int(round(float(duration + gapSize)/self._taskCython.get_dt(threadNum=tnum)))
elif duration is None and gap is None and tIn is not None:
extraNumCadences = np.array(tIn).shape[0]
else:
raise ValueError('Cannot specify both tIn and gap at the same time')
newNumCadences = intrinsicLC.numCadences + extraNumCadences
newt = np.require(np.zeros(newNumCadences), requirements=['F', 'A', 'W', 'O', 'E'])
newx = np.require(np.zeros(newNumCadences), requirements=['F', 'A', 'W', 'O', 'E'])
newy = np.require(np.zeros(newNumCadences), requirements=['F', 'A', 'W', 'O', 'E'])
newyerr = np.require(np.zeros(newNumCadences), requirements=['F', 'A', 'W', 'O', 'E'])
newmask = np.require(np.zeros(newNumCadences), requirements=['F', 'A', 'W', 'O', 'E'])
for i in range(intrinsicLC.numCadences):
newt[i] = intrinsicLC.t[i]
newx[i] = intrinsicLC.x[i]
newy[i] = intrinsicLC.y[i]
newyerr[i] = intrinsicLC.yerr[i]
newmask[i] = intrinsicLC.mask[i]
if tIn is None and duration is not None:
for i in range(intrinsicLC.numCadences, newNumCadences):
newt[i] = newt[intrinsicLC.numCadences - 1] + gapSize + \
(i - intrinsicLC.numCadences + 1)*self._taskCython.get_dt(threadNum=tnum)
newmask[i] = 1.0
elif duration is None and gap is None and tIn is not None:
for i in range(intrinsicLC.numCadences, newNumCadences):
newt[i] = tIn[i - intrinsicLC.numCadences]
newmask[i] = 1.0
else:
raise ValueError('Cannot specify both tIn and gap at the same time')
intrinsicLC._numCadences = newNumCadences
self._taskCython.extend_IntrinsicLC(
intrinsicLC.numCadences, intrinsicLC._simulatedCadenceNum, intrinsicLC._tolIR,
intrinsicLC._fracIntrinsicVar, intrinsicLC._fracNoiseToSignal, newt, newx, newy, newyerr, newmask,
intrinsicLC.XSim, intrinsicLC.PSim, distSeed, noiseSeed, threadNum=tnum)
if gap is not None:
old, gap, new = np.split(newt, [oldNumCadences, oldNumCadences + gapNumCadences])
newt = np.require(np.concatenate((old, new)), requirements=['F', 'A', 'W', 'O', 'E'])
old, gap, new = np.split(newx, [oldNumCadences, oldNumCadences + gapNumCadences])
newx = np.require(np.concatenate((old, new)), requirements=['F', 'A', 'W', 'O', 'E'])
old, gap, new = np.split(newy, [oldNumCadences, oldNumCadences + gapNumCadences])
newy = np.require(np.concatenate((old, new)), requirements=['F', 'A', 'W', 'O', 'E'])
old, gap, new = np.split(newyerr, [oldNumCadences, oldNumCadences + gapNumCadences])
newyerr = np.require(np.concatenate((old, new)), requirements=['F', 'A', 'W', 'O', 'E'])
old, gap, new = np.split(newmask, [oldNumCadences, oldNumCadences + gapNumCadences])
newmask = np.require(np.concatenate((old, new)), requirements=['F', 'A', 'W', 'O', 'E'])
intrinsicLC._simulatedCadenceNum = newt.shape[0] - 1
intrinsicLC._numCadences = newt.shape[0]
intrinsicLC.t = newt
intrinsicLC.x = newx
intrinsicLC.y = newy
intrinsicLC.yerr = newyerr
intrinsicLC.mask = newmask
count = int(np.sum(intrinsicLC.mask))
y_meanSum = 0.0
yerr_meanSum = 0.0
for i in range(intrinsicLC.numCadences):
y_meanSum += intrinsicLC.mask[i]*intrinsicLC.y[i]
yerr_meanSum += intrinsicLC.mask[i]*intrinsicLC.yerr[i]
if count > 0.0:
intrinsicLC._mean = y_meanSum/count
intrinsicLC._meanerr = yerr_meanSum/count
else:
intrinsicLC._mean = 0.0
intrinsicLC._meanerr = 0.0
y_stdSum = 0.0
yerr_stdSum = 0.0
for i in range(intrinsicLC.numCadences):
y_stdSum += math.pow(intrinsicLC.mask[i]*intrinsicLC.y[i] - intrinsicLC._mean, 2.0)
yerr_stdSum += math.pow(intrinsicLC.mask[i]*intrinsicLC.yerr[i] - intrinsicLC._meanerr, 2.0)
if count > 0.0:
intrinsicLC._std = math.sqrt(y_stdSum/count)
intrinsicLC._stderr = math.sqrt(yerr_stdSum/count)
else:
intrinsicLC._std = 0.0
intrinsicLC._stderr = 0.0'''
def observe(self, intrinsicLC, noiseSeed=None, tnum=None):
if tnum is None:
tnum = 0
randSeed = np.zeros(1, dtype='uint32')
if noiseSeed is None:
rand.rdrand(randSeed)
noiseSeed = randSeed[0]
if intrinsicLC._observedCadenceNum == -1:
self._taskCython.add_ObservationNoise(
intrinsicLC.numCadences, intrinsicLC.tolIR, intrinsicLC.fracIntrinsicVar,
intrinsicLC.fracNoiseToSignal, intrinsicLC.t, intrinsicLC.x, intrinsicLC.y, intrinsicLC.yerr,
intrinsicLC.mask, noiseSeed, threadNum=tnum)
'''else:
self._taskCython.extend_ObservationNoise(
intrinsicLC.numCadences, intrinsicLC.observedCadenceNum, intrinsicLC.tolIR,
intrinsicLC.fracIntrinsicVar, intrinsicLC.fracNoiseToSignal, intrinsicLC.t, intrinsicLC.x,
intrinsicLC.y, intrinsicLC.yerr, intrinsicLC.mask, noiseSeed, threadNum=tnum)
intrinsicLC._observedCadenceNum = intrinsicLC._numCadences - 1'''
intrinsicLC._statistics()
def logPrior(self, observedLC, forced=True, widthT=0.01, widthF=0.05, tnum=None):
if tnum is None:
tnum = 0
periodEst = self.estimate(observedLC)
observedLC._logPrior = self._taskCython.compute_LnPrior(observedLC.numCadences, observedLC.meandt,
observedLC.tolIR,
observedLC.startT,
observedLC.maxSigma*observedLC.std,
observedLC.minTimescale*observedLC.mindt,
observedLC.maxTimescale*observedLC.T,
np.min(observedLC.y), np.max(observedLC.y),
observedLC.t, observedLC.x,
observedLC.y, observedLC.yerr,
observedLC.mask,
periodEst, widthT*periodEst,
observedLC.mean, widthF*observedLC.mean,
tnum)
return observedLC._logPrior
def logLikelihood(self, observedLC, widthT=0.01, widthF=0.05, forced=True, tnum=None):
if tnum is None:
tnum = 0
observedLC.pComp = self.p
observedLC.qComp = self.q
periodEst = self.estimate(observedLC)
observedLC._logPrior = self.logPrior(observedLC, widthT=widthT, widthF=widthF,
forced=forced, tnum=tnum)
if forced is True:
observedLC._computedCadenceNum = -1
if observedLC._computedCadenceNum == -1:
for rowCtr in range(observedLC.pComp):
observedLC.XComp[rowCtr] = 0.0
for colCtr in range(observedLC.pComp):
observedLC.PComp[rowCtr + observedLC.pComp*colCtr] = 0.0
observedLC._logLikelihood = self._taskCython.compute_LnLikelihood(
observedLC.numCadences, observedLC._computedCadenceNum, observedLC.tolIR, observedLC.startT,
observedLC.t, observedLC.x, observedLC.y, observedLC.yerr, observedLC.mask,
observedLC.XComp, observedLC.PComp,
periodEst, widthT*periodEst,
observedLC.mean, widthF*observedLC.mean,
tnum)
observedLC._logPosterior = observedLC._logPrior + observedLC._logLikelihood
observedLC._computedCadenceNum = observedLC.numCadences - 1
elif observedLC._computedCadenceNum == observedLC.numCadences - 1:
pass
'''else:
observedLC._logLikelihood = self._taskCython.update_LnLikelihood(
observedLC.numCadences, observedLC._computedCadenceNum, observedLC._logLikelihood,
observedLC.tolIR, observedLC.t, observedLC.x, observedLC.y - observedLC.mean, observedLC.yerr,
observedLC.mask, observedLC.XComp, observedLC.PComp, tnum)
observedLC._logPosterior = observedLC._logPrior + observedLC._logLikelihood
observedLC._computedCadenceNum = observedLC.numCadences - 1'''
return observedLC._logLikelihood
def logPosterior(self, observedLC, forced=True, tnum=None):
        self.logLikelihood(observedLC, forced=forced, tnum=tnum)
return observedLC._logPosterior
'''def acvf(self, start=0.0, stop=100.0, num=100, endpoint=True, base=10.0, spacing='linear'):
if spacing.lower() in ['log', 'logarithm', 'ln', 'log10']:
lags = np.logspace(np.log10(start)/np.log10(base), np.log10(
stop)/np.log10(base), num=num, endpoint=endpoint, base=base)
elif spacing.lower() in ['linear', 'lin']:
lags = np.linspace(start, stop, num=num, endpoint=endpoint)
else:
raise RuntimeError('Unable to parse spacing')
acvf = np.zeros(num)
self._taskCython.compute_ACVF(num, lags, acvf)
return lags, acvf
def acf(self, start=0.0, stop=100.0, num=100, endpoint=True, base=10.0, spacing='linear'):
if spacing.lower() in ['log', 'logarithm', 'ln', 'log10']:
lags = np.logspace(np.log10(start)/np.log10(base), np.log10(
stop)/np.log10(base), num=num, endpoint=endpoint, base=base)
elif spacing.lower() in ['linear', 'lin']:
lags = np.linspace(start, stop, num=num, endpoint=endpoint)
else:
raise RuntimeError('Unable to parse spacing')
acvf = np.zeros(num)
acf = np.zeros(num)
self._taskCython.compute_ACVF(num, lags, acvf)
acf = acvf/acvf[0]
return lags, acf
def sf(self, start=0.0, stop=100.0, num=100, endpoint=True, base=10.0, spacing='linear'):
if spacing.lower() in ['log', 'logarithm', 'ln', 'log10']:
lags = np.logspace(np.log10(start)/np.log10(base), np.log10(
stop)/np.log10(base), num=num, endpoint=endpoint, base=base)
elif spacing.lower() in ['linear', 'lin']:
lags = np.linspace(start, stop, num=num, endpoint=endpoint)
else:
raise RuntimeError('Unable to parse spacing')
acvf = np.zeros(num)
sf = np.zeros(num)
self._taskCython.compute_ACVF(num, lags, acvf)
sf = 2.0*(acvf[0] - acvf)
return lags, sf
def plotacvf(self, fig=-2, LC=None, newdt=None, doShow=False, clearFig=True):
newFig = plt.figure(fig, figsize=(fwid, fhgt))
if clearFig:
plt.clf()
if LC is not None:
lagsM, acvfM = self.acvf(start=LC.dt, stop=LC.T, num=1000, spacing='linear')
else:
lagsM, acvfM = self.acvf(start=0.0, stop=1000.0, num=1000, spacing='linear')
plt.plot(lagsM, acvfM, label=r'model Autocovariance Function', color='#984ea3', zorder=5)
if LC is not None:
if np.sum(LC.y) != 0.0:
lagsE, acvfE, acvferrE = LC.acvf(newdt)
if np.sum(acvfE) != 0.0:
plt.errorbar(lagsE[1:], acvfE[1:], acvferrE[1:], label=r'obs. Autocovariance Function',
fmt='o', capsize=0, color='#ff7f00', markeredgecolor='none', zorder=0)
plt.xlim(lagsE[1], lagsE[-1])
plt.xlabel(r'$\delta t$')
plt.ylabel(r'$\log ACVF$')
plt.title(r'Autocovariance Function')
plt.legend(loc=3)
if doShow:
plt.show(False)
return newFig
def plotacf(self, fig=-3, LC=None, newdt=None, doShow=False, clearFig=True):
newFig = plt.figure(fig, figsize=(fwid, fhgt))
if clearFig:
plt.clf()
if LC is not None:
lagsM, acfM = self.acf(start=LC.dt, stop=LC.T, num=1000, spacing='linear')
else:
lagsM, acfM = self.acf(start=0.0, stop=1000.0, num=1000, spacing='linear')
plt.plot(lagsM, acfM, label=r'model Autocorrelation Function', color='#984ea3', zorder=5)
if LC is not None:
if np.sum(LC.y) != 0.0:
lagsE, acfE, acferrE = LC.acf(newdt)
if np.sum(acfE) != 0.0:
plt.errorbar(lagsE[1:], acfE[1:], acferrE[1:], label=r'obs. Autocorrelation Function',
fmt='o', capsize=0, color='#ff7f00', markeredgecolor='none', zorder=0)
plt.xlim(lagsE[1], lagsE[-1])
plt.xlabel(r'$\delta t$')
plt.ylabel(r'$\log ACF$')
plt.title(r'Autocorrelation Function')
plt.legend(loc=3)
plt.ylim(-1.0, 1.0)
if doShow:
plt.show(False)
return newFig
def plotsf(self, fig=-4, LC=None, newdt=None, doShow=False, clearFig=True):
newFig = plt.figure(fig, figsize=(fwid, fhgt))
if clearFig:
plt.clf()
if LC is not None and np.sum(LC.y) != 0.0:
lagsE, sfE, sferrE = LC.sf(newdt)
lagsM, sfM = self.sf(start=lagsE[1], stop=lagsE[-1], num=1000, spacing='log')
else:
lagsM, sfM = self.sf(start=0.001, stop=1000.0, num=1000, spacing='log')
plt.plot(np.log10(lagsM[1:]), np.log10(
sfM[1:]), label=r'model Structure Function', color='#984ea3', zorder=5)
if LC is not None:
if np.sum(LC.y) != 0.0:
if np.sum(sfE) != 0.0:
plt.scatter(
np.log10(lagsE[np.where(sfE != 0.0)[0]]), np.log10(sfE[np.where(sfE != 0.0)[0]]),
marker='o', label=r'obs. Structure Function', color='#ff7f00', edgecolors='none',
zorder=0)
plt.xlim(math.log10(lagsM[1]), math.log10(lagsM[-1]))
plt.xlabel(r'$\delta t$')
plt.ylabel(r'$\log SF$')
plt.title(r'Structure Function')
plt.legend(loc=2)
if doShow:
plt.show(False)
return newFig
def _psddenominator(self, freqs, order):
nfreqs = freqs.shape[0]
aList = self.Theta()[0:self.p].tolist()
aList.insert(0, 1.0)
psddenominator = np.zeros(nfreqs)
if ((order % 2 == 1) or (order <= -1) or (order > 2*self.p)):
aList.pop(0)
            return psddenominator
else:
for freq in range(nfreqs):
val = 0.0
for i in range(self.p + 1):
j = 2*self.p - i - order
if ((j >= 0) and (j < self.p + 1)):
val += (aList[i]*aList[j]*((2.0*math.pi*1j*freqs[freq])**(
2*self.p - (i + j)))*pow(-1.0, self.p - j)).real
psddenominator[freq] = val
aList.pop(0)
return psddenominator
def _psdnumerator(self, freqs, order):
nfreqs = freqs.shape[0]
bList = self.Theta()[self.p:self.p+self.q+1].tolist()
psdnumerator = np.zeros(nfreqs)
if ((order % 2 == 1) or (order <= -1) or (order > 2*self.q)):
return psdnumerator
else:
for freq in range(nfreqs):
val = 0.0
for i in range(self.q + 1):
j = 2*self.q - i - order
if ((j >= 0) and (j < self.q + 1)):
val += (bList[i]*bList[j]*((2.0*math.pi*1j*freqs[freq])**(
2*self.q - (i + j)))*pow(-1.0, self.q - j)).real
psdnumerator[freq] = val
return psdnumerator
def psd(self, start=0.1, stop=100.0, num=100, endpoint=True, base=10.0, spacing='log'):
if spacing.lower() in ['log', 'logarithm', 'ln', 'log10']:
freqs = np.logspace(np.log10(start)/np.log10(base), np.log10(
stop)/np.log10(base), num=num, endpoint=endpoint, base=base)
elif spacing.lower() in ['linear', 'lin']:
freqs = np.linspace(start, stop, num=num, endpoint=endpoint)
else:
raise RuntimeError('Unable to parse spacing')
maxDenomOrder = 2*self.p
maxNumerOrder = 2*self.q
psdnumeratorcomponent = np.zeros((num, (maxNumerOrder/2) + 1))
psddenominatorcomponent = np.zeros((num, (maxDenomOrder/2) + 1))
psdnumerator = np.zeros(num)
psddenominator = np.zeros(num)
psd = np.zeros(num)
for orderVal in range(0, maxNumerOrder + 1, 2):
psdnumeratorcomponent[:, orderVal/2] = self._psdnumerator(freqs, orderVal)
for orderVal in range(0, maxDenomOrder + 1, 2):
psddenominatorcomponent[:, orderVal/2] = self._psddenominator(freqs, orderVal)
for freq in range(num):
for orderVal in range(0, maxNumerOrder + 1, 2):
psdnumerator[freq] += psdnumeratorcomponent[freq, orderVal/2]
for orderVal in range(0, maxDenomOrder + 1, 2):
psddenominator[freq] += psddenominatorcomponent[freq, orderVal/2]
psd[freq] = psdnumerator[freq]/psddenominator[freq]
return freqs, psd, psdnumerator, psddenominator, psdnumeratorcomponent, psddenominatorcomponent
def plotpsd(self, fig=-5, LC=None, doShow=False, clearFig=True):
newFig = plt.figure(fig, figsize=(fwid, fhgt))
if clearFig:
plt.clf()
if LC is not None:
start = LC.mindt
stop = LC.T
else:
start = 1.0e-4
stop = 1.0e3
freqsM, psdM, psdNumer, psdDenom, psdNumerComp, psdDenomComp = self.psd(
start=start, stop=stop, num=1000, spacing='log')
plt.plot(np.log10(freqsM[1:]), np.log10(
psdM[1:]), label=r'$\ln PSD$', color='#000000', zorder=5, linewidth=6)
plt.plot(np.log10(freqsM[1:]), np.log10(psdNumer[1:]),
label=r'$\ln PSD_{\mathrm{numerator}}$', color='#1f78b4', zorder=0, linewidth=4)
plt.plot(np.log10(freqsM[1:]), -np.log10(psdDenom[1:]),
label=r'$-\ln PSD_{\mathrm{denominator}}$', color='#e31a1c', zorder=0, linewidth=4)
for i in range(psdNumerComp.shape[1]):
plt.plot(np.log10(freqsM[1:]), np.log10(psdNumerComp[1:, i]),
color='#a6cee3', zorder=0, linewidth=2, linestyle=r'dashed')
plt.annotate(
r'$\nu^{%d}$'%(2*i), xy=(np.log10(freqsM[25]), np.log10(psdNumerComp[25, i])),
xycoords='data', xytext=(np.log10(freqsM[25]) + 0.25, np.log10(psdNumerComp[25, i]) + 0.5),
textcoords='data', arrowprops=dict(
arrowstyle='->', connectionstyle='angle3, angleA = 0, angleB = 90'),
ha='center', va='center', multialignment='center', zorder=100)
for i in range(psdDenomComp.shape[1]):
plt.plot(np.log10(freqsM[1:]), -np.log10(psdDenomComp[1:, i]),
color='#fb9a99', zorder=0, linewidth=2, linestyle=r'dashed')
plt.annotate(
r'$\nu^{%d}$'%(-2*i), xy=(np.log10(freqsM[-25]), -np.log10(psdDenomComp[-25, i])),
xycoords='data', xytext=(np.log10(freqsM[-25]) - 0.25, -np.log10(psdDenomComp[-25, i]) - 0.5),
textcoords='data', arrowprops=dict(
arrowstyle='->', connectionstyle='angle3, angleA = 0, angleB = 90'),
ha='center', va='center', multialignment='center', zorder=100)
plt.xlim(math.log10(freqsM[1]), math.log10(freqsM[-1]))
plt.xlabel(r'$\log \nu$')
plt.ylabel(r'$\log PSD$')
plt.title(r'Power Spectral Density')
plt.legend(loc=3)
if doShow:
plt.show(False)
return newFig'''
def estimate(self, observedLC):
"""!
        Estimate period using gatspy's Lomb-Scargle periodogram, falling back on
        scipy.signal.spectral.lombscargle if the gatspy estimate is out of range.
"""
if observedLC.numCadences > 50:
model = gatspy.periodic.LombScargleFast()
else:
model = gatspy.periodic.LombScargle()
model.optimizer.set(quiet=True, period_range=(2.0*observedLC.meandt, observedLC.T))
model.fit(observedLC.t,
observedLC.y,
observedLC.yerr)
periodEst = model.best_period
if periodEst < 2.0*observedLC.meandt or periodEst > observedLC.T:
scaled_y = (observedLC.y - observedLC.mean)/observedLC.std
angFreqs = np.logspace(math.log10((2.0*math.pi)/(observedLC.T)),
math.log10((2.0*math.pi)/(2.0*observedLC.meandt)),
10000)
periodogram = scipy.signal.spectral.lombscargle(observedLC.t, scaled_y, angFreqs)
periodEst = 2.0*math.pi/angFreqs[np.argmax(periodogram)]
return periodEst
def guess(self, periodEst):
notGood = True
while notGood:
eccentricityGuess = random.uniform(0.0, 1.0)
m1Val = math.pow(10.0, random.uniform(4.0, 10.0))
m2Val = math.pow(10.0, random.uniform(4.0, math.log10(m1Val)))
totalMassVal = m1Val + m2Val
axisRatioVal = m2Val/m1Val
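            # Kepler's third law: a_tot**3 = G * T**2 * (m1 + m2) / (4 * pi**2),
            # with the period in seconds and the masses in kg.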
aTotCube = (self.G*math.pow(periodEst*self.Day, 2.0)*totalMassVal*self.SolarMass)/self.fourPiSq
totalAxisVal = math.pow(aTotCube, 1.0/3.0)
a1Guess = ((axisRatioVal*totalAxisVal)/(1 + axisRatioVal))/self.Parsec
a2Guess = (totalAxisVal/(1 + axisRatioVal))/self.Parsec
rS1Val = (2.0*self.G*m1Val*self.SolarMass)/(pow(self.c, 2.0))
rS2Val = (2.0*self.G*m2Val*self.SolarMass)/(pow(self.c, 2.0))
rPeribothronTotVal = (a1Guess + a2Guess)*self.Parsec*(1.0 - eccentricityGuess)
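            # Accept the draw only if the closest approach exceeds 10x the
            # combined Schwarzschild radii, i.e. the binary has not merged.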
if rPeribothronTotVal > 10.0*(rS1Val + rS2Val):
notGood = False
break
return a1Guess, a2Guess, eccentricityGuess
def fit(self, observedLC, widthT=0.01, widthF=0.05,
zSSeed=None, walkerSeed=None, moveSeed=None, xSeed=None):
observedLC.pComp = self.p
observedLC.qComp = self.q
randSeed = np.zeros(1, dtype='uint32')
if zSSeed is None:
rand.rdrand(randSeed)
zSSeed = randSeed[0]
if walkerSeed is None:
rand.rdrand(randSeed)
walkerSeed = randSeed[0]
if moveSeed is None:
rand.rdrand(randSeed)
moveSeed = randSeed[0]
if xSeed is None:
rand.rdrand(randSeed)
xSeed = randSeed[0]
xStart = np.require(np.zeros(self.ndims*self.nwalkers), requirements=['F', 'A', 'W', 'O', 'E'])
minT = observedLC.mindt*observedLC.minTimescale
maxT = observedLC.T*observedLC.maxTimescale
minTLog10 = math.log10(minT)
maxTLog10 = math.log10(maxT)
periodEst = self.estimate(observedLC)
for walkerNum in range(self.nwalkers):
noSuccess = True
sigmaFactor = 1.0e0
expVal = ((maxTLog10 - minTLog10)*np.random.random(self._p + self._q + 1) + minTLog10)
a1Guess, a2Guess, eccentricityGuess = self.guess(periodEst)
RhoGuess = np.require(np.array([a1Guess, a2Guess, periodEst, eccentricityGuess,
random.uniform(0.0, 360.0), random.uniform(0.0, 90.0),
random.uniform(observedLC.startT,
observedLC.startT + periodEst),
observedLC.mean] + (-1.0/np.power(10.0, expVal)).tolist()),
requirements=['F', 'A', 'W', 'O', 'E'])
while noSuccess:
RhoGuess[self._r + self._p + self._q] = sigmaFactor*observedLC.std
ThetaGuess = coeffs(self._p, self._q, RhoGuess)
res = self.set(observedLC.dt, ThetaGuess)
lnPrior = self.logPrior(observedLC, widthT=widthT, widthF=widthF)
if res == 0 and not np.isinf(lnPrior):
noSuccess = False
else:
sigmaFactor *= 0.31622776601 # sqrt(0.1)
for dimNum in range(self.ndims):
xStart[dimNum + walkerNum*self.ndims] = ThetaGuess[dimNum]
res = self._taskCython.fit_CARMAModel(
observedLC.dt, observedLC.numCadences, observedLC.meandt, observedLC.tolIR,
observedLC.maxSigma*observedLC.std, observedLC.minTimescale*observedLC.mindt,
observedLC.maxTimescale*observedLC.T, np.min(observedLC.y), np.max(observedLC.y),
observedLC.startT, observedLC.t, observedLC.x, observedLC.y, observedLC.yerr, observedLC.mask,
self.nwalkers, self.nsteps, self.maxEvals, self.xTol, self.mcmcA,
zSSeed, walkerSeed, moveSeed, xSeed, xStart, self._Chain, self._LnPrior, self._LnLikelihood,
periodEst, widthT*periodEst,
observedLC.mean, widthF*observedLC.mean)
meanTheta = list()
for dimNum in range(self.ndims):
            meanTheta.append(np.mean(self.Chain[dimNum, :, self.nsteps//2:]))
meanTheta = np.require(meanTheta, requirements=['F', 'A', 'W', 'O', 'E'])
self.set(observedLC.dt, meanTheta)
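        # Deviance Information Criterion: pDIC = <D(Theta)> - D(<Theta>) is the
        # effective number of parameters and DIC = D(<Theta>) + 2*pDIC, where
        # D = -2*lnLikelihood, estimated over the second half of the chain.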
devianceThetaBar = -2.0*self.logLikelihood(observedLC)
barDeviance = np.mean(-2.0*self.LnLikelihood[:, self.nsteps/2:])
self._pDIC = barDeviance - devianceThetaBar
self._dic = devianceThetaBar + 2.0*self.pDIC
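        # touch the lazily-evaluated properties so their caches are populated
        # before returning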
self.rootChain
self.timescaleChain
self.bestTheta
self.bestRho
self.bestTau
self.auxillaryChain
return res
@property
def bestTheta(self):
if hasattr(self, '_bestTheta'):
return self._bestTheta
else:
bestWalker = np.where(np.max(self.LnPosterior) == self.LnPosterior)[0][0]
bestStep = np.where(np.max(self.LnPosterior) == self.LnPosterior)[1][0]
self._bestTheta = copy.copy(self.Chain[:, bestWalker, bestStep])
return self._bestTheta
@property
def bestRho(self):
if hasattr(self, '_bestRho'):
return self._bestRho
else:
bestWalker = np.where(np.max(self.LnPosterior) == self.LnPosterior)[0][0]
bestStep = np.where(np.max(self.LnPosterior) == self.LnPosterior)[1][0]
self._bestRho = copy.copy(self.rootChain[:, bestWalker, bestStep])
return self._bestRho
@property
def bestTau(self):
if hasattr(self, '_bestTau'):
return self._bestTau
else:
bestWalker = np.where(np.max(self.LnPosterior) == self.LnPosterior)[0][0]
bestStep = np.where(np.max(self.LnPosterior) == self.LnPosterior)[1][0]
self._bestTau = copy.copy(self.timescaleChain[:, bestWalker, bestStep])
return self._bestTau
def clear(self):
if hasattr(self, '_rootChain'):
del self._rootChain
if hasattr(self, '_timescaleChain'):
del self._timescaleChain
if hasattr(self, '_bestTheta'):
del self._bestTheta
if hasattr(self, '_bestRho'):
del self._bestRho
if hasattr(self, '_bestTau'):
del self._bestTau
def smooth(self, observedLC, startT=None, stopT=None, tnum=None):
if tnum is None:
tnum = 0
if observedLC.dtSmooth is None or observedLC.dtSmooth == 0.0:
observedLC.dtSmooth = observedLC.mindt/10.0
observedLC.pComp = self.p
observedLC.qComp = self.q
if not startT:
startT = observedLC.t[0]
if not stopT:
stopT = observedLC.t[-1]
t = sorted(observedLC.t.tolist() + np.linspace(start=startT, stop=stopT,
num=int(math.ceil((stopT - startT)/observedLC.dtSmooth)), endpoint=False).tolist())
t = _f7(t) # remove duplicates
observedLC.numCadencesSmooth = len(t)
observedLC.tSmooth = np.require(np.array(t), requirements=[
'F', 'A', 'W', 'O', 'E'])
observedLC.xSmooth = np.require(np.zeros(observedLC.numCadencesSmooth), requirements=[
'F', 'A', 'W', 'O', 'E'])
observedLC.xerrSmooth = np.require(np.zeros(observedLC.numCadencesSmooth), requirements=[
'F', 'A', 'W', 'O', 'E'])
observedLC.ySmooth = np.require(np.zeros(observedLC.numCadencesSmooth), requirements=[
'F', 'A', 'W', 'O', 'E'])
observedLC.yerrSmooth = np.require(
np.zeros(observedLC.numCadencesSmooth), requirements=['F', 'A', 'W', 'O', 'E'])
observedLC.maskSmooth = np.require(
np.zeros(observedLC.numCadencesSmooth), requirements=['F', 'A', 'W', 'O', 'E'])
observedLC.XSmooth = np.require(np.zeros(observedLC.numCadencesSmooth*observedLC.pComp),
requirements=['F', 'A', 'W', 'O', 'E'])
observedLC.PSmooth = np.require(np.zeros(
observedLC.numCadencesSmooth*observedLC.pComp*observedLC.pComp),
requirements=['F', 'A', 'W', 'O', 'E'])
        unObsErr = math.sqrt(sys.float_info.max)  # effectively-infinite error bar marks unobserved cadences
obsCtr = 0
for i in range(observedLC.numCadencesSmooth):
if observedLC.tSmooth[i] == observedLC.t[obsCtr]:
observedLC.xSmooth[i] = 0.0
observedLC.xerrSmooth[i] = unObsErr
observedLC.ySmooth[i] = observedLC.y[obsCtr]
observedLC.yerrSmooth[i] = observedLC.yerr[obsCtr]
observedLC.maskSmooth[i] = observedLC.mask[obsCtr]
if obsCtr < observedLC.numCadences - 1:
obsCtr += 1
else:
observedLC.xSmooth[i] = 0.0
observedLC.xerrSmooth[i] = unObsErr
observedLC.ySmooth[i] = 0.0
observedLC.yerrSmooth[i] = unObsErr
observedLC.maskSmooth[i] = 0.0
res = self._taskCython.smooth_RTS(
observedLC.numCadencesSmooth, -1, observedLC.tolIR, observedLC.startT,
observedLC.tSmooth, observedLC.xSmooth,
observedLC.ySmooth, observedLC.yerrSmooth, observedLC.maskSmooth,
observedLC.XComp, observedLC.PComp, observedLC.XSmooth, observedLC.PSmooth,
observedLC.xSmooth, observedLC.xerrSmooth, tnum)
observedLC._isSmoothed = True
return res
@property
def auxillaryChain(self):
if hasattr(self, '_auxillaryChain'):
return np.reshape(self._auxillaryChain, newshape=(13, self._nwalkers, self._nsteps), order='F')
else:
self._auxillaryChain = np.require(np.zeros(13*self.nwalkers*self.nsteps),
requirements=['F', 'A', 'W', 'O', 'E'])
MBHBCARMATask_cython.compute_Aux(self.ndims, self.nwalkers, self.nsteps,
self.sigmaStars, self.H, self.rhoStars,
self._Chain, self._auxillaryChain)
return np.reshape(self._auxillaryChain, newshape=(13, self._nwalkers, self._nsteps), order='F')
def plotscatter(self, dimx, dimy, truthx=None, truthy=None, labelx=None, labely=None,
best=False, median=False,
fig=-6, doShow=False, clearFig=True):
newFig = plt.figure(fig, figsize=(fwid, fhgt))
if clearFig:
plt.clf()
if dimx < self.ndims and dimy < self.ndims:
plt.scatter(self.timescaleChain[dimx, :, self.nsteps/2:],
self.timescaleChain[dimy, :, self.nsteps/2:],
c=self.LnPosterior[:, self.nsteps/2:], edgecolors='none')
plt.colorbar()
if best:
                # locate the maximum-posterior sample in the post-burn-in half
                lnPost = self.LnPosterior[:, self.nsteps/2:]
                loc0 = np.where(lnPost == np.max(lnPost))[0][0]
                loc1 = np.where(lnPost == np.max(lnPost))[1][0] + self.nsteps/2
plt.axvline(x=self.timescaleChain[dimx, loc0, loc1], c=r'#ffff00', label=r'Best %s'%(labelx))
plt.axhline(y=self.timescaleChain[dimy, loc0, loc1], c=r'#ffff00', label=r'Best %s'%(labely))
if median:
medx = np.median(self.timescaleChain[dimx, :, self.nsteps/2:])
medy = np.median(self.timescaleChain[dimy, :, self.nsteps/2:])
plt.axvline(x=medx, c=r'#ff00ff', label=r'Median %s'%(labelx))
                plt.axhline(y=medy, c=r'#ff00ff', label=r'Median %s'%(labely))
if truthx is not None:
plt.axvline(x=truthx, c=r'#000000')
if truthy is not None:
plt.axhline(y=truthy, c=r'#000000')
if labelx is not None:
plt.xlabel(labelx)
if labely is not None:
plt.ylabel(labely)
if doShow:
plt.show(False)
return newFig
def plotwalkers(self, dim, truth=None, label=None, fig=-7, doShow=False, clearFig=True):
newFig = plt.figure(fig, figsize=(fwid, fhgt))
if clearFig:
plt.clf()
if dim < self.ndims:
for i in range(self.nwalkers):
plt.plot(self.timescaleChain[dim, i, :], c=r'#0000ff', alpha=0.1)
plt.plot(np.median(self.timescaleChain[dim, :, :], axis=0), c=r'#ff0000')
plt.fill_between(range(self.nsteps),
np.median(self.timescaleChain[dim, :, :], axis=0) -
np.std(self.timescaleChain[dim, :, :], axis=0),
np.median(self.timescaleChain[dim, :, :], axis=0) +
np.std(self.timescaleChain[dim, :, :], axis=0),
color=r'#ff0000', alpha=0.1)
if truth is not None:
plt.axhline(truth, c=r'#000000')
plt.xlabel(r'step \#')
if label is not None:
plt.ylabel(label)
if doShow:
plt.show(False)
return newFig
def plottriangle(self, doShow=False, plot_contours=True, cmap='cubehelix'):
orbitChain = copy.copy(self.timescaleChain[0:self.r, :, self.nsteps/2:])
flatOrbitChain = np.swapaxes(orbitChain.reshape((self.r, -1), order='F'), axis1=0, axis2=1)
orbitLabels = [r'$a_{1}$ (pc)', r'$a_{2}$ (pc)', r'$T$ (d)', r'$e$', r'$\Omega$ (deg.)', r'$i$ (deg)',
r'$\tau$ (d)', r'$F$']
orbitExtents = [0.9, 0.9, (0.95*np.min(self.timescaleChain[2, :, self.nsteps/2:]),
1.05*np.max(self.timescaleChain[2, :, self.nsteps/2:])), 0.9, 0.9, 0.9,
0.9, (0.85*np.min(self.timescaleChain[7, :, self.nsteps/2:]),
1.15*np.max(self.timescaleChain[7, :, self.nsteps/2:]))]
newFigOrb = kali.util.triangle.corner(flatOrbitChain, labels=orbitLabels,
show_titles=True,
title_fmt='.2e',
quantiles=[0.16, 0.5, 0.84],
extents=orbitExtents,
plot_contours=plot_contours,
plot_datapoints=False,
plot_contour_lines=False,
pcolor_cmap=cm.get_cmap(cmap),
verbose=False)
stochasticChain = copy.copy(self.timescaleChain[self.r:, :, self.nsteps/2:])
flatStochasticChain = np.swapaxes(stochasticChain.reshape((self.ndims - self.r, -1), order='F'),
axis1=0, axis2=1)
stochasticLabels = []
for i in range(self.p):
stochasticLabels.append(r'$\tau_{\mathrm{AR,} %d}$ (d)'%(i + 1))
for i in range(self.q):
stochasticLabels.append(r'$\tau_{\mathrm{MA,} %d}$ (d)'%(i + 1))
stochasticLabels.append(r'$\mathrm{Amp.}$')
newFigSto = kali.util.triangle.corner(flatStochasticChain, labels=stochasticLabels,
show_titles=True,
title_fmt='.2e',
quantiles=[0.16, 0.5, 0.84],
plot_contours=plot_contours,
plot_datapoints=False,
plot_contour_lines=False,
pcolor_cmap=cm.get_cmap(cmap),
verbose=False)
auxChain = copy.copy(self.auxillaryChain[:, :, self.nsteps/2:])
flatAuxChain = np.swapaxes(auxChain.reshape((13, -1), order='F'), axis1=0, axis2=1)
auxLabels = [r'$a_{1}$ (pc)', r'$a_{2}$ (pc)', r'$T$ (d)', r'$e$',
r'$M_{12}$ ($10^{6} \times M_{\odot}$)', r'$M_{2}/M_{1}$',
r'$r_{\mathrm{Peribothron}}$ (pc)', r'$r_{\mathrm{Apobothron}}$ (pc)',
r'$r_{\mathrm{Schwarzschild}}$ (pc)',
r'$a_{\mathrm{Hard}}$ (pc)', r'$a_{\mathrm{GW}}$ (pc)', r'$T_{\mathrm{Hard}}$ (yr)',
r'$M_{\mathrm{Eject}}$ ($10^{6} \times M_{\odot}$)']
auxExtents = [0.9, 0.9, (0.95*np.min(self.auxillaryChain[2, :, self.nsteps/2:]),
1.05*np.max(self.auxillaryChain[2, :, self.nsteps/2:])), 0.9, 0.9, 0.9,
0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9]
newFigAux = kali.util.triangle.corner(flatAuxChain, labels=auxLabels,
show_titles=True,
title_fmt='.2e',
quantiles=[0.16, 0.5, 0.84],
extents=auxExtents,
plot_contours=plot_contours,
plot_datapoints=False,
plot_contour_lines=False,
pcolor_cmap=cm.get_cmap(cmap),
verbose=False)
if doShow:
plt.show(False)
return newFigSto, newFigOrb, newFigAux
|
gpl-2.0
|
mrshu/scikit-learn
|
benchmarks/bench_sample_without_replacement.py
|
2
|
7902
|
"""
Benchmarks for sampling without replacement of integer.
"""
from __future__ import division
from __future__ import print_function
import gc
import sys
import optparse
from datetime import datetime
import operator
import matplotlib.pyplot as plt
import numpy as np
import random
from sklearn.utils.random import sample_without_replacement
def compute_time(t_start, delta):
mu_second = 0.0 + 10 ** 6 # number of microseconds in a second
return delta.seconds + delta.microseconds / mu_second
def bench_sample(sampling, n_population, n_samples):
gc.collect()
# start time
t_start = datetime.now()
sampling(n_population, n_samples)
    # stop time
    delta = (datetime.now() - t_start)
    time = compute_time(t_start, delta)
return time
if __name__ == "__main__":
###########################################################################
# Option parser
###########################################################################
op = optparse.OptionParser()
op.add_option("--n-times",
dest="n_times", default=5, type=int,
help="Bench results are average over n_times experiments")
op.add_option("--n-population",
dest="n_population", default=100000, type=int,
help="Size of the population to sample from.")
op.add_option("--n-step",
dest="n_steps", default=5, type=int,
help="Number of step interval between 0 and n_population.")
default_algorithms = "custom-tracking-selection,custom-auto," \
"custom-reservoir-sampling,custom-pool,"\
"python-core-sample,numpy-permutation"
op.add_option("--algorithm",
dest="selected_algorithm",
default=default_algorithms,
type=str,
help="Comma-separated list of transformer to benchmark. "
"Default: %default. \nAvailable: %default")
# op.add_option("--random-seed",
# dest="random_seed", default=13, type=int,
# help="Seed used by the random number generators.")
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
selected_algorithm = opts.selected_algorithm.split(',')
for key in selected_algorithm:
if key not in default_algorithms.split(','):
raise ValueError("Unknown sampling algorithm \"%s\" not in (%s)."
% (key, default_algorithms))
###########################################################################
# List sampling algorithm
###########################################################################
# We assume that sampling algorithm has the following signature:
# sample(n_population, n_sample)
#
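    # For illustration, a compliant (if naive) sampler could be written as
    # below (hypothetical, not benchmarked here):
    #
    #   def naive_sample(n_population, n_samples):
    #       chosen = set()
    #       while len(chosen) < n_samples:
    #           chosen.add(random.randint(0, n_population - 1))
    #       return list(chosen)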
sampling_algorithm = {}
###########################################################################
# Set Python core input
sampling_algorithm["python-core-sample"] = \
lambda n_population, n_sample: \
random.sample(xrange(n_population), n_sample)
###########################################################################
# Set custom automatic method selection
sampling_algorithm["custom-auto"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="auto",
random_state=random_state)
###########################################################################
# Set custom tracking based method
sampling_algorithm["custom-tracking-selection"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="tracking_selection",
random_state=random_state)
###########################################################################
# Set custom reservoir based method
sampling_algorithm["custom-reservoir-sampling"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="reservoir_sampling",
random_state=random_state)
###########################################################################
    # Set custom pool based method
sampling_algorithm["custom-pool"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="pool",
random_state=random_state)
###########################################################################
# Numpy permutation based
sampling_algorithm["numpy-permutation"] = \
lambda n_population, n_sample: \
np.random.permutation(n_population)[:n_sample]
###########################################################################
# Remove unspecified algorithm
sampling_algorithm = dict((key, value)
for key, value in sampling_algorithm.items()
if key in selected_algorithm)
###########################################################################
# Perform benchmark
###########################################################################
time = {}
n_samples = np.linspace(start=0, stop=opts.n_population,
num=opts.n_steps).astype(np.int)
ratio = n_samples / opts.n_population
print('Benchmarks')
print("===========================")
for name in sorted(sampling_algorithm):
print("Perform benchmarks for %s..." % name, end="")
time[name] = np.zeros(shape=(opts.n_steps, opts.n_times))
for step in xrange(opts.n_steps):
for it in xrange(opts.n_times):
time[name][step, it] = bench_sample(sampling_algorithm[name],
opts.n_population,
n_samples[step])
print("done")
print("Averaging results...", end="")
for name in sampling_algorithm:
time[name] = np.mean(time[name], axis=1)
print("done\n")
# Print results
###########################################################################
print("Script arguments")
print("===========================")
arguments = vars(opts)
print("%s \t | %s " % ("Arguments".ljust(16),
"Value".center(12),))
print(25 * "-" + ("|" + "-" * 14) * 1)
for key, value in arguments.items():
print("%s \t | %s " % (str(key).ljust(16),
str(value).strip().center(12)))
print("")
print("Sampling algorithm performance:")
print("===============================")
print("Results are averaged over %s repetition(s)." % opts.n_times)
print("")
fig = plt.figure()
plt.title("n_population = %s, n_times = %s" %
(opts.n_population, opts.n_times))
ax = fig.add_subplot(111)
for name in sampling_algorithm:
ax.plot(ratio, time[name], label=name)
ax.set_xlabel('ratio of n_sample / n_population')
ax.set_ylabel('time [s]')
ax.legend()
# Sort legend labels
handles, labels = ax.get_legend_handles_labels()
hl = sorted(zip(handles, labels), key=operator.itemgetter(1))
handles2, labels2 = zip(*hl)
ax.legend(handles2, labels2, loc=0)
plt.show()
|
bsd-3-clause
|
albietz/stochs
|
examples/curves.py
|
1
|
3062
|
import math
import pickle
import numpy as np
import matplotlib.pyplot as plt
smoothing_wsize = 10  # savgol_filter (commented out below) would require an odd window
smooth = False
def smoothing(epochs, accs):
accs = accs.copy()
ws = smoothing_wsize
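    # centered moving average of width ws; the half-window at each end is
    # trimmed below because the convolution is not fully supported there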
for i in range(accs.shape[1]):
accs[ws//2:-ws//2,i] = np.convolve(accs[:,i], np.ones(ws)/ws, 'same')[ws//2:-ws//2]
# accs[:,i] = savgol_filter(accs[:,i], smoothing_wsize, 1)
return epochs[ws//2:-ws//2], accs[ws//2:-ws//2,:]
def algo_label(name, lr):
s = ''
if name.startswith('miso'):
s += 'S-MISO'
elif name.startswith('saga'):
s += 'N-SAGA'
else:
s += 'SGD'
if '_avg' in name:
s += '-AVG'
if '_nonu' in name:
s += '-NU'
if lr:
        s += r' $\eta = {}$'.format(lr)
return s
def plot_test_acc(res, step=1, last=-10):
accs = np.array(res['test_accs'])
epochs = res['epochs']
if smooth:
epochs, accs = smoothing(epochs, accs)
plt.figure(figsize=(10, 7))
for i in range(accs.shape[1]):
p = res['params'][i]
plt.plot(epochs[:last:step], accs[:last:step,i],
label='{}{}'.format(p['name'], '_lr' + str(p['lr']) if 'lr' in p else ''))
    plt.title(r'$\mu = {}$'.format(res['params'][0]['lmbda']))
plt.xlabel('epoch')
plt.ylabel('test accuracy')
plt.legend(loc='lower right')
def plot_loss(res, ty='train', log=False, step=1, last=-10, legend=True, ylabel=None,
small=True, fname=None, title='STL-10 ckn', filter_fn=None):
accs = np.array(res['{}_losses'.format(ty)])
if 'regs' in res and ty == 'train':
accs += np.array(res['regs'])
epochs = res['epochs']
if smooth:
epochs, accs = smoothing(epochs, accs)
# best = accs[:,2].min()
best = accs[:,:].min()
# if fname is None:
# plt.figure(figsize=(10, 7))
# else:
if small:
plt.figure(figsize=(4,2.2))
else:
plt.figure(figsize=(10,7))
for i in range(accs.shape[1]):
p = res['params'][i]
if filter_fn and filter_fn(p):
print('skipping', p['name'], p['lr'])
continue
if log:
plt.semilogy(epochs[:last:step], accs[:last:step,i] - best,
label=algo_label(p['name'], p.get('lr')),
linewidth=1.5)
else:
plt.plot(epochs[:last:step],
accs[:last:step,i], label=algo_label(p['name'], p.get('lr')))
plt.title(title)
plt.xlabel('epochs')
if ty == 'train':
plt.ylabel(ylabel or 'f - f*')
else:
plt.ylabel('{} {}loss'.format(ty, 'excess ' if log else ''))
if legend:
plt.legend(loc='upper right', fontsize=7)
if fname is not None:
plt.savefig(fname, format='pdf', bbox_inches='tight', pad_inches=0)
def plot_all(res, log=False, step=1, last=None):
plot_test_acc(res, step=step, last=last)
plot_loss(res, ty='train', log=log, step=step, last=last)
plot_loss(res, ty='test', log=log, step=step, last=last)
|
mit
|
dgasmith/EEX_scratch
|
eex/translators/gromacs/gromacs_read.py
|
1
|
2673
|
"""
GROMACS EEX I/O
"""
import pandas as pd
import os
import numpy as np
import eex
import logging
logger = logging.getLogger(__name__)
def read_gromacs_gro_file(dl, gro_folder, ffdir=None):
if ffdir is None:
if "GROMACS_DIR" in os.environ:
ffdir = os.environ["GROMACS_DIR"]
else:
raise KeyError("GROMACS read: Must provide `ffdir` if 'GROMACS_DIR' not in environmental variables.")
if not os.path.exists(ffdir):
raise OSError("GROMACS read: Could not find FF folder, expected at '%s'." % ffdir)
### Read in conf.gro file first
conf_fname = os.path.join(gro_folder, "conf.gro")
if not os.path.exists(conf_fname):
raise OSError("GROMACS read: Could not find conf.gro file, expected at '%s'." % conf_fname)
with open(conf_fname, 'r') as conf_file:
conf_title = next(conf_file)
conf_read_size = int(next(conf_file))
reader = pd.read_table(
conf_fname,
header=None,
iterator=True,
names=["atom_name", "atomic_symbol", "atom_index", "X", "Y", "Z"],
engine="c",
comment=";",
delim_whitespace=True,
skiprows=2)
data = reader.get_chunk(conf_read_size).dropna(axis=1, how="all")
data["atomic_number"] = 0
data.set_index("atom_index", inplace=True, drop=True)
    for symbol, group in data.groupby("atomic_symbol"):
        data.loc[group.index, "atomic_number"] = eex.metadata.atom_symbol_to_number[symbol]
dl.add_atoms(data)
# Get the box size
size = reader.get_chunk(1).dropna(axis=1, how="all")
half_box_length = size.values[0] / 2
box_size = {k: (-v, v) for k, v in zip(["x", "y", "z"], half_box_length)}
dl.set_box_size(box_size)
### Read in topol.top file next
top_fname = os.path.join(gro_folder, "topol.top")
    if not os.path.exists(top_fname):
        raise OSError("GROMACS read: Could not find topol.top file, expected at '%s'." % top_fname)
data = pd.read_table(
top_fname,
header=None,
# iterator=True,
names=range(9),
engine="c",
comment=";",
delim_whitespace=True)
print("")
data = data.loc[~data.iloc[:, 0].str.contains("#")]
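    # topol.top is organized into "[ section ]" blocks; locate the bracket
    # rows so that each block's body can be sliced out and inspected below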
    indices = np.where(data.iloc[:, 0].str.contains(r"\["))[0]
for indx in range(indices.shape[0] - 1):
start = indices[indx]
end = indices[indx + 1]
label = data.iloc[start, 1]
tmp = data.iloc[(start + 1):end].convert_objects(convert_numeric=True).dropna(axis=1, how="all")
print(label)
print(tmp)
print('----')
# print(indices)
# print("")
# print(data)
print(os.environ)
|
bsd-3-clause
|
josephhardinee/PyDisdrometer
|
pydsd/tests/test_plot.py
|
3
|
2491
|
import numpy as np
import unittest
import matplotlib.pyplot as plt
from ..plot import plot
from ..aux_readers import ARM_JWD_Reader, NASA_2DVD_reader
class TestPlot(unittest.TestCase):
"Test module for the plot scripts"
def setUp(self):
filename = "testdata/sgpdisdrometerC1.b1.20110427.000000_test_jwd_b1.cdf"
self.dsd = ARM_JWD_Reader.read_arm_jwd_b1(filename)
def test_plot_dsd(self):
fig, ax = plot.plot_dsd(self.dsd)
plt.close()
def test_plot_dsd_nasa_2dvd(self):
filename = "testdata/nasa_gv_mc3e_2dvd_test.txt"
self.dsd = NASA_2DVD_reader.read_2dvd_dsd_nasa_gv(filename)
fig, ax = plot.plot_dsd(self.dsd)
plt.close()
def test_plot_NwD0(self):
self.dsd.calculate_dsd_parameterization() # Bad form to rely on this, but not a better way right now.
fig, ax = plot.plot_NwD0(self.dsd)
plt.close()
def test_plot_ZR(self):
self.dsd.calculate_radar_parameters()
fig, ax = plot.plot_ZR(self.dsd)
plt.close()
def test_plot_ZR_hist2d(self):
self.dsd.calculate_radar_parameters()
fig, ax = plot.plot_ZR_hist2d(self.dsd)
plt.close()
def test_scatter(self):
x = self.dsd.fields["rain_rate"][
"data"
] # Visually not the best example, but avoids call to dsd parameterization
y = self.dsd.fields["rain_rate"]["data"]
fig, ax = plot.scatter(x, y)
plt.close()
def test_plot_hist2d(self):
x = self.dsd.fields["rain_rate"]["data"]
y = self.dsd.fields["rain_rate"]["data"]
fig, ax = plot.plot_hist2d(x, y)
plt.close()
def test_plot_ts(self):
fig, ax = plot.plot_ts(self.dsd, "Nd")
plt.close()
# def test_plotHov(self):
# self.dsd.calculate_dsd_parameterization() # Bad form to rely on this, but not a better way right now.
# fig, ax = plot.plotHov(self.dsd, 'D0', 'Nd')
# plt.close()
#
# def test_plot_hexbin(self):
# self.dsd.calculate_dsd_parameterization() # Bad form to rely on this, but not a better way right now.
# x = self.dsd.fields['Nd']['data']
# y = self.dsd.fields['D0']['data']
# fig, ax = plot.plot_hexbin(x, y)
# plt.close()
def test_methods(self):
# Should we test every submethod?
fig, ax = plot.plot_dsd(self.dsd)
plot.set_ax_limits(xlim=(0, 100), ylim=(0, 100), ax=ax)
plt.close()
|
lgpl-2.1
|
bionet/ted.python
|
demos/asdm_rt_demo.py
|
1
|
2449
|
#!/usr/bin/env python
"""
Demos for real-time ASDM time encoding and decoding algorithms.
"""
# Copyright (c) 2009-2015, Lev Givon
# All rights reserved.
# Distributed under the terms of the BSD license:
# http://www.opensource.org/licenses/bsd-license
import sys
import numpy as np
# Set matplotlib backend so that plots can be generated without a
# display:
import matplotlib
matplotlib.use('AGG')
from bionet.utils.misc import func_timer
import bionet.utils.band_limited as bl
import bionet.utils.plotting as pl
import bionet.ted.asdm as asdm
import bionet.ted.rt as rt
# For determining output plot file names:
output_name = 'asdm_rt_demo_'
output_count = 0
output_ext = '.png'
# Define algorithm parameters and input signal:
dur = 0.1
dt = 1e-6
f = 32
bw = 2*np.pi*f
t = np.arange(0, dur, dt)
np.random.seed(0)
noise_power = None
if noise_power is None:
fig_title = 'ASDM Input Signal with No Noise'
else:
fig_title = 'ASDM Input Signal with %d dB of Noise' % noise_power
print fig_title
u = func_timer(bl.gen_band_limited)(dur, dt, f, noise_power)
pl.plot_signal(t, u, fig_title,
output_name + str(output_count) + output_ext)
# Define encoding parameters:
dte = dt
quad_method = 'trapz'
b = 3.5 # bias
d = 0.7 # threshold
k = 0.01 # scaling factor
# Define real time decoder stitching parameters:
N = 10
M = 2
K = 1
try:
    asdm.asdm_recoverable(u, bw, b, d, k)
except ValueError:
    # the reconstruction condition is not satisfied, so bail out early
    sys.exit()
output_count += 1
fig_title = 'Signal Encoded Using Real-Time ASDM Encoder'
print fig_title
encoder = rt.ASDMRealTimeEncoder(dt, b, d, k)
s = func_timer(encoder)(u)
pl.plot_encoded(t, u, s, fig_title,
output_name + str(output_count) + output_ext)
output_count += 1
fig_title = 'Signal Decoded Using Real-Time ASDM Decoder'
print fig_title
decoder = rt.ASDMRealTimeDecoder(dt, bw, b, d, k, N, M, K)
u_rec = func_timer(decoder)(s)
end = min(len(u), len(u_rec))
pl.plot_compare(t[:end], u[:end], u_rec[:end], fig_title,
output_name + str(output_count) + output_ext)
output_count += 1
fig_title = 'Signal Decoded Using Real-Time\nThreshold-Insensitive ASDM Decoder'
print fig_title
decoder = rt.ASDMRealTimeDecoderIns(dt, bw, b, N, M, K)
u_rec_ins = func_timer(decoder)(s)
end = min(len(u), len(u_rec_ins))
pl.plot_compare(t[:end], u[:end], u_rec_ins[:end], fig_title,
output_name + str(output_count) + output_ext)
|
bsd-3-clause
|
arasuarun/shogun
|
examples/undocumented/python_modular/graphical/classifier_perceptron_graphical.py
|
26
|
2311
|
#!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
import latex_plot_inits
parameter_list = [[20, 5, 1., 1000, 1, None, 5], [100, 5, 1., 1000, 1, None, 10]]
def classifier_perceptron_graphical(n=100, distance=5, learn_rate=1., max_iter=1000, num_threads=1, seed=None, nperceptrons=5):
from modshogun import RealFeatures, BinaryLabels
from modshogun import Perceptron
from modshogun import MSG_INFO
# 2D data
_DIM = 2
# To get the nice message that the perceptron has converged
dummy = BinaryLabels()
dummy.io.set_loglevel(MSG_INFO)
np.random.seed(seed)
# Produce some (probably) linearly separable training data by hand
# Two Gaussians at a far enough distance
X = np.array(np.random.randn(_DIM,n))+distance
Y = np.array(np.random.randn(_DIM,n))
label_train_twoclass = np.hstack((np.ones(n), -np.ones(n)))
fm_train_real = np.hstack((X,Y))
feats_train = RealFeatures(fm_train_real)
labels = BinaryLabels(label_train_twoclass)
perceptron = Perceptron(feats_train, labels)
perceptron.set_learn_rate(learn_rate)
perceptron.set_max_iter(max_iter)
perceptron.set_initialize_hyperplane(False)
# Find limits for visualization
x_min = min(np.min(X[0,:]), np.min(Y[0,:]))
x_max = max(np.max(X[0,:]), np.max(Y[0,:]))
y_min = min(np.min(X[1,:]), np.min(Y[1,:]))
y_max = max(np.max(X[1,:]), np.max(Y[1,:]))
for i in xrange(nperceptrons):
# Initialize randomly weight vector and bias
perceptron.set_w(np.random.random(2))
perceptron.set_bias(np.random.random())
# Run the perceptron algorithm
perceptron.train()
# Construct the hyperplane for visualization
# Equation of the decision boundary is w^T x + b = 0
b = perceptron.get_bias()
w = perceptron.get_w()
        hx = np.linspace(x_min-1,x_max+1)
        # decision boundary w[0]*x + w[1]*y + b = 0, i.e. y = -(w[0]*x + b)/w[1]
        plt.plot(hx, -1/w[1]*(w[0]*hx+b))
# Plot the two-class data
plt.scatter(X[0,:], X[1,:], s=40, marker='o', facecolors='none', edgecolors='b')
plt.scatter(Y[0,:], Y[1,:], s=40, marker='s', facecolors='none', edgecolors='r')
# Customize the plot
plt.axis([x_min-1, x_max+1, y_min-1, y_max+1])
plt.title('Rosenblatt\'s Perceptron Algorithm')
plt.xlabel('x')
plt.ylabel('y')
plt.show()
return perceptron
if __name__=='__main__':
print('Perceptron graphical')
classifier_perceptron_graphical(*parameter_list[0])
|
gpl-3.0
|
febert/DeepRL
|
PolicyGradient/pg_function_approx_actor_critic.py
|
1
|
18451
|
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import axes3d
import time
import math
import cPickle
import gym as gym
class mountain_car():
def __init__(self,
gamma=0.99,
init_alpha=1e-4,
constant_alpha=False,
type_features=2,
init_epsilon=1.0,
update_epsilon=True,
policy_mode='stochastic',
N_0=50.,
random_init_theta=False,
negative_gradient = False,
environment = 'MountainCar-v0',
lambda_ = 0.5
):
if negative_gradient:
self.baseline_enable = 0
self.gradient_sign = -1
else:
self.baseline_enable = 1
self.gradient_sign = 1
self.env = gym.make(environment)
self.num_actions = self.env.action_space.n
self.statedim = self.env.observation_space.shape[0]
self.type_features = type_features
## learning rate
self.init_alpha = init_alpha
self.alpha = init_alpha
self.constant_alpha = constant_alpha
# lengths of all the played episodes
self.episode_lengths = []
# running average of the mean value of the episodes' steps
self.mean_value_fcn = 0.0
# lengths of all the tested episodes TODO
self.test_lengths = []
## policy parameters initialization
#tile features:
self.tile_resolution = 10
self.num_tile_features = pow(self.tile_resolution,self.statedim)*2
#for the value function estimation:
self.v = np.zeros(self.num_tile_features)
self.eligibility_vector = np.zeros(self.num_tile_features)
self.eligibility_vector_theta = np.zeros(self.num_tile_features*self.num_actions)
self.lambda_ = lambda_
if random_init_theta:
# random initialization
self.theta = np.random.randn(self.num_tile_features * self.num_actions)*0.1
else:
# zero initialization
self.theta = np.zeros(self.num_tile_features * self.num_actions)
## stochastic or deterministic softmax-based actions
self.policy_mode = policy_mode
## exploration parameters
# too much exploration is wrong!!!
self.epsilon = init_epsilon # explore probability
self.update_epsilon = update_epsilon
self.total_runs = 0.
# too long episodes give too much negative reward!!!!
# self.max_episode_length = 1000000
# ----> Use gamma!!!!! TODO: slower decrease?
self.gamma = gamma #similar to 0.9
self.N_0 = N_0
print('N_0',self.N_0)
print('init alpha',self.init_alpha)
print('Constant Alpha', constant_alpha)
print('lambda',self.lambda_)
def get_tile_feature(self, state):
high = self.env.observation_space.high
obs_dim = self.env.observation_space.shape[0] #dimension of observation space
low = self.env.observation_space.low
numactions = self.env.action_space.n
stepsize = (high - low)/self.tile_resolution
ind = np.floor((state-low)/stepsize).astype(int)
ind[ind>=self.tile_resolution]=self.tile_resolution-1 #bound the index so that it doesn't exceed bounds
ind = tuple(ind)
ind_shift = np.floor((state-low+stepsize/2)/stepsize).astype(int)
ind_shift[ind_shift>=self.tile_resolution]=self.tile_resolution-1 #bound the index so that it doesn't exceed bounds
ind_shift = tuple(ind_shift)
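        # two overlapping tilings: the second grid is shifted by half a tile
        # width, so every state activates exactly two binary features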
        grid = np.zeros((self.tile_resolution,)*obs_dim)
try:
grid[ind] = 1
except IndexError, error:
print(error)
print("ind", ind)
print("state", state)
print("high", high)
print("low", low)
return
        grid_shift = np.zeros((self.tile_resolution,)*obs_dim)
grid_shift[ind_shift] = 1
flatgrid = np.concatenate((grid,grid_shift), axis= 0).flatten()
return flatgrid
def get_full_feature(self,state):
flatgrid = self.get_tile_feature(state)
length_flatgrid = flatgrid.shape[0]
full_feature = np.zeros((self.num_actions,length_flatgrid*self.num_actions))
for action in range(self.num_actions):
full_feature[action,length_flatgrid*action: (action+1)*length_flatgrid] = flatgrid
return full_feature
# TODO: better solution to over-/underflows? Maybe not needed if proper learning?
def eval_action_softmax_probabilities(self, full_feature):
#softmax probability
activation = full_feature.dot(self.theta)
activation-= np.max(activation)
# trying to workaround over and underflows
solved = False
while (not solved):
solved = True
try:
prob_distrib = np.exp(activation)
prob_distrib /= np.sum(prob_distrib)
except FloatingPointError, error:
solved = False
# print("potential error in softmax")
print(error)
# print(".", end="")
            prob_distrib[np.log(prob_distrib) < -100] = 0  # print("activation ", activation)
return prob_distrib
#epsilon-greedy but deterministic or stochastic is a choice
def policy(self, state, mode='deterministic'):
explore = bool(np.random.choice([1,0],p=[self.epsilon, 1-self.epsilon]))
features = self.get_full_feature(state)
# print(explore, features, end="")
if mode=='deterministic' and not explore:
# print('deterministic')
return np.argmax(features.dot(self.theta))
elif mode=='stochastic' and not explore:
prob_distrib = self.eval_action_softmax_probabilities(self.get_full_feature(state))
# print('stochastic', prob_distrib)
return np.random.choice(np.arange(self.num_actions),p=prob_distrib)
elif explore:
# print('explore')
return self.env.action_space.sample()
def run_episode(self, enable_render=False, limit=100000):
episode = []
state = self.env.reset()
count = 0
done = False
while ( not done ):
if len(episode)>limit:
return []
count += 1
action = self.policy(state, mode=self.policy_mode)
state, reward, done, info = self.env.step(action)
episode.append((state, action, reward))
if enable_render: self.env.render()
# if count > self.max_episode_length: break;
if enable_render: print("This episode took {} steps".format(count))
return episode
# gradient of the policy function
def score_function(self, state, action):
full_features = self.get_full_feature(state)
prob_distrib = self.eval_action_softmax_probabilities(full_features)
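        # softmax policy score: grad log pi(a|s) = phi(s,a) - sum_b pi(b|s)*phi(s,b)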
try:
score = full_features[action] - prob_distrib.dot(full_features)
# print("score neg")
# print(score)
except FloatingPointError, error:
print(error)
print("features", full_features)
print("prob_distrib", prob_distrib)
return score
def numerical_score_function(self, state, action, delta=1e-8):
# backup value
save_theta = np.array(self.theta)
# base value
log_prob = np.log(self.eval_action_softmax_probabilities(state))[action]
score = np.zeros_like(self.theta)
# apply delta to every component of theta
for index, th in np.ndenumerate(self.theta):
self.theta[index] += delta
score[index] = ( np.log(self.eval_action_softmax_probabilities(state))[action] - log_prob ) / delta
# restore value
self.theta[index] = save_theta[index]
return score
def train(self, iter=1000, dataname = 'unnamed_data', save = False):
# fig = plt.figure()
for it in range(iter):
print(it, end=" ")
# run episode
episode = self.run_episode(enable_render=False)
self.total_runs += 1.0
# keep track of training episode lengths
self.episode_lengths.append(len(episode))
if (it+1)%1 == 0:
# output training info
print("EPISODE #{}".format(self.total_runs))
print("with a exploration of {}%".format(self.epsilon*100))
print("and learning rate of {}".format(self.alpha))
print("lasted {0} steps".format(len(episode)))
# do a test run
save_policy_mode = self.policy_mode
save_epsilon = self.epsilon
# print(self.policy_mode)
self.policy_mode = "deterministic"
self.epsilon = 0.
limit = 10000
det_episode = self.run_episode(limit=limit)
if det_episode == []:
len_episode = limit
else:
len_episode = len(det_episode)
self.test_lengths.append(len_episode)
self.policy_mode = save_policy_mode
self.epsilon = save_epsilon
tile_features_mat = np.zeros((len(episode), self.num_tile_features))
for idx in range(len(episode)):
tile_features_mat[idx,:] = self.get_tile_feature(episode[idx][0])
#offline td-lambda for estimating the value function
if not(len(episode)==0):
(state, action, reward) = episode[0]
                self.eligibility_vector = np.zeros(self.num_tile_features)
self.eligibility_vector_theta = np.zeros(self.num_tile_features*self.num_actions)
for idx in range(1,len(episode)):
(state, action, reward) = episode[idx]
Vs = tile_features_mat[idx-1].dot(self.v)
Vs_prime = tile_features_mat[idx].dot(self.v)
delta_t = reward + self.gamma*Vs_prime - Vs
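                    # accumulate the eligibility traces and move the critic (v)
                    # and the actor (theta) along the TD error delta_t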
                    self.eligibility_vector = self.eligibility_vector*self.gamma*self.lambda_ + tile_features_mat[idx]
                    self.v += 1e-3*delta_t*self.eligibility_vector
self.eligibility_vector_theta = self.eligibility_vector_theta*self.lambda_ + self.score_function(state,action)
self.theta += self.alpha*self.eligibility_vector_theta*delta_t
if (it+1)%1 == 0:
print("theta")
print(self.theta)
self.plot_policy(mode= 'stochastic')
self.plot_policy(mode= 'deterministic')
self.plot_eligibility()
self.plot_value_function()
if (it+1)%100 == 0:
self.plot_training()
self.plot_testing()
self.plot_value_function()
# decrease exploration
if self.update_epsilon:
self.epsilon = self.N_0 / (self.N_0 + self.total_runs)
# decrease alpha
if not self.constant_alpha:
self.alpha = self.init_alpha / np.sqrt(self.total_runs)
if save:
self.savedata(dataname=dataname)
return self.theta
def plot_eligibility(self):
        e_vector = self.eligibility_vector[0:self.num_tile_features/2]  # just visualize first half
print('plotting the eligibility traces')
obs_low = self.env.observation_space.low
obs_high = self.env.observation_space.high
# values to evaluate policy at
x_range = np.linspace(obs_low[0], obs_high[0], self.tile_resolution)
v_range = np.linspace(obs_low[1], obs_high[1], self.tile_resolution)
# get actions in a grid
e_mat = e_vector.reshape((self.tile_resolution,self.tile_resolution))
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
X, Y = np.meshgrid(x_range, v_range)
ax.plot_wireframe(X,Y, e_mat)
ax.set_xlabel("x")
ax.set_ylabel("v")
ax.set_zlabel("eligibility")
plt.show()
def plot_value_function(self):
print('plotting the value function')
obs_low = self.env.observation_space.low
obs_high = self.env.observation_space.high
# values to evaluate policy at
x_range = np.linspace(obs_low[0], obs_high[0]-0.01, self.tile_resolution*3)
v_range = np.linspace(obs_low[1], obs_high[1]-0.01, self.tile_resolution*3)
# get actions in a grid
value_func = np.zeros((x_range.shape[0], v_range.shape[0]))
for i, state1 in enumerate(x_range):
for j, state2 in enumerate(v_range):
# print(np.argmax(self.get_features((x,v)).dot(self.theta)), end="")
value_func[i,j] = -self.v.dot(self.get_tile_feature((state1,state2)))
print("")
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
X, Y = np.meshgrid(x_range, v_range)
ax.plot_wireframe(X,Y, value_func)
ax.set_xlabel("x")
ax.set_ylabel("v")
ax.set_zlabel("negative value")
plt.show()
def savedata(self, dataname):
output = open(dataname, 'wb')
cPickle.dump(self.theta, output)
cPickle.dump(self.episode_lengths, output)
cPickle.dump(self.test_lengths, output)
cPickle.dump(self.v,output)
output.close()
def loaddata(self, dataname):
pkl_file = open(dataname, 'rb')
self.theta = cPickle.load(pkl_file)
self.episode_lengths = cPickle.load(pkl_file)
self.test_lengths = cPickle.load(pkl_file)
self.v = cPickle.load(pkl_file)
print( self.theta)
print( self.episode_lengths)
        print( self.test_lengths)
pkl_file.close()
def plot_policy(self, resolution=100, mode= 'stochastic'):
# backup of value
save_epsilon = self.epsilon
self.epsilon = 0.0 # no exploration
obs_low = self.env.observation_space.low
obs_high = self.env.observation_space.high
# values to evaluate policy at
x_range = np.linspace(obs_low[0], obs_high[0], resolution)
v_range = np.linspace(obs_low[1], obs_high[1], resolution)
# get actions in a grid
greedy_policy = np.zeros((resolution, resolution))
for i, x in enumerate(x_range):
for j, v in enumerate(v_range):
# print(np.argmax(self.get_features((x,v)).dot(self.theta)), end="")
greedy_policy[i,j] = self.policy((x,v),mode)
print("")
# plot policy
fig = plt.figure()
plt.imshow(greedy_policy,
cmap=plt.get_cmap('gray'),
interpolation='none',
extent=[obs_low[1],obs_high[1],obs_high[0],obs_low[0]],
aspect="auto")
plt.xlabel("velocity")
plt.ylabel("position")
plt.show()
# restore value
self.epsilon = save_epsilon
def plot_training(self):
fig = plt.figure()
plt.plot(self.episode_lengths)
plt.yscale('log')
plt.show()
def plot_testing(self):
fig = plt.figure()
plt.plot(self.test_lengths)
plt.yscale('log')
plt.show()
def compare_gradients(self,):
self.theta = np.array([[-0.01338339, -0.01746333 , 0.03084672],[-0.38015887 , 0.00830843, 0.37185044]])
numepisodes = 200
self.epsilon = 0 #the exploration has to be set to zero!
        self.policy_mode = 'deterministic' # could work with stochastic as well
self.compute_with_policy_gradient_theorem(numepisodes)
self.compute_numeric_gradient(numepisodes)
def compute_with_policy_gradient_theorem(self, numepisodes):
accum_values = np.zeros_like(self.theta)
for epi_number in range(numepisodes):
episode = self.run_episode()
#print(self.policy_mode)
value_fcn = 0
for idx in range(len(episode),0,-1):
(state, action, reward) = episode[idx-1]
value_fcn = reward + self.gamma*value_fcn
accum_values += self.score_function(state,action)*value_fcn
#print(len(episode))
average_gradient = accum_values/numepisodes
print('average gradient computed with policy gradient theorem')
print('result after %d episodes:'%(numepisodes))
print(average_gradient)
def compute_numeric_gradient(self,numepisodes):
average_numeric_gradient = np.zeros_like(self.theta)
numeric_gradient_accum = np.zeros_like(self.theta)
theta_saved = np.array(self.theta)
before = 0.0
for epi_number in range(numepisodes):
before -= len(self.run_episode()) # compute length of episode before applying perturbations
before=float(before)/numepisodes
print('average episode length before perturbation',before)
for i in range(self.theta.shape[0]):
for j in range(self.theta.shape[1]):
for epi_number in range(numepisodes):
perturbation = 1e-5
# perturb the selected parameter
self.theta = np.array(theta_saved)
self.theta[i][j]+= perturbation
after = -len(self.run_episode())
#print('episode length after perturbing element %d,%d : %d' % (i,j,after) )
numeric_gradient_accum[i][j]+= float(after-before)/perturbation
print(i, j)
print( numeric_gradient_accum)
average_numeric_gradient[i][j] = numeric_gradient_accum[i][j] /numepisodes
print('average gradient computed numerically')
print('result after %d episodes:'% (numepisodes))
print(average_numeric_gradient)
self.theta = np.array(theta_saved)
#car1 = mountain_car(init_alpha=1e-3)
#state = (1,1)
#action = 0
#print('grad theorm score:',car1.score_function(state,action))
#print('numeric score:',car1.numerical_score_function(state,action))
#car1.train(iter=1000)
|
gpl-3.0
|
alistairewj/reproducibility-mimic
|
notebooks/mp_utils.py
|
1
|
24707
|
# Import libraries
import numpy as np
import pandas as pd
import psycopg2
import sys
import datetime as dt
from sklearn import metrics
import matplotlib.pyplot as plt
# default colours for prettier plots
col = [[0.9047, 0.1918, 0.1988],
[0.2941, 0.5447, 0.7494],
[0.3718, 0.7176, 0.3612],
[1.0000, 0.5482, 0.1000],
[0.4550, 0.4946, 0.4722],
[0.6859, 0.4035, 0.2412],
[0.9718, 0.5553, 0.7741],
       [0.5313, 0.3359, 0.6523]]
marker = ['v','o','d','^','s','o','+']
ls = ['-','-','-','-','-','-','--','--']
def generate_times(df, T=None, T_to_death=None, seed=None, censor=False):
# generate a dictionary based off of the analysis type desired
# creates "windowtime" - the time at the end of the window
# df needs to have the following fields:
# icustay_id (not as an index)
# dischtime_hours
# deathtime_hours
# censortime_hours (if censoring with censor=True)
# these times are relative to ICU intime ("_hours" means hours after ICU admit)
if seed is None:
print('Using default seed 111.')
seed=111
# create endtime: this is the last allowable time for our window
df['endtime'] = df['dischtime_hours']
# if they die before discharge, set the end time to the time of death
idx = (~df['deathtime_hours'].isnull()) & (df['deathtime_hours']<df['dischtime_hours'])
df.loc[idx,'endtime'] = df.loc[idx,'deathtime_hours']
# optionally censor the data
# this involves updating the endtime to an even earlier time, if present
# e.g. the first time a patient was made DNR
if censor:
idx = (~df['censortime_hours'].isnull()) & (df['censortime_hours']<df['endtime'])
df.loc[idx,'endtime'] = df.loc[idx,'censortime_hours']
# now generate the end of the window
# this is X hours
np.random.seed(seed)
tau = np.random.rand(df.shape[0])
# T adds a bit of fuzziness to prevent information leakage
if T is not None:
# extract window at least T hours before discharge/death
df['windowtime'] = np.floor(tau*(df['endtime']-T))
# if the stay is shorter than T hours, the interval can be negative
# in this case, we set the interval to 0
# usually, this will mean we only have lab data
df.loc[df['windowtime']<0, 'windowtime'] = 0
else:
df['windowtime'] = np.floor(tau*(df['endtime']))
if T_to_death is not None:
# fix the time for those who die to be T_to_death hours from death
# first, isolate patients where they were in the ICU T hours before death
idxInICU = (df['deathtime_hours'] - df['dischtime_hours'])<=T_to_death
# for these patients, set the time to be T_to_death hours
df.loc[idxInICU, 'windowtime'] = df.loc[idxInICU,'deathtime_hours'] - T_to_death
windowtime_dict = df.set_index('icustay_id')['windowtime'].to_dict()
return windowtime_dict
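# A minimal usage sketch for generate_times (the toy values below are
# hypothetical; real dataframes come from the cohort extraction queries):
#
#   df_toy = pd.DataFrame({'icustay_id': [1, 2],
#                          'dischtime_hours': [48.0, 120.0],
#                          'deathtime_hours': [np.nan, 100.0]})
#   windows = generate_times(df_toy, T=8, seed=111)
#   # windows maps icustay_id -> hour at which the feature window ends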
def generate_times_before_death(df, T=None, T_to_death=None, seed=None):
# generate a dictionary based off of the analysis type desired
# creates "windowtime" - the time at the end of the window
# df needs to have the following fields:
# icustay_id (not as an index)
# dischtime_hours
# deathtime_hours
if seed is None:
print('Using default seed 111.')
seed=111
df['endtime'] = df['dischtime_hours']
idx = (~df['deathtime_hours'].isnull()) & (df['deathtime_hours']<df['dischtime_hours'])
df.loc[idx,'endtime'] = df.loc[idx,'deathtime_hours']
np.random.seed(seed)
# df is centered on intime (as t=0)
# we need to ensure a random time is at least T hours from death/discharge
tau = np.random.rand(df.shape[0])
if T is not None:
# extract window at least T hours before discharge/death
df['windowtime'] = np.floor(tau*(df['endtime']-T))
# if the stay is shorter than T hours, the interval can be negative
# in this case, we set the interval to 0
df.loc[df['windowtime']<0, 'windowtime'] = 0
else:
df['windowtime'] = np.floor(tau*(df['endtime']))
if T_to_death is not None:
# fix the time for those who die to be T_to_death hours from death
# first, isolate patients where they were in the ICU T hours before death
idxInICU = (df['deathtime_hours'] - df['dischtime_hours'])<=T_to_death
# for these patients, set the time to be T_to_death hours
df.loc[idxInICU, 'windowtime'] = df.loc[idxInICU,'deathtime_hours'] - T_to_death
windowtime_dict = df.set_index('icustay_id')['windowtime'].to_dict()
return windowtime_dict
# pretty confusion matrices!
def print_cm(y, yhat):
print('\nConfusion matrix')
cm = metrics.confusion_matrix(y, yhat)
TN = cm[0,0]
FP = cm[0,1]
FN = cm[1,0]
TP = cm[1,1]
N = TN+FP+FN+TP
    print(' \t{:6s}\t{:6s}'.format('yhat=0','yhat=1'))
    print('y=0\t{:6g}\t{:6g}\tSpec={:2.2f}'.format(cm[0,0],cm[0,1], 100.0*TN/(TN+FP))) # Spec
    print('y=1\t{:6g}\t{:6g}\tSens={:2.2f}'.format(cm[1,0],cm[1,1], 100.0*TP/(TP+FN))) # Sens
    # add NPV/PPV and overall accuracy as the bottom line
    print(' \t{:2.2f}\t{:2.2f}\t Acc={:2.2f}'.format(100.0*TN / (TN+FN), 100.0*TP / (TP+FP), 100.0*(TP+TN)/N))
print(' \tNPV\tPPV')
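# e.g. print_cm(y_test, yhat_test) prints a 2x2 confusion table with
# sensitivity/specificity on the rows and NPV/PPV plus accuracy on the bottom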
# these define 5 lists of variable names
# these are the variables later used for prediction
def vars_of_interest():
# we extract the min/max for these covariates
var_min = ['heartrate', 'sysbp', 'diasbp', 'meanbp',
'resprate', 'tempc', 'spo2']
    var_max = list(var_min)  # copy, so appending 'gcs' below leaves var_max unchanged
    var_min.append('gcs')
#var_max.extend(['rrt','vasopressor','vent'])
# we extract the first/last value for these covariates
var_first = ['heartrate', 'sysbp', 'diasbp', 'meanbp',
'resprate', 'tempc', 'spo2']
    var_last = list(var_first)  # copy, so the extend below does not also grow var_first
var_last.extend(['gcsmotor','gcsverbal','gcseyes','endotrachflag','gcs'])
var_first_early = ['bg_po2', 'bg_pco2', #'bg_so2'
#'bg_fio2_chartevents', 'bg_aado2_calc',
#'bg_fio2', 'bg_aado2',
'bg_pao2fio2ratio', 'bg_ph', 'bg_baseexcess', #'bg_bicarbonate',
'bg_totalco2', #'bg_hematocrit', 'bg_hemoglobin',
'bg_carboxyhemoglobin', 'bg_methemoglobin',
#'bg_chloride', 'bg_calcium', 'bg_temperature',
#'bg_potassium', 'bg_sodium', 'bg_lactate',
#'bg_glucose',
# 'bg_tidalvolume', 'bg_intubated', 'bg_ventilationrate', 'bg_ventilator',
# 'bg_peep', 'bg_o2flow', 'bg_requiredo2',
# begin lab values
'aniongap', 'albumin', 'bands', 'bicarbonate', 'bilirubin', 'creatinine',
'chloride', 'glucose', 'hematocrit', 'hemoglobin', 'lactate', 'platelet',
'potassium', 'ptt', 'inr', 'sodium', 'bun', 'wbc']
var_last_early = var_first_early
# fourth set of variables
# we have special rules for these...
var_sum = ['urineoutput']
var_static = [u'is_male', u'emergency_admission', u'age',
# services
u'service_any_noncard_surg',
u'service_any_card_surg',
u'service_cmed',
u'service_traum',
u'service_nmed',
# ethnicities
u'race_black',u'race_hispanic',u'race_asian',u'race_other',
# demographics
u'height', u'weight', u'bmi']
return var_min, var_max, var_first, var_last, var_sum, var_first_early, var_last_early, var_static
def vars_of_interest_streaming():
# define the covariates to be used in the model
# these covariates are available in the hospital stream at the moment
# biggest exclusion compared to the above is no GCS.
var_min = ['heartrate', 'systolicbp', 'diastolicbp', 'meanbp',
'resprate', 'spo2'] # , 'temp', 'glucosecharted'
var_max = var_min
#var_max.extend(['rrt','vasopressor','vent'])
#var_min.append('gcs')
# we extract the first/last value for these covariates
var_first = ['heartrate', 'systolicbp', 'diastolicbp', 'meanbp',
'resprate', 'spo2'] # , 'temp', 'glucosecharted'
var_last = var_first
#var_last.extend(['gcsmotor','gcsverbal','gcseyes','endotrachflag','gcs'])
var_first_early = ['bg_so2', 'bg_po2', 'bg_pco2', #'bg_fio2_chartevents', 'bg_aado2_calc',
'bg_fio2', 'bg_aado2', 'bg_pao2fio2', 'bg_ph', 'bg_baseexcess', 'bg_bicarbonate',
'bg_totalco2', 'bg_hematocrit', 'bg_hemoglobin', 'bg_carboxyhemoglobin', 'bg_methemoglobin',
'bg_chloride', 'bg_calcium', 'bg_temperature', 'bg_potassium', 'bg_sodium', 'bg_lactate',
'bg_glucose',
# 'bg_intubated', 'bg_ventilationrate', 'bg_ventilator', # these vars are usually NaN
# 'bg_tidalvolume', 'bg_peep', 'bg_o2flow', 'bg_requiredo2',
# begin lab values
'aniongap', 'albumin', 'bands', 'bicarbonate', 'bilirubin', 'creatinine',
'chloride', 'glucose', 'hematocrit', 'hemoglobin', 'lactate', 'platelet',
'potassium', 'ptt', 'inr', 'pt', 'sodium', 'bun', 'wbc']
var_last_early = var_first_early
# fourth set of variables
# we have special rules for these...
var_sum = None #['urineoutput']
return var_min, var_max, var_first, var_last, var_sum, var_first_early, var_last_early
def get_design_matrix(df, time_dict, W=8, W_extra=24):
# W_extra is the number of extra hours to look backward for labs
# e.g. if W_extra=24 we look back an extra 24 hours for lab values
# timing info for icustay_id < 200100:
# 5 loops, best of 3: 877 ms per loop
# timing info for all icustay_id:
# 5 loops, best of 3: 1.48 s per loop
# get the hardcoded variable names
var_min, var_max, var_first, var_last, var_sum, var_first_early, var_last_early, var_static = vars_of_interest()
tmp = np.asarray(time_dict.items()).astype(int)
N = tmp.shape[0]
M = W+W_extra
# create a vector of [0,...,M] to represent the hours we need to subtract for each icustay_id
hr = np.linspace(0,M,M+1,dtype=int)
hr = np.reshape(hr,[1,M+1])
hr = np.tile(hr,[N,1])
hr = np.reshape(hr, [N*(M+1),], order='F')
# duplicate tmp to M+1, as we will be creating T+1 rows for each icustay_id
tmp = np.tile(tmp,[M+1,1])
tmp_early_flag = np.copy(tmp[:,1])
# adding hr to tmp[:,1] gives us what we want: integers in the range [Tn-T, Tn]
tmp = np.column_stack([tmp[:,0], tmp[:,1]-hr, hr>W])
# create dataframe with tmp
df_time = pd.DataFrame(data=tmp, index=None, columns=['icustay_id','hr','early_flag'])
df_time.sort_values(['icustay_id','hr'],inplace=True)
# merge df_time with df to filter down to a subset of rows
df = df.merge(df_time, left_on=['icustay_id','hr'], right_on=['icustay_id','hr'],how='inner')
# apply functions to groups of vars
df_first_early = df.groupby('icustay_id')[var_first_early].first()
df_last_early = df.groupby('icustay_id')[var_last_early].last()
# slice down df_time by removing early times
# isolate only have data from [t - W, t - W + 1, ..., t]
df = df.loc[df['early_flag']==0,:]
df_first = df.groupby('icustay_id')[var_first].first()
df_last = df.groupby('icustay_id')[var_last].last()
df_min = df.groupby('icustay_id')[var_min].min()
df_max = df.groupby('icustay_id')[var_max].max()
df_sum = df.groupby('icustay_id')[var_sum].sum()
# update the column names
df_first.columns = [x + '_first' for x in df_first.columns]
df_last.columns = [x + '_last' for x in df_last.columns]
df_first_early.columns = [x + '_first_early' for x in df_first_early.columns]
df_last_early.columns = [x + '_last_early' for x in df_last_early.columns]
df_min.columns = [x + '_min' for x in df_min.columns]
df_max.columns = [x + '_max' for x in df_max.columns]
df_sum.columns = [x + '_sum' for x in df_sum.columns]
# now combine all the arrays together
df_data = pd.concat([df_first, df_first_early, df_last, df_last_early, df_min, df_max, df_sum], axis=1)
return df_data
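# Typical call pattern (a sketch; assumes df holds the hourly rows keyed by
# icustay_id/hr produced by the extraction step):
#   windows = generate_times(df_static, T=8, seed=111)
#   X = get_design_matrix(df, windows, W=8, W_extra=24)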
# this function is used to print out data for a single pt
# mainly used for debugging weird inconsistencies in data extraction
# e.g. "wait why does this icustay_id not have heart rate?"
def debug_for_iid(df, time_dict, iid, W=8, W_extra=24):
    # display/HTML come from IPython; this helper is intended for notebook use
    from IPython.display import display, HTML
    # get the hardcoded variable names used by the aggregations printed below
    var_min, var_max, var_first, var_last, var_sum, var_first_early, var_last_early, var_static = vars_of_interest()
    #tmp = np.asarray(time_dict.items()).astype(int)
    tmp = np.asarray([iid, time_dict[iid]]).astype(int)
tmp = np.reshape(tmp,[1,2])
N = tmp.shape[0]
M = W+W_extra
# create a vector of [0,...,M] to represent the hours we need to subtract for each icustay_id
hr = np.linspace(0,M,M+1,dtype=int)
hr = np.reshape(hr,[1,M+1])
hr = np.tile(hr,[N,1])
hr = np.reshape(hr, [N*(M+1),], order='F')
# duplicate tmp to M+1, as we will be creating T+1 rows for each icustay_id
tmp = np.tile(tmp,[M+1,1])
tmp_early_flag = np.copy(tmp[:,1])
# adding hr to tmp[:,1] gives us what we want: integers in the range [Tn-T, Tn]
    tmp = np.column_stack([tmp[:,0], tmp[:,1]-hr, hr>W])
# create dataframe with tmp
df_time = pd.DataFrame(data=tmp, index=None, columns=['icustay_id','hr','early_flag'])
df_time.sort_values(['icustay_id','hr'],inplace=True)
# display the data for this icustay_id
print('\n\n ALL DATA FOR THIS ICUSTAY_ID \n\n')
display(HTML(df.loc[df['icustay_id']==iid,:].to_html().replace('NaN','')))
# display the times selected for this icustay_id
print('\n\n TIMES FOR THIS ICUSTAY_ID \n\n')
display(HTML(df_time.loc[df_time['icustay_id']==iid].to_html().replace('NaN','')))
# merge df_time with df to filter down to a subset of rows
df = df.loc[df['icustay_id']==iid,:].merge(df_time, left_on=['icustay_id','hr'], right_on=['icustay_id','hr'],how='inner')
df = df.loc[df['early_flag']==0,:]
display(HTML(df.to_html().replace('NaN','')))
    print('\n\nFIRST\n\n')
    display(HTML(df.groupby('icustay_id')[var_first].first().to_html().replace('NaN','')))
    print('\n\nLAST\n\n')
    display(HTML(df.groupby('icustay_id')[var_last].last().to_html().replace('NaN','')))
def collapse_data(data):
# this collapses a dictionary of dataframes into a single dataframe
# joins them together on icustay_id and charttime
files = data.keys()
initFlag = False # tells the function to create a new dataframe
# dictionary mapping table names to column name of interest
colNameMap = {'vent': 'vent',
'vasopressor': 'vasopressor',
'rrt_range': 'rrt'}
rangeTbl = ['vent','vasopressor','rrt_range']
# merge all data from above dictionary into a single data frame
for i, f in enumerate(files):
df_tmp = data[f]
if 'subject_id' in df_tmp.columns:
df_tmp.drop('subject_id',axis=1,inplace=True)
if 'hadm_id' in df_tmp.columns:
df_tmp.drop('hadm_id',axis=1,inplace=True)
if 'storetime' in df_tmp.columns:
df_tmp.drop('storetime',axis=1,inplace=True)
print('{:20s}... finished.'.format(f))
if f in rangeTbl:
continue # these are rangesignal tables.. need to be handled separately
if initFlag == False:
df = df_tmp.copy()
initFlag = True
continue
else:
df = df.merge(df_tmp,
on=['icustay_id','charttime_elapsed'],
how='outer')
return df
def plot_xgb_importance_fmap(xgb_model, X_header=None, ax=None, height=0.2,
xlim=None, ylim=None, title='Feature importance',
xlabel='F score', ylabel='Features',
importance_type='weight',
grid=True, **kwargs):
fmap = xgb_model.booster().get_score(importance_type=importance_type)
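    # xgboost names features 'f0', 'f1', ...; if the original column names are
    # provided via X_header, map the importance scores back onto those names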
if X_header is not None:
feat_map = {}
for i in range(len(X_header)):
feat_map['f' + str(i)] = X_header[i]
importance = {}
for i in fmap:
importance[ feat_map[i] ] = fmap[i]
else:
importance = fmap
tuples = [(k, importance[k]) for k in importance]
tuples = sorted(tuples, key=lambda x: x[1])
labels, values = zip(*tuples)
if ax is None:
_, ax = plt.subplots(1, 1)
ylocs = np.arange(len(values))
ax.barh(ylocs, values, align='center', height=height, **kwargs)
for x, y in zip(values, ylocs):
ax.text(x + 1, y, x, va='center')
ax.set_yticks(ylocs)
ax.set_yticklabels(labels)
if xlim is not None:
if not isinstance(xlim, tuple) or len(xlim) != 2:
raise ValueError('xlim must be a tuple of 2 elements')
else:
xlim = (0, max(values) * 1.1)
ax.set_xlim(xlim)
if ylim is not None:
if not isinstance(ylim, tuple) or len(ylim) != 2:
raise ValueError('ylim must be a tuple of 2 elements')
else:
ylim = (-1, len(importance))
ax.set_ylim(ylim)
if title is not None:
ax.set_title(title)
if xlabel is not None:
ax.set_xlabel(xlabel)
if ylabel is not None:
ax.set_ylabel(ylabel)
ax.grid(grid)
return ax
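# A minimal usage sketch for plot_xgb_importance_fmap. The toy data and
# feature names are illustrative assumptions; note that booster() above is
# the older xgboost API (later releases renamed it to get_booster()).
def _plot_xgb_importance_example():
    import numpy as np
    import xgboost as xgb
    X = np.random.rand(200, 3)
    y = (X[:, 0] + X[:, 1] > 1.0).astype(int)
    mdl = xgb.XGBClassifier(n_estimators=20).fit(X, y)
    # X_header maps the generic 'f0', 'f1', ... names to readable labels
    return plot_xgb_importance_fmap(mdl, X_header=['heartrate', 'sysbp', 'resprate'])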
def plot_vitals(df, iid, df_death=None, df_censor=None):
plt.figure(figsize=[14,10])
idx = df['icustay_id']==iid
plt.plot( df.loc[idx, 'hr'], df.loc[idx, 'heartrate'], 'ro-', label='Heart rate' )
plt.plot( df.loc[idx, 'hr'], df.loc[idx, 'sysbp'], 'b^', label='Systolic BP', alpha=0.5 )
plt.plot( df.loc[idx, 'hr'], df.loc[idx, 'diasbp'], 'bv', label='Diastolic BP', alpha=0.5 )
plt.plot( df.loc[idx, 'hr'], df.loc[idx, 'meanbp'], 'bd', label='Mean BP', alpha=0.8 )
plt.plot( df.loc[idx, 'hr'], df.loc[idx, 'resprate'], 'go', label='Respiratory rate', alpha=0.5 )
if df_death is not None:
# add in discharge/death time
idx = df_death['icustay_id']==iid
plt.plot( np.ones([2,])*df_death.loc[idx, 'dischtime_hours'].values, [0,200], 'k--', linewidth=2, label='Time of discharge' )
        if df_death.loc[idx, 'deathtime_hours'].notnull().any():
plt.plot( np.ones([2,])*df_death.loc[idx, 'deathtime_hours'].values, [0,200], 'k-', linewidth=2, label='Time of death' )
plt.plot( np.ones([2,])*df_death.loc[idx, 'deathtime_hours'].values-24, [0,200], 'k:', linewidth=2, label='24 hr before death' )
plt.title('Died in hospital',fontsize=20)
plt.xlim([-1, df_death.loc[idx, 'dischtime_hours'].values+6])
if df_censor is not None:
idx = df_censor['icustay_id']==iid
if np.any(idx):
plt.plot( np.ones([2,])*df_censor.loc[idx, 'censortime_hours'].values, [0,200],
'm--', alpha=0.8, linewidth=3, label='DNR' )
plt.xlabel('Hours since ICU admission', fontsize=20)
plt.ylabel('Vital sign value', fontsize=20)
plt.grid()
plt.legend(loc='best')
plt.show()
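# A minimal call sketch for plot_vitals. The icustay_id is a made-up value;
# df, df_death and df_censor are assumed to follow the column layout used above.
def _plot_vitals_example(df, df_death, df_censor):
    plot_vitals(df, iid=200001, df_death=df_death, df_censor=df_censor)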
def plot_model_results(results, pretty_labels=None):
if pretty_labels is None:
pretty_labels = {'xgb': 'GB', 'rf': 'RF', 'logreg': 'LR', 'lasso': 'LASSO'}
# make sure pretty_labels has all model names as a key
for x in results.keys():
if x not in pretty_labels:
pretty_labels[x] = x
plt.figure(figsize=[12,8])
for m, mdl in enumerate(results):
curr_score = results[mdl]
plt.plot(m*np.ones(len(curr_score)), curr_score,
marker=marker[m], color=col[m],
markersize=10, linewidth=2, linestyle=':',
label=pretty_labels[mdl])
plt.ylabel('AUROC',fontsize=18)
plt.xlim([-1,m+1])
plt.ylim([0.7,1.0])
plt.grid()
plt.gca().set_xticks(np.linspace(0,m,m+1))
plt.gca().set_xticklabels([pretty_labels[x] for x in results.keys()])
for tick in plt.gca().xaxis.get_major_ticks():
tick.label.set_fontsize(20)
for tick in plt.gca().yaxis.get_major_ticks():
tick.label.set_fontsize(16)
#plt.legend(loc='lower right',fontsize=18)
plt.show()
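# A minimal usage sketch for plot_model_results. The AUROC values below are
# dummy numbers for illustration only (note the function relies on
# module-level `marker` and `col` sequences for styling).
def _plot_model_results_example():
    results = {'xgb': [0.85, 0.87, 0.86],
               'rf': [0.83, 0.84, 0.82],
               'logreg': [0.80, 0.81, 0.79]}
    plot_model_results(results)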
def load_design_matrix(co, df_additional_data=None, data_ext='', path=None, diedWithin=None):
# this function loads in the data from csv
# co is a dataframe with:
# - patients to include (all the icustay_ids in the index)
# - the outcome (first and only column)
if path is None:
path = ''
if data_ext != '' and data_ext[0] != '_':
data_ext = '_' + data_ext
df_offset = pd.read_csv(path + 'icustays_offset' + data_ext + '.csv')
df_offset['intime'] = pd.to_datetime(df_offset['intime'])
df_offset['outtime'] = pd.to_datetime(df_offset['outtime'])
df_offset['deathtime'] = pd.to_datetime(df_offset['deathtime'])
df_offset['icustay_id'] = df_offset['icustay_id'].astype(int)
df_offset = df_offset.loc[:,['icustay_id','intime','outtime','starttime','deathtime']]
df_offset.set_index('icustay_id',inplace=True)
# load in the design matrix
df_design = pd.read_csv(path + 'design_matrix' + data_ext + '.csv')
df_design['icustay_id'] = df_design['icustay_id'].astype(int)
df_design.set_index('icustay_id',inplace=True)
# join these dfs together, add in the static vars
df = co.merge(df_design, how='left', left_index=True, right_index=True)
if df_additional_data is not None:
df = df.merge(df_additional_data,how='left', left_index=True, right_index=True)
# change y to be "died within X seconds", where X is specified by the user
if diedWithin is not None:
df = df.merge(df_offset[['intime','deathtime','starttime']],
how='left', left_index=True, right_index=True)
df['hospital_expire_flag'] = np.zeros(df.shape[0],dtype=int)
idxUpdate = ~df['deathtime'].isnull()
df.loc[idxUpdate,'hospital_expire_flag'] = (df.loc[idxUpdate,'deathtime'] <
(df.loc[idxUpdate,'intime']
+ pd.to_timedelta(df.loc[idxUpdate,'starttime'], 's')
+ np.timedelta64(diedWithin, 's')))
# drop the columns temporarily added to redefine the outcome
df.drop('intime',axis=1,inplace=True)
df.drop('starttime',axis=1,inplace=True)
df.drop('deathtime',axis=1,inplace=True)
# HACK: drop some variables here we are not interested in
# they should be removed from vars_of_interest and data extraction re-run
vars_to_delete = ['bg_intubated_first', 'bg_ventilationrate_first', 'bg_ventilator_first',
'bg_intubated_last', 'bg_ventilationrate_last', 'bg_ventilator_last',
'rrt_min', 'vasopressor_min', 'vent_min',
'rrt_max', 'vasopressor_max', 'vent_max']
for v in vars_to_delete:
if v in df.columns:
df.drop(v, axis=1, inplace=True)
# move from a data frame into a numpy array
X = df.values.astype(float)
y = X[:,0]
icustay_id = df.index.values
# delete first column: the outcome
X = np.delete(X,0,axis=1)
# get a header row
    X_header = list(df.columns[1:])
return X, y, X_header
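# A minimal usage sketch for load_design_matrix. The cohort file name and
# path are illustrative assumptions; `co` must be indexed by icustay_id with
# the outcome as its first (and only) column.
def _load_design_matrix_example():
    import pandas as pd
    co = pd.read_csv('cohort.csv').set_index('icustay_id')
    # redefine the outcome as death within 24 hours (diedWithin is in seconds)
    return load_design_matrix(co, path='./data/', diedWithin=24 * 60 * 60)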
def get_predictions(df, df_static, mdl, iid):
df = df.loc[df['icustay_id']==iid,:]
tm = df['hr'].values
prob = list()
var_min, var_max, var_first, var_last, var_sum, var_first_early, var_last_early, var_static = vars_of_interest()
for t in tm:
time_dict = {iid: t}
X = get_design_matrix(df, time_dict, W=4, W_extra=24)
        # merge in the static variables from df_static
X = X.merge(df_static.set_index('icustay_id')[var_static], how='left', left_index=True, right_index=True)
# convert to numpy data
X = X.values
curr_prob = mdl.predict_proba(X)
prob.append(curr_prob[0,1])
return tm, prob
def get_data_at_time(df, df_static, iid, hour=0):
df = df.loc[df['icustay_id']==iid,:]
tm = df['hr'].values
var_min, var_max, var_first, var_last, var_sum, var_first_early, var_last_early, var_static = vars_of_interest()
idx = [i for i, tval in enumerate(tm) if tval==hour]
if len(idx)==0:
        idx = [i for i, tval in enumerate(tm) if tval < hour]
if len(idx)==0:
idx = 0
else:
idx = idx[-1]
print('Hour not found! Using closest previous value: {}.'.format(tm[idx]))
else:
idx=idx[0]
t=tm[idx]
time_dict = {iid: t}
X = get_design_matrix(df, time_dict, W=4, W_extra=24)
    # merge in the static variables from df_static
X = X.merge(df_static.set_index('icustay_id')[var_static], how='left', left_index=True, right_index=True)
# convert to numpy data
X = X.values
return X
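# A minimal usage sketch tying the two helpers above together; the
# icustay_id, model and hour are illustrative assumptions.
def _prediction_helpers_example(df, df_static, mdl):
    # risk trajectory over all available hours for one stay
    tm, prob = get_predictions(df, df_static, mdl, iid=200001)
    # single design-matrix row at (or just before) hour 12
    X = get_data_at_time(df, df_static, iid=200001, hour=12)
    return tm, prob, X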
|
mit
|
deepesch/Data-Science-45min-Intros
|
support-vector-machines-101/kernel-examples.py
|
26
|
2054
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
__author__="Josh Montague"
__license__="MIT License"
import sys
import json
import matplotlib.pyplot as plt
try:
import seaborn as sns
except ImportError as e:
sys.stderr.write("seaborn not installed. Using default matplotlib templates.")
import numpy as np
from sklearn.svm import SVC
# adapted from:
# http://scikit-learn.org/stable/auto_examples/svm/plot_svm_kernels.html
# Our dataset and targets
X = np.c_[(.4, -.7),
(-1.5, -1),
(-1.4, -.9),
(-1.3, -1.2),
(-1.1, -.2),
(-1.2, -.4),
(-.5, 1.2),
(-1.5, 2.1),
(1, 1),
# --
(1.3, .8),
(1.2, .5),
(.2, -2),
(.5, -2.4),
(.2, -2.3),
(0, -2.7),
(1.3, 2.1)].T
Y = [0] * 8 + [1] * 8
# figure number
fignum = 1
# fit the model
for kernel in ('linear', 'poly', 'rbf', 'sigmoid'):
#clf = SVC(kernel=kernel)
clf = SVC(kernel=kernel, gamma=2)
clf.fit(X, Y)
# plot the line, the points, and the nearest vectors to the plane
plt.figure(fignum, figsize=(8, 6))
plt.clf()
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1],
facecolors='none', zorder=10, s=300)
plt.scatter(X[:, 0], X[:, 1], c=Y, zorder=10, cmap=plt.cm.Paired, s=100)
plt.axis('tight')
x_min = -3
x_max = 3
y_min = -3
y_max = 3
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
Z = clf.decision_function(np.c_[XX.ravel(), YY.ravel()])
# Put the result into a color plot
Z = Z.reshape(XX.shape)
plt.figure(fignum, figsize=(4, 3))
#plt.pcolormesh(XX, YY, Z > 0, cmap=plt.cm.Paired)
plt.pcolormesh(XX, YY, Z > 0, alpha=0.1)
plt.contour(XX, YY, Z, colors=['k', 'k', 'k'], linestyles=['--', '-', '--'],
levels=[-.5, 0, .5])
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.title('{}'.format(kernel))
#plt.xticks(())
#plt.yticks(())
fignum = fignum + 1
plt.show()
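# A small follow-up sketch (not part of the original example): classify a
# new point with the last fitted kernel to show how the model is queried.
new_point = np.array([[0.5, 0.5]])
print("prediction: {}, decision value: {}".format(
    clf.predict(new_point), clf.decision_function(new_point)))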
|
unlicense
|
tosolveit/scikit-learn
|
examples/cluster/plot_dbscan.py
|
346
|
2479
|
# -*- coding: utf-8 -*-
"""
===================================
Demo of DBSCAN clustering algorithm
===================================
Finds core samples of high density and expands clusters from them.
"""
print(__doc__)
import numpy as np
from sklearn.cluster import DBSCAN
from sklearn import metrics
from sklearn.datasets.samples_generator import make_blobs
from sklearn.preprocessing import StandardScaler
##############################################################################
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1]]
X, labels_true = make_blobs(n_samples=750, centers=centers, cluster_std=0.4,
random_state=0)
X = StandardScaler().fit_transform(X)
##############################################################################
# Compute DBSCAN
db = DBSCAN(eps=0.3, min_samples=10).fit(X)
core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
core_samples_mask[db.core_sample_indices_] = True
labels = db.labels_
# Number of clusters in labels, ignoring noise if present.
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
print('Estimated number of clusters: %d' % n_clusters_)
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels_true, labels))
print("Completeness: %0.3f" % metrics.completeness_score(labels_true, labels))
print("V-measure: %0.3f" % metrics.v_measure_score(labels_true, labels))
print("Adjusted Rand Index: %0.3f"
% metrics.adjusted_rand_score(labels_true, labels))
print("Adjusted Mutual Information: %0.3f"
% metrics.adjusted_mutual_info_score(labels_true, labels))
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, labels))
##############################################################################
# Plot result
import matplotlib.pyplot as plt
# Black removed and is used for noise instead.
unique_labels = set(labels)
colors = plt.cm.Spectral(np.linspace(0, 1, len(unique_labels)))
for k, col in zip(unique_labels, colors):
if k == -1:
# Black used for noise.
col = 'k'
class_member_mask = (labels == k)
xy = X[class_member_mask & core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=14)
xy = X[class_member_mask & ~core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=6)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
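# A quick illustrative extension (not part of the original demo): sweeping
# eps shows how sensitive DBSCAN is to the neighborhood radius.
for eps in (0.1, 0.2, 0.3, 0.5):
    labels_ = DBSCAN(eps=eps, min_samples=10).fit(X).labels_
    n_clusters = len(set(labels_)) - (1 if -1 in labels_ else 0)
    print('eps=%.1f -> %d clusters, %d noise points'
          % (eps, n_clusters, list(labels_).count(-1)))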
|
bsd-3-clause
|
scr4t/rep
|
rep/estimators/tmva.py
|
3
|
15341
|
"""
These classes are wrappers for TMVA, a machine learning library used in physics that works with .root format files (a C++ library).
With them you can simply use TMVA from python. TMVA contains classification and regression algorithms, including neural networks.
`TMVA guide <http://mirror.yandex.ru/gentoo-distfiles/distfiles/TMVAUsersGuide-v4.03.pdf>`_
"""
from __future__ import division, print_function, absolute_import
from abc import ABCMeta
from logging import getLogger
import os
import tempfile
import subprocess
from subprocess import PIPE
import shutil
import sys
from .interface import Classifier, Regressor
from .utils import check_inputs, score_to_proba, proba_to_two_dimension
from six.moves import cPickle
__author__ = 'Tatiana Likhomanenko'
logger = getLogger(__name__)
# those parameters that shall not be passed to options of TMVA classifier
_PASS_PARAMETERS = {'random_state'}
__all__ = ['TMVABase', 'TMVAClassifier', 'TMVARegressor']
class _AdditionalInformation():
"""
Additional information for tmva factory (used in training)
"""
def __init__(self, directory, model_type='classification'):
self.directory = directory
self.tmva_root = 'result.root'
self.tmva_job = "TMVAEstimation"
self.model_type = model_type
class _AdditionalInformationPredict():
"""
Additional information for tmva factory (used in predictions)
"""
def __init__(self, directory, xml_file, method_name, model_type=('classification', None)):
self.directory = directory
self.xml_file = xml_file
self.method_name = method_name
self.model_type = model_type
self.result_filename = os.path.join(directory, 'dump_predictions.pkl')
class TMVABase(object):
"""
TMVABase - base estimator for tmva wrappers.
Parameters:
-----------
:param str method: algorithm method (default='kBDT')
:param features: features used in training
:type features: list[str] or None
:param str factory_options: system options
:param dict method_parameters: estimator options
    .. note:: TMVA doesn't support staged predictions and feature importances.
"""
__metaclass__ = ABCMeta
def __init__(self,
factory_options="",
method='kBDT',
**method_parameters):
self.method = method
self._method_name = 'REP_Estimator'
self.factory_options = factory_options
self.method_parameters = method_parameters
# contents of xml file with formula, read into memory
self.formula_xml = None
@staticmethod
def _create_tmp_directory():
return tempfile.mkdtemp(dir=os.getcwd())
@staticmethod
def _remove_tmp_directory(directory):
shutil.rmtree(directory, ignore_errors=True)
def _fit(self, X, y, sample_weight=None, model_type='classification'):
"""
        Train the estimator
:param pandas.DataFrame X: data shape [n_samples, n_features]
:param list | numpy.array y: values - array-like of shape [n_samples]
:param list | numpy.array sample_weight: weight of events,
array-like of shape [n_samples] or None if all weights are equal
:return: self
"""
# saving data to 2 different root files.
directory = self._create_tmp_directory()
add_info = _AdditionalInformation(directory, model_type=model_type)
try:
self._run_tmva_training(add_info, X, y, sample_weight)
finally:
self._remove_tmp_directory(directory)
return self
def _run_tmva_training(self, info, X, y, sample_weight):
"""
Run subprocess to train tmva factory
:param info: class with additional information
"""
tmva_process = subprocess.Popen(
'cd {directory}; {executable} -c "from rep.estimators import _tmvaFactory; _tmvaFactory.main()"'.format(
directory=info.directory,
executable=sys.executable),
stdin=PIPE, stdout=PIPE, stderr=subprocess.STDOUT,
shell=True)
cPickle.dump(self, tmva_process.stdin)
cPickle.dump(info, tmva_process.stdin)
cPickle.dump(X, tmva_process.stdin)
cPickle.dump(y, tmva_process.stdin)
cPickle.dump(sample_weight, tmva_process.stdin)
stdout, stderr = tmva_process.communicate()
assert tmva_process.returncode == 0, \
            'ERROR: TMVA process finished incorrectly \n LOG: %s \n %s' % (stderr, stdout)
xml_filename = os.path.join(info.directory, 'weights',
'{job}_{name}.weights.xml'.format(job=info.tmva_job, name=self._method_name))
with open(xml_filename, 'r') as xml_file:
self.formula_xml = xml_file.read()
def _check_fitted(self):
assert self.formula_xml is not None, "Classifier wasn't fitted, please call `fit` first"
def _predict(self, X, model_type=('classification', None)):
"""
Predict data
:param pandas.DataFrame X: data shape [n_samples, n_features]
:return: predicted values of shape n_samples
"""
self._check_fitted()
directory = self._create_tmp_directory()
try:
with tempfile.NamedTemporaryFile(mode="w", suffix='.xml', dir=directory, delete=True) as file_xml:
file_xml.write(self.formula_xml)
file_xml.flush()
add_info = _AdditionalInformationPredict(directory, file_xml.name, self._method_name,
model_type=model_type)
prediction = self._run_tmva_predict(add_info, X)
finally:
self._remove_tmp_directory(directory)
return prediction
def _run_tmva_predict(self, info, data):
"""
        Run subprocess to compute predictions with the trained tmva estimator
:param info: class with additional information
"""
tmva_process = subprocess.Popen(
'cd {directory}; {executable} -c "from rep.estimators import _tmvaReader; _tmvaReader.main()"'.format(
directory=info.directory,
executable=sys.executable),
stdin=PIPE, stdout=PIPE, stderr=subprocess.STDOUT,
shell=True)
cPickle.dump(info, tmva_process.stdin)
cPickle.dump(data, tmva_process.stdin)
stdout, stderr = tmva_process.communicate()
assert tmva_process.returncode == 0, \
            'ERROR: TMVA process finished incorrectly \n LOG: %s \n %s' % (stderr, stdout)
with open(info.result_filename, 'rb') as predictions_file:
predictions = cPickle.load(predictions_file)
return predictions
class TMVAClassifier(TMVABase, Classifier):
"""
TMVAClassifier wraps classifiers from TMVA (CERN library for machine learning)
Parameters:
-----------
:param str method: algorithm method (default='kBDT')
:param features: features used in training
:type features: list[str] or None
:param str factory_options: options, for example::
"!V:!Silent:Color:Transformations=I;D;P;G,D"
:param str sigmoid_function: function which is used to convert TMVA output to probabilities;
* *identity* (use for svm, mlp) --- the same output, use this for methods returning class probabilities
* *sigmoid* --- sigmoid transformation, use it if output varies in range [-infinity, +infinity]
* *bdt* (for bdt algorithms output varies in range [-1, 1])
* *sig_eff=0.4* --- for rectangular cut optimization methods,
for instance, here 0.4 will be used as signal efficiency to evaluate MVA,
(put any float number from [0, 1])
:param dict method_parameters: estimator options, example: NTrees=100, BoostType='Grad'
.. warning::
        TMVA doesn't support *staged_predict_proba()* and *feature_importances_*
.. warning::
TMVA doesn't support multiclassification, only two-class classification
`TMVA guide <http://mirror.yandex.ru/gentoo-distfiles/distfiles/TMVAUsersGuide-v4.03.pdf>`_
"""
def __init__(self,
method='kBDT',
features=None,
factory_options="",
sigmoid_function='bdt',
**method_parameters):
# !V:!Silent:Color:Transformations=I;D;P;G,D
TMVABase.__init__(self, factory_options=factory_options, method=method, **method_parameters)
Classifier.__init__(self, features=features)
self.sigmoid_function = sigmoid_function
def _set_classes_special(self, y):
self._set_classes(y)
        assert self.n_classes_ == 2, "Only 2 classes are supported (data contains {})".format(self.n_classes_)
def set_params(self, **params):
"""
Set the parameters of this estimator.
:param dict params: parameters to set in model
"""
for k, v in params.items():
if hasattr(self, k):
setattr(self, k, v)
else:
if k in _PASS_PARAMETERS:
continue
self.method_parameters[k] = v
def get_params(self, deep=True):
"""
Get parameters for this estimator.
        Parameters
        ----------
        deep : boolean, optional
            If True, will return the parameters for this estimator and contained subobjects that are estimators.
        Returns
        -------
        params : mapping of string to any
            Parameter names mapped to their values.
"""
parameters = self.method_parameters.copy()
parameters['method'] = self.method
parameters['factory_options'] = self.factory_options
parameters['features'] = self.features
return parameters
def fit(self, X, y, sample_weight=None):
"""
Train the classifier
:param pandas.DataFrame X: data shape [n_samples, n_features]
:param y: labels of events - array-like of shape [n_samples]
:param sample_weight: weight of events,
array-like of shape [n_samples] or None if all weights are equal
:return: self
"""
X, y, sample_weight = check_inputs(X, y, sample_weight=sample_weight, allow_none_weights=False)
X = self._get_features(X).copy()
self._set_classes_special(y)
if self.n_classes_ == 2:
self.factory_options = '{}:AnalysisType=Classification'.format(self.factory_options)
else:
self.factory_options = '{}:AnalysisType=Multiclass'.format(self.factory_options)
return self._fit(X, y, sample_weight=sample_weight)
def predict_proba(self, X):
"""
Predict probabilities for new data.
:param pandas.DataFrame X: data shape [n_samples, n_features]
:rtype: numpy.array of shape [n_samples, n_classes] with probabilities
"""
X = self._get_features(X)
prediction = self._predict(X, model_type=('classification', self.sigmoid_function))
return self._convert_output(prediction)
def _convert_output(self, prediction):
variants = {'bdt', 'sigmoid', 'identity'}
if 'sig_eff' in self.sigmoid_function:
return proba_to_two_dimension(prediction)
assert self.sigmoid_function in variants, \
'sigmoid_function parameter must be one of {}, instead of {}'.format(variants, self.sigmoid_function)
if self.sigmoid_function == 'sigmoid':
return score_to_proba(prediction)
elif self.sigmoid_function == 'bdt':
return proba_to_two_dimension((prediction + 1.) / 2.)
else:
return proba_to_two_dimension(prediction)
def staged_predict_proba(self, X):
"""
Predicts probabilities on each stage
:param pandas.DataFrame X: data shape [n_samples, n_features]
:return: iterator
.. warning:: Not supported for TMVA (**AttributeError** will be thrown)
"""
raise AttributeError("Not supported for TMVA")
class TMVARegressor(TMVABase, Regressor):
"""
TMVARegressor wraps regressors from TMVA (CERN library for machine learning)
Parameters:
-----------
:param str method: algorithm method (default='kBDT')
:param features: features used in training
:type features: list[str] or None
:param str factory_options: options, for example::
"!V:!Silent:Color:Transformations=I;D;P;G,D"
:param dict method_parameters: estimator options, example: NTrees=100, BoostType=Grad
.. note::
        TMVA doesn't support *staged_predict()* and *feature_importances_*
`TMVA guide <http://mirror.yandex.ru/gentoo-distfiles/distfiles/TMVAUsersGuide-v4.03.pdf>`_
"""
def __init__(self,
method='kBDT',
features=None,
factory_options="",
**method_parameters):
TMVABase.__init__(self, factory_options=factory_options, method=method, **method_parameters)
Regressor.__init__(self, features=features)
def set_params(self, **params):
"""
Set the parameters of this estimator.
:param dict params: parameters to set in model
"""
for k, v in params.items():
if hasattr(self, k):
setattr(self, k, v)
else:
if k in _PASS_PARAMETERS:
continue
self.method_parameters[k] = v
def get_params(self, deep=True):
"""
Get parameters for this estimator.
        Parameters
        ----------
        deep : boolean, optional
            If True, will return the parameters for this estimator and contained subobjects that are estimators.
        Returns
        -------
        params : mapping of string to any
            Parameter names mapped to their values.
"""
parameters = self.method_parameters.copy()
parameters['method'] = self.method
parameters['factory_options'] = self.factory_options
parameters['features'] = self.features
return parameters
def fit(self, X, y, sample_weight=None):
"""
        Train the regressor
:param pandas.DataFrame X: data shape [n_samples, n_features]
:param y: values - array-like of shape [n_samples]
:param sample_weight: weight of events,
array-like of shape [n_samples] or None if all weights are equal
:return: self
"""
X, y, sample_weight = check_inputs(X, y, sample_weight=sample_weight, allow_none_weights=False)
X = self._get_features(X).copy()
self.factory_options = '{}:AnalysisType=Regression'.format(self.factory_options)
return self._fit(X, y, sample_weight=sample_weight, model_type='regression')
def predict(self, X):
"""
Predict data
:param pandas.DataFrame X: data shape [n_samples, n_features]
:return: numpy.array of shape n_samples with values
"""
X = self._get_features(X)
return self._predict(X, model_type=('regression', None))
def staged_predict(self, X):
"""
Predicts values on each stage
:param pandas.DataFrame X: data shape [n_samples, n_features]
:return: iterator
.. warning:: Not supported for TMVA (**AttributeError** will be thrown)
"""
raise AttributeError("Not supported for TMVA")
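# A minimal usage sketch (illustrative; requires ROOT/TMVA to be installed,
# and the toy data below is an assumption):
def _tmva_classifier_example():
    import numpy as np
    import pandas
    X = pandas.DataFrame(np.random.rand(100, 2), columns=['a', 'b'])
    y = (X['a'] > 0.5).astype(int)
    # method parameters such as NTrees are forwarded to TMVA as options
    clf = TMVAClassifier(method='kBDT', features=['a', 'b'], NTrees=50)
    clf.fit(X, y)
    return clf.predict_proba(X)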
|
apache-2.0
|
yangautumn/turing_pattern
|
amorphous_pattern/test.py
|
1
|
1800
|
"""
# demo of plotting squared subfigures
"""
# import matplotlib.pyplot as plt
# fig, ax = plt.subplots()
# x = [0, 0.2, 0.4, 0.6, 0.8]
# y = [0, 0.5, 1, 1.5, 2.0]
# colors = ['k']*len(x)
# ax.scatter(x, y, c=colors, alpha=0.5)
# ax.set_xlim((0,2))
# ax.set_ylim((0,2))
# x0,x1 = ax.get_xlim()
# y0,y1 = ax.get_ylim()
# ax.set_aspect(abs(x1-x0)/abs(y1-y0))
# ax.grid(b=True, which='major', color='k', linestyle='--')
# fig.savefig('test.png', dpi=600)
# plt.close(fig)
"""
# demo of plotting ellipse
"""
# import matplotlib.pyplot as plt
# import numpy.random as rnd
# from matplotlib.patches import Ellipse
# NUM = 250
# ells = [Ellipse(xy=rnd.rand(2)*10, width=rnd.rand(), height=rnd.rand(), angle=rnd.rand()*360)
# for i in range(NUM)]
# fig = plt.figure(0)
# ax = fig.add_subplot(111, aspect='equal')
# for e in ells:
# ax.add_artist(e)
# e.set_clip_box(ax.bbox)
# e.set_alpha(rnd.rand())
# e.set_facecolor(rnd.rand(3))
# ax.set_xlim(0, 10)
# ax.set_ylim(0, 10)
# plt.show()
import os, sys, time
# interval = 10
# while True:
# for i in range(interval, 0, -1):
# sys.stdout.write("\033[K") # Clear to the end of line
# print("{} model(s) are being recorded. Next check in {} seconds".format(i%3+1, i))
# sys.stdout.write("\033[K")
# time.sleep(1)
# print("the following models are being recorded: {}".format(i), end="\r")
# time.sleep(1)
# sys.stdout.write("\033[F") # Cursor up one line
while True:
print("Yang Li", end='\r')
time.sleep(1)
sys.stdout.write("\033[K") # Clear to the end of line
time.sleep(1)
print("Zhang")
time.sleep(1)
print('Wenxin', end='\r')
time.sleep(1)
sys.stdout.write("\033[K") # Clear to the end of line
sys.stdout.write("\033[F") # Cursor up one line
|
gpl-3.0
|
mrshu/scikit-learn
|
benchmarks/bench_plot_svd.py
|
3
|
2806
|
"""Benchmarks of Singular Values Decomposition (Exact and Approximate)
The data is mostly low rank but is a fat infinite tail.
"""
import gc
from time import time
import numpy as np
from collections import defaultdict
from scipy.linalg import svd
from sklearn.utils.extmath import randomized_svd
from sklearn.datasets.samples_generator import make_low_rank_matrix
def compute_bench(samples_range, features_range, n_iter=3, rank=50):
it = 0
results = defaultdict(lambda: [])
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
it += 1
            print('====================')
            print('Iteration %03d of %03d' % (it, max_it))
            print('====================')
X = make_low_rank_matrix(n_samples, n_features,
effective_rank=rank,
tail_strength=0.2)
gc.collect()
print "benching scipy svd: "
tstart = time()
svd(X, full_matrices=False)
results['scipy svd'].append(time() - tstart)
gc.collect()
print "benching scikit-learn randomized_svd: n_iter=0"
tstart = time()
randomized_svd(X, rank, n_iter=0)
results['scikit-learn randomized_svd (n_iter=0)'].append(
time() - tstart)
gc.collect()
print ("benching scikit-learn randomized_svd: n_iter=%d "
% n_iter)
tstart = time()
randomized_svd(X, rank, n_iter=n_iter)
results['scikit-learn randomized_svd (n_iter=%d)'
% n_iter].append(time() - tstart)
return results
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
import matplotlib.pyplot as plt
samples_range = np.linspace(2, 1000, 4).astype(np.int)
features_range = np.linspace(2, 1000, 4).astype(np.int)
results = compute_bench(samples_range, features_range)
fig = plt.figure()
ax = fig.gca(projection='3d')
    for c, (label, timings) in zip('rbg', sorted(results.items())):
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
# plot the actual surface
ax.plot_surface(X, Y, Z, rstride=8, cstride=8, alpha=0.3,
color=c)
# dummy point plot to stick the legend to since surface plot do not
# support legends (yet?)
ax.plot([1], [1], [1], color=c, label=label)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
ax.set_zlabel('time (s)')
ax.legend()
plt.show()
|
bsd-3-clause
|
alexeyum/scikit-learn
|
sklearn/grid_search.py
|
3
|
38523
|
"""
The :mod:`sklearn.grid_search` includes utilities to fine-tune the parameters
of an estimator.
"""
from __future__ import print_function
# Author: Alexandre Gramfort <[email protected]>,
# Gael Varoquaux <[email protected]>
# Andreas Mueller <[email protected]>
# Olivier Grisel <[email protected]>
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
from collections import Mapping, namedtuple, Sized
from functools import partial, reduce
from itertools import product
import operator
import warnings
import numpy as np
from .base import BaseEstimator, is_classifier, clone
from .base import MetaEstimatorMixin
from .cross_validation import check_cv
from .cross_validation import _fit_and_score
from .externals.joblib import Parallel, delayed
from .externals import six
from .utils import check_random_state
from .utils.random import sample_without_replacement
from .utils.validation import _num_samples, indexable
from .utils.metaestimators import if_delegate_has_method
from .metrics.scorer import check_scoring
from .exceptions import ChangedBehaviorWarning
__all__ = ['GridSearchCV', 'ParameterGrid', 'fit_grid_point',
'ParameterSampler', 'RandomizedSearchCV']
warnings.warn("This module has been deprecated in favor of the "
"model_selection module into which all the refactored classes "
"and functions are moved. This module will be removed in 0.20.",
DeprecationWarning)
class ParameterGrid(object):
"""Grid of parameters with a discrete number of values for each.
Can be used to iterate over parameter value combinations with the
Python built-in function iter.
Read more in the :ref:`User Guide <grid_search>`.
Parameters
----------
param_grid : dict of string to sequence, or sequence of such
The parameter grid to explore, as a dictionary mapping estimator
parameters to sequences of allowed values.
An empty dict signifies default parameters.
A sequence of dicts signifies a sequence of grids to search, and is
useful to avoid exploring parameter combinations that make no sense
or have no effect. See the examples below.
Examples
--------
>>> from sklearn.grid_search import ParameterGrid
>>> param_grid = {'a': [1, 2], 'b': [True, False]}
>>> list(ParameterGrid(param_grid)) == (
... [{'a': 1, 'b': True}, {'a': 1, 'b': False},
... {'a': 2, 'b': True}, {'a': 2, 'b': False}])
True
>>> grid = [{'kernel': ['linear']}, {'kernel': ['rbf'], 'gamma': [1, 10]}]
>>> list(ParameterGrid(grid)) == [{'kernel': 'linear'},
... {'kernel': 'rbf', 'gamma': 1},
... {'kernel': 'rbf', 'gamma': 10}]
True
>>> ParameterGrid(grid)[1] == {'kernel': 'rbf', 'gamma': 1}
True
See also
--------
:class:`GridSearchCV`:
uses ``ParameterGrid`` to perform a full parallelized parameter search.
"""
def __init__(self, param_grid):
if isinstance(param_grid, Mapping):
# wrap dictionary in a singleton list to support either dict
# or list of dicts
param_grid = [param_grid]
self.param_grid = param_grid
def __iter__(self):
"""Iterate over the points in the grid.
Returns
-------
params : iterator over dict of string to any
Yields dictionaries mapping each estimator parameter to one of its
allowed values.
"""
for p in self.param_grid:
# Always sort the keys of a dictionary, for reproducibility
items = sorted(p.items())
if not items:
yield {}
else:
keys, values = zip(*items)
for v in product(*values):
params = dict(zip(keys, v))
yield params
def __len__(self):
"""Number of points on the grid."""
# Product function that can handle iterables (np.product can't).
product = partial(reduce, operator.mul)
return sum(product(len(v) for v in p.values()) if p else 1
for p in self.param_grid)
def __getitem__(self, ind):
"""Get the parameters that would be ``ind``th in iteration
Parameters
----------
ind : int
The iteration index
Returns
-------
params : dict of string to any
Equal to list(self)[ind]
"""
# This is used to make discrete sampling without replacement memory
# efficient.
for sub_grid in self.param_grid:
# XXX: could memoize information used here
if not sub_grid:
if ind == 0:
return {}
else:
ind -= 1
continue
# Reverse so most frequent cycling parameter comes first
keys, values_lists = zip(*sorted(sub_grid.items())[::-1])
sizes = [len(v_list) for v_list in values_lists]
total = np.product(sizes)
if ind >= total:
# Try the next grid
ind -= total
else:
out = {}
for key, v_list, n in zip(keys, values_lists, sizes):
ind, offset = divmod(ind, n)
out[key] = v_list[offset]
return out
raise IndexError('ParameterGrid index out of range')
class ParameterSampler(object):
"""Generator on parameters sampled from given distributions.
Non-deterministic iterable over random candidate combinations for hyper-
parameter search. If all parameters are presented as a list,
sampling without replacement is performed. If at least one parameter
is given as a distribution, sampling with replacement is used.
It is highly recommended to use continuous distributions for continuous
parameters.
Note that as of SciPy 0.12, the ``scipy.stats.distributions`` do not accept
a custom RNG instance and always use the singleton RNG from
``numpy.random``. Hence setting ``random_state`` will not guarantee a
deterministic iteration whenever ``scipy.stats`` distributions are used to
define the parameter search space.
Read more in the :ref:`User Guide <grid_search>`.
Parameters
----------
param_distributions : dict
Dictionary where the keys are parameters and values
are distributions from which a parameter is to be sampled.
Distributions either have to provide a ``rvs`` function
to sample from them, or can be given as a list of values,
where a uniform distribution is assumed.
n_iter : integer
Number of parameter settings that are produced.
random_state : int or RandomState
Pseudo random number generator state used for random uniform sampling
from lists of possible values instead of scipy.stats distributions.
Returns
-------
params : dict of string to any
**Yields** dictionaries mapping each estimator parameter to
        a sampled value.
Examples
--------
>>> from sklearn.grid_search import ParameterSampler
>>> from scipy.stats.distributions import expon
>>> import numpy as np
>>> np.random.seed(0)
>>> param_grid = {'a':[1, 2], 'b': expon()}
>>> param_list = list(ParameterSampler(param_grid, n_iter=4))
>>> rounded_list = [dict((k, round(v, 6)) for (k, v) in d.items())
... for d in param_list]
>>> rounded_list == [{'b': 0.89856, 'a': 1},
... {'b': 0.923223, 'a': 1},
... {'b': 1.878964, 'a': 2},
... {'b': 1.038159, 'a': 2}]
True
"""
def __init__(self, param_distributions, n_iter, random_state=None):
self.param_distributions = param_distributions
self.n_iter = n_iter
self.random_state = random_state
def __iter__(self):
# check if all distributions are given as lists
# in this case we want to sample without replacement
all_lists = np.all([not hasattr(v, "rvs")
for v in self.param_distributions.values()])
rnd = check_random_state(self.random_state)
if all_lists:
# look up sampled parameter settings in parameter grid
param_grid = ParameterGrid(self.param_distributions)
grid_size = len(param_grid)
if grid_size < self.n_iter:
raise ValueError(
"The total space of parameters %d is smaller "
"than n_iter=%d." % (grid_size, self.n_iter)
+ " For exhaustive searches, use GridSearchCV.")
for i in sample_without_replacement(grid_size, self.n_iter,
random_state=rnd):
yield param_grid[i]
else:
# Always sort the keys of a dictionary, for reproducibility
items = sorted(self.param_distributions.items())
for _ in six.moves.range(self.n_iter):
params = dict()
for k, v in items:
if hasattr(v, "rvs"):
params[k] = v.rvs()
else:
params[k] = v[rnd.randint(len(v))]
yield params
def __len__(self):
"""Number of points that will be sampled."""
return self.n_iter
def fit_grid_point(X, y, estimator, parameters, train, test, scorer,
verbose, error_score='raise', **fit_params):
"""Run fit on one set of parameters.
Parameters
----------
X : array-like, sparse matrix or list
Input data.
y : array-like or None
Targets for input data.
estimator : estimator object
        An object of that type is instantiated for each grid point.
This is assumed to implement the scikit-learn estimator interface.
Either estimator needs to provide a ``score`` function,
or ``scoring`` must be passed.
parameters : dict
Parameters to be set on estimator for this grid point.
train : ndarray, dtype int or bool
Boolean mask or indices for training set.
test : ndarray, dtype int or bool
Boolean mask or indices for test set.
scorer : callable or None.
If provided must be a scorer callable object / function with signature
``scorer(estimator, X, y)``.
verbose : int
Verbosity level.
**fit_params : kwargs
Additional parameter passed to the fit function of the estimator.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Returns
-------
score : float
Score of this parameter setting on given training / test split.
parameters : dict
The parameters that have been evaluated.
n_samples_test : int
Number of test samples in this split.
"""
score, n_samples_test, _ = _fit_and_score(estimator, X, y, scorer, train,
test, verbose, parameters,
fit_params, error_score)
return score, parameters, n_samples_test
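# A minimal usage sketch for fit_grid_point (illustrative; the import paths
# match this deprecated module's era of scikit-learn):
def _fit_grid_point_example():
    import numpy as np
    from sklearn import datasets, svm
    from sklearn.metrics.scorer import check_scoring
    iris = datasets.load_iris()
    estimator = svm.SVC()
    scorer = check_scoring(estimator, scoring='accuracy')
    # shuffle so train and test both contain all three classes
    rng = np.random.RandomState(0)
    indices = rng.permutation(len(iris.target))
    train, test = indices[:100], indices[100:]
    score, parameters, n_test = fit_grid_point(
        iris.data, iris.target, estimator, {'C': 1.0},
        train, test, scorer, verbose=0)
    return score, parameters, n_test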
def _check_param_grid(param_grid):
if hasattr(param_grid, 'items'):
param_grid = [param_grid]
for p in param_grid:
for v in p.values():
if isinstance(v, np.ndarray) and v.ndim > 1:
raise ValueError("Parameter array should be one-dimensional.")
check = [isinstance(v, k) for k in (list, tuple, np.ndarray)]
if True not in check:
raise ValueError("Parameter values should be a list.")
if len(v) == 0:
raise ValueError("Parameter values should be a non-empty "
"list.")
class _CVScoreTuple (namedtuple('_CVScoreTuple',
('parameters',
'mean_validation_score',
'cv_validation_scores'))):
    # A raw namedtuple is very memory efficient as it packs the attributes
    # in a struct to get rid of the __dict__ of attributes; in particular it
    # does not copy the string for the keys on each instance.
    # By deriving a namedtuple class just to introduce the __repr__ method we
    # would also reintroduce the __dict__ on the instance. By telling the
    # Python interpreter that this subclass uses static __slots__ instead of
    # dynamic attributes we circumvent this issue. Furthermore we don't need
    # any additional slot in the subclass so we set __slots__ to the empty
    # tuple.
__slots__ = ()
def __repr__(self):
"""Simple custom repr to summarize the main info"""
return "mean: {0:.5f}, std: {1:.5f}, params: {2}".format(
self.mean_validation_score,
np.std(self.cv_validation_scores),
self.parameters)
class BaseSearchCV(six.with_metaclass(ABCMeta, BaseEstimator,
MetaEstimatorMixin)):
"""Base class for hyper parameter search with cross-validation."""
@abstractmethod
def __init__(self, estimator, scoring=None,
fit_params=None, n_jobs=1, iid=True,
refit=True, cv=None, verbose=0, pre_dispatch='2*n_jobs',
error_score='raise'):
self.scoring = scoring
self.estimator = estimator
self.n_jobs = n_jobs
self.fit_params = fit_params if fit_params is not None else {}
self.iid = iid
self.refit = refit
self.cv = cv
self.verbose = verbose
self.pre_dispatch = pre_dispatch
self.error_score = error_score
@property
def _estimator_type(self):
return self.estimator._estimator_type
def score(self, X, y=None):
"""Returns the score on the given data, if the estimator has been refit.
This uses the score defined by ``scoring`` where provided, and the
``best_estimator_.score`` method otherwise.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Input data, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
Returns
-------
score : float
Notes
-----
* The long-standing behavior of this method changed in version 0.16.
* It no longer uses the metric provided by ``estimator.score`` if the
``scoring`` parameter was set when fitting.
"""
if self.scorer_ is None:
raise ValueError("No score function explicitly defined, "
"and the estimator doesn't provide one %s"
% self.best_estimator_)
if self.scoring is not None and hasattr(self.best_estimator_, 'score'):
warnings.warn("The long-standing behavior to use the estimator's "
"score function in {0}.score has changed. The "
"scoring parameter is now used."
"".format(self.__class__.__name__),
ChangedBehaviorWarning)
return self.scorer_(self.best_estimator_, X, y)
@if_delegate_has_method(delegate='estimator')
def predict(self, X):
"""Call predict on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.predict(X)
@if_delegate_has_method(delegate='estimator')
def predict_proba(self, X):
"""Call predict_proba on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict_proba``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.predict_proba(X)
@if_delegate_has_method(delegate='estimator')
def predict_log_proba(self, X):
"""Call predict_log_proba on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict_log_proba``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.predict_log_proba(X)
@if_delegate_has_method(delegate='estimator')
def decision_function(self, X):
"""Call decision_function on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``decision_function``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.decision_function(X)
@if_delegate_has_method(delegate='estimator')
def transform(self, X):
"""Call transform on the estimator with the best found parameters.
Only available if the underlying estimator supports ``transform`` and
``refit=True``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.transform(X)
@if_delegate_has_method(delegate='estimator')
def inverse_transform(self, Xt):
"""Call inverse_transform on the estimator with the best found parameters.
Only available if the underlying estimator implements ``inverse_transform`` and
``refit=True``.
Parameters
-----------
Xt : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
        return self.best_estimator_.inverse_transform(Xt)
def _fit(self, X, y, parameter_iterable):
"""Actual fitting, performing the search over parameters."""
estimator = self.estimator
cv = self.cv
self.scorer_ = check_scoring(self.estimator, scoring=self.scoring)
n_samples = _num_samples(X)
X, y = indexable(X, y)
if y is not None:
if len(y) != n_samples:
raise ValueError('Target variable (y) has a different number '
'of samples (%i) than data (X: %i samples)'
% (len(y), n_samples))
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
if self.verbose > 0:
if isinstance(parameter_iterable, Sized):
n_candidates = len(parameter_iterable)
print("Fitting {0} folds for each of {1} candidates, totalling"
" {2} fits".format(len(cv), n_candidates,
n_candidates * len(cv)))
base_estimator = clone(self.estimator)
pre_dispatch = self.pre_dispatch
out = Parallel(
n_jobs=self.n_jobs, verbose=self.verbose,
pre_dispatch=pre_dispatch
)(
delayed(_fit_and_score)(clone(base_estimator), X, y, self.scorer_,
train, test, self.verbose, parameters,
self.fit_params, return_parameters=True,
error_score=self.error_score)
for parameters in parameter_iterable
for train, test in cv)
        # out is a list of quadruplets: (score, n_test_samples, time, parameters)
n_fits = len(out)
n_folds = len(cv)
scores = list()
grid_scores = list()
for grid_start in range(0, n_fits, n_folds):
n_test_samples = 0
score = 0
all_scores = []
for this_score, this_n_test_samples, _, parameters in \
out[grid_start:grid_start + n_folds]:
all_scores.append(this_score)
if self.iid:
this_score *= this_n_test_samples
n_test_samples += this_n_test_samples
score += this_score
if self.iid:
score /= float(n_test_samples)
else:
score /= float(n_folds)
scores.append((score, parameters))
# TODO: shall we also store the test_fold_sizes?
grid_scores.append(_CVScoreTuple(
parameters,
score,
np.array(all_scores)))
# Store the computed scores
self.grid_scores_ = grid_scores
# Find the best parameters by comparing on the mean validation score:
# note that `sorted` is deterministic in the way it breaks ties
best = sorted(grid_scores, key=lambda x: x.mean_validation_score,
reverse=True)[0]
self.best_params_ = best.parameters
self.best_score_ = best.mean_validation_score
if self.refit:
# fit the best estimator using the entire dataset
# clone first to work around broken estimators
best_estimator = clone(base_estimator).set_params(
**best.parameters)
if y is not None:
best_estimator.fit(X, y, **self.fit_params)
else:
best_estimator.fit(X, **self.fit_params)
self.best_estimator_ = best_estimator
return self
class GridSearchCV(BaseSearchCV):
"""Exhaustive search over specified parameter values for an estimator.
Important members are fit, predict.
GridSearchCV implements a "fit" and a "score" method.
It also implements "predict", "predict_proba", "decision_function",
"transform" and "inverse_transform" if they are implemented in the
estimator used.
The parameters of the estimator used to apply these methods are optimized
by cross-validated grid-search over a parameter grid.
Read more in the :ref:`User Guide <grid_search>`.
Parameters
----------
estimator : estimator object.
        An object of that type is instantiated for each grid point.
This is assumed to implement the scikit-learn estimator interface.
Either estimator needs to provide a ``score`` function,
or ``scoring`` must be passed.
param_grid : dict or list of dictionaries
Dictionary with parameters names (string) as keys and lists of
parameter settings to try as values, or a list of such
dictionaries, in which case the grids spanned by each dictionary
in the list are explored. This enables searching over any sequence
of parameter settings.
scoring : string, callable or None, default=None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
If ``None``, the ``score`` method of the estimator is used.
fit_params : dict, optional
Parameters to pass to the fit method.
n_jobs : int, default=1
Number of jobs to run in parallel.
.. versionchanged:: 0.17
Upgraded to joblib 0.9.3.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
iid : boolean, default=True
If True, the data is assumed to be identically distributed across
the folds, and the loss minimized is the total loss per sample,
and not the mean loss across the folds.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass,
:class:`sklearn.model_selection.StratifiedKFold` is used. In all
other cases, :class:`sklearn.model_selection.KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
refit : boolean, default=True
Refit the best estimator with the entire dataset.
If "False", it is impossible to make predictions using
this GridSearchCV instance after fitting.
verbose : integer
Controls the verbosity: the higher, the more messages.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Examples
--------
>>> from sklearn import svm, grid_search, datasets
>>> iris = datasets.load_iris()
>>> parameters = {'kernel':('linear', 'rbf'), 'C':[1, 10]}
>>> svr = svm.SVC()
>>> clf = grid_search.GridSearchCV(svr, parameters)
>>> clf.fit(iris.data, iris.target)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
GridSearchCV(cv=None, error_score=...,
estimator=SVC(C=1.0, cache_size=..., class_weight=..., coef0=...,
decision_function_shape=None, degree=..., gamma=...,
kernel='rbf', max_iter=-1, probability=False,
random_state=None, shrinking=True, tol=...,
verbose=False),
fit_params={}, iid=..., n_jobs=1,
param_grid=..., pre_dispatch=..., refit=...,
scoring=..., verbose=...)
Attributes
----------
grid_scores_ : list of named tuples
Contains scores for all parameter combinations in param_grid.
Each entry corresponds to one parameter setting.
Each named tuple has the attributes:
* ``parameters``, a dict of parameter settings
* ``mean_validation_score``, the mean score over the
cross-validation folds
* ``cv_validation_scores``, the list of scores for each fold
best_estimator_ : estimator
Estimator that was chosen by the search, i.e. estimator
which gave highest score (or smallest loss if specified)
on the left out data. Not available if refit=False.
best_score_ : float
Score of best_estimator on the left out data.
best_params_ : dict
Parameter setting that gave the best results on the hold out data.
scorer_ : function
Scorer function used on the held out data to choose the best
parameters for the model.
Notes
------
The parameters selected are those that maximize the score of the left out
data, unless an explicit score is passed in which case it is used instead.
If `n_jobs` was set to a value higher than one, the data is copied for each
point in the grid (and not `n_jobs` times). This is done for efficiency
reasons if individual jobs take very little time, but may raise errors if
the dataset is large and not enough memory is available. A workaround in
this case is to set `pre_dispatch`. Then, the memory is copied only
`pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *
n_jobs`.
See Also
---------
:class:`ParameterGrid`:
generates all the combinations of a hyperparameter grid.
:func:`sklearn.cross_validation.train_test_split`:
utility function to split the data into a development set usable
for fitting a GridSearchCV instance and an evaluation set for
its final evaluation.
:func:`sklearn.metrics.make_scorer`:
Make a scorer from a performance metric or loss function.
"""
def __init__(self, estimator, param_grid, scoring=None, fit_params=None,
n_jobs=1, iid=True, refit=True, cv=None, verbose=0,
pre_dispatch='2*n_jobs', error_score='raise'):
super(GridSearchCV, self).__init__(
estimator, scoring, fit_params, n_jobs, iid,
refit, cv, verbose, pre_dispatch, error_score)
self.param_grid = param_grid
_check_param_grid(param_grid)
def fit(self, X, y=None):
"""Run fit with all sets of parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
"""
return self._fit(X, y, ParameterGrid(self.param_grid))
class RandomizedSearchCV(BaseSearchCV):
"""Randomized search on hyper parameters.
RandomizedSearchCV implements a "fit" and a "score" method.
It also implements "predict", "predict_proba", "decision_function",
"transform" and "inverse_transform" if they are implemented in the
estimator used.
The parameters of the estimator used to apply these methods are optimized
by cross-validated search over parameter settings.
In contrast to GridSearchCV, not all parameter values are tried out, but
rather a fixed number of parameter settings is sampled from the specified
distributions. The number of parameter settings that are tried is
given by n_iter.
If all parameters are presented as a list,
sampling without replacement is performed. If at least one parameter
is given as a distribution, sampling with replacement is used.
It is highly recommended to use continuous distributions for continuous
parameters.
Read more in the :ref:`User Guide <randomized_parameter_search>`.
Parameters
----------
estimator : estimator object.
        An object of that type is instantiated for each grid point.
This is assumed to implement the scikit-learn estimator interface.
Either estimator needs to provide a ``score`` function,
or ``scoring`` must be passed.
param_distributions : dict
Dictionary with parameters names (string) as keys and distributions
or lists of parameters to try. Distributions must provide a ``rvs``
method for sampling (such as those from scipy.stats.distributions).
If a list is given, it is sampled uniformly.
n_iter : int, default=10
Number of parameter settings that are sampled. n_iter trades
off runtime vs quality of the solution.
scoring : string, callable or None, default=None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
If ``None``, the ``score`` method of the estimator is used.
fit_params : dict, optional
Parameters to pass to the fit method.
n_jobs : int, default=1
Number of jobs to run in parallel.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
iid : boolean, default=True
If True, the data is assumed to be identically distributed across
the folds, and the loss minimized is the total loss per sample,
and not the mean loss across the folds.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass,
:class:`sklearn.model_selection.StratifiedKFold` is used. In all
other cases, :class:`sklearn.model_selection.KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
refit : boolean, default=True
Refit the best estimator with the entire dataset.
If "False", it is impossible to make predictions using
this RandomizedSearchCV instance after fitting.
verbose : integer
Controls the verbosity: the higher, the more messages.
random_state : int or RandomState
Pseudo random number generator state used for random uniform sampling
from lists of possible values instead of scipy.stats distributions.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Attributes
----------
grid_scores_ : list of named tuples
Contains scores for all parameter combinations in param_grid.
Each entry corresponds to one parameter setting.
Each named tuple has the attributes:
* ``parameters``, a dict of parameter settings
* ``mean_validation_score``, the mean score over the
cross-validation folds
* ``cv_validation_scores``, the list of scores for each fold
best_estimator_ : estimator
Estimator that was chosen by the search, i.e. estimator
which gave highest score (or smallest loss if specified)
on the left out data. Not available if refit=False.
best_score_ : float
Score of best_estimator on the left out data.
best_params_ : dict
Parameter setting that gave the best results on the hold out data.
Notes
-----
The parameters selected are those that maximize the score of the held-out
data, according to the scoring parameter.
If `n_jobs` was set to a value higher than one, the data is copied for each
    parameter setting (and not `n_jobs` times). This is done for efficiency
reasons if individual jobs take very little time, but may raise errors if
the dataset is large and not enough memory is available. A workaround in
this case is to set `pre_dispatch`. Then, the memory is copied only
`pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *
n_jobs`.
See Also
--------
:class:`GridSearchCV`:
Does exhaustive search over a grid of parameters.
:class:`ParameterSampler`:
A generator over parameter settings, constructed from
param_distributions.
"""
def __init__(self, estimator, param_distributions, n_iter=10, scoring=None,
fit_params=None, n_jobs=1, iid=True, refit=True, cv=None,
verbose=0, pre_dispatch='2*n_jobs', random_state=None,
error_score='raise'):
self.param_distributions = param_distributions
self.n_iter = n_iter
self.random_state = random_state
super(RandomizedSearchCV, self).__init__(
estimator=estimator, scoring=scoring, fit_params=fit_params,
n_jobs=n_jobs, iid=iid, refit=refit, cv=cv, verbose=verbose,
pre_dispatch=pre_dispatch, error_score=error_score)
def fit(self, X, y=None):
"""Run fit on the estimator with randomly drawn parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
"""
sampled_params = ParameterSampler(self.param_distributions,
self.n_iter,
random_state=self.random_state)
return self._fit(X, y, sampled_params)
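# Minimal usage sketch (illustrative only; SGDClassifier and the iris data
# stand in for any estimator and dataset): continuous distributions are
# sampled through their ``rvs`` method, lists are sampled uniformly.
if __name__ == '__main__':
    import scipy.stats as st
    from sklearn.datasets import load_iris
    from sklearn.linear_model import SGDClassifier
    iris = load_iris()
    param_dist = {'alpha': st.expon(scale=1e-4),   # distribution with .rvs
                  'penalty': ['l1', 'l2']}         # list, sampled uniformly
    search = RandomizedSearchCV(SGDClassifier(), param_dist,
                                n_iter=10, random_state=0)
    search.fit(iris.data, iris.target)
    print(search.best_params_)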
|
bsd-3-clause
|
LFPy/LFPy
|
LFPy/inputgenerators.py
|
1
|
4009
|
#!/usr/bin/env python
"""Copyright (C) 2012 Computational Neuroscience Group, NMBU.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
"""
import numpy as np
import scipy.stats
def get_activation_times_from_distribution(n, tstart=0., tstop=1.E6,
distribution=scipy.stats.expon,
rvs_args=dict(loc=0, scale=1),
maxiter=1E6):
"""
    Construct a length n list of ndarrays containing continuously increasing
    random numbers on the interval [tstart, tstop], with intervals drawn from
    a chosen continuous random variable distribution subclassed from
    scipy.stats.rv_continuous, e.g., scipy.stats.expon or scipy.stats.gamma.
    The expected first entry is
    ``tstart + distribution.mean(**rvs_args)``
Parameters
----------
n: int
number of ndarrays in list
tstart: float
minimum allowed value in ndarrays
tstop: float
maximum allowed value in ndarrays
    distribution: object
        subclass of scipy.stats.rv_continuous. Distributions
        producing negative values should be avoided if continuously increasing
        values are desired, i.e., the probability density function
        ``distribution.pdf(x, **rvs_args)`` should be ``0`` for ``x < 0``
        (this is not explicitly tested for).
    rvs_args: dict
        parameters for the distribution's ``rvs`` method. If "size" is in the
        dict, tstop will be ignored, and each ndarray in the output list will
        be ``distribution.rvs(**rvs_args).cumsum() + tstart``. If "size" is
        not given in the dict, then values up to tstop will be included
maxiter: int
maximum number of iterations
Returns
-------
list of ndarrays
length n list of arrays containing data
Raises
------
AssertionError
if distribution does not have the 'rvs' attribute
StopIteration
if number of while-loop iterations reaches maxiter
Examples
--------
Create n sets of activation times with intervals drawn from the exponential
distribution, with rate expectation lambda 10 s^-1 (thus
scale=1000 / lambda). Here we assume output in units of ms
>>> from LFPy.inputgenerators import get_activation_times_from_distribution
>>> import scipy.stats as st
>>> import matplotlib.pyplot as plt
    >>> times = get_activation_times_from_distribution(n=10, tstart=0.,
    ...                                                tstop=1000.,
    ...                                                distribution=st.expon,
    ...                                                rvs_args=dict(loc=0.,
    ...                                                              scale=100.))
"""
assert hasattr(distribution, 'rvs'), \
'distribution={} must have the attribute "rvs"'.format(distribution)
times = []
if 'size' in rvs_args.keys():
for i in range(n):
times += [distribution.rvs(**rvs_args).cumsum() + tstart]
else:
for i in range(n):
values = distribution.rvs(size=1000, **rvs_args).cumsum() + tstart
            it = 0
            while values[-1] < tstop and it < maxiter:
                values = np.r_[values, distribution.rvs(
                    size=1000, **rvs_args).cumsum() + values[-1]]
                it += 1
            if it == maxiter:
                raise StopIteration(
                    'maximum number of iterations reached. Consider '
                    'increasing maxiter.')
times += [values[values < tstop]]
return times
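# Minimal usage sketch mirroring the docstring example above (values are
# illustrative): with scale=100 ms over a 1000 ms window, roughly ten
# exponentially distributed intervals accumulate into each spike train.
if __name__ == '__main__':
    times = get_activation_times_from_distribution(
        n=3, tstart=0., tstop=1000.,
        distribution=scipy.stats.expon,
        rvs_args=dict(loc=0., scale=100.))
    for t in times:
        print('%d activation times in [0, 1000) ms' % t.size)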
|
gpl-3.0
|
JensWehner/votca-scripts
|
xtp/xtp_energielevels.py
|
2
|
3385
|
#!/usr/bin/env python
import sys
import numpy as np
import matplotlib.pyplot as plt
if len(sys.argv)==2:
infile=sys.argv[1]
export=False
elif len(sys.argv)==3:
infile=sys.argv[1]
gnufile=sys.argv[2]
export=True
else:
    print "Wrong number of arguments. Specify the profile.dat file first and, optionally, an output file. Exiting."
sys.exit()
z=[]
EA=[]
IP=[]
dEA=[]
dIP=[]
with open(infile, "r") as f:
for line in f:
if "#" not in line:
lineparts=line.split()
IPblocked=False
dIPblocked=False
EAblocked=False
dEAblocked=False
#print lineparts
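            #columns repeat in blocks of four after z (lineparts[0]):
            #i%4==1 -> IP, i%4==2 -> EA, i%4==3 -> dIP, i%4==0 -> dEA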
for i in range(len(lineparts)):
if lineparts[i]!='-nan' and i>0:
#print i%4,i,lineparts[i],lineparts[0],line
if i%4==1:
if not IPblocked:
IP.append(float(lineparts[i]))
IPblocked=True
else:
                            print "Two elements at the same position"
elif i%4==3:
if not dIPblocked:
dIP.append(float(lineparts[i]))
dIPblocked=True
elif i%4==2:
if not EAblocked:
EA.append(float(lineparts[i]))
EAblocked=True
elif i%4==0:
if not dEAblocked:
dEA.append(float(lineparts[i]))
dEAblocked=True
else:
print i
if IPblocked+dIPblocked+EAblocked+dEAblocked!=0:
z.append(float(lineparts[0]))
profile=np.array([z,IP,dIP,EA,dEA])
plt.errorbar(profile[0],profile[1],profile[2],marker="o")
plt.errorbar(profile[0],-profile[3],profile[4],marker="x")
plt.axis('tight')
plt.show()
if export:
    np.savetxt(gnufile, profile.T, delimiter="\t")
|
apache-2.0
|
Akshay0724/scikit-learn
|
examples/svm/plot_separating_hyperplane.py
|
294
|
1273
|
"""
=========================================
SVM: Maximum margin separating hyperplane
=========================================
Plot the maximum margin separating hyperplane within a two-class
separable dataset using a Support Vector Machine classifier with
linear kernel.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
# we create 40 separable points
np.random.seed(0)
X = np.r_[np.random.randn(20, 2) - [2, 2], np.random.randn(20, 2) + [2, 2]]
Y = [0] * 20 + [1] * 20
# fit the model
clf = svm.SVC(kernel='linear')
clf.fit(X, Y)
# get the separating hyperplane
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(-5, 5)
yy = a * xx - (clf.intercept_[0]) / w[1]
# plot the parallels to the separating hyperplane that pass through the
# support vectors
b = clf.support_vectors_[0]
yy_down = a * xx + (b[1] - a * b[0])
b = clf.support_vectors_[-1]
yy_up = a * xx + (b[1] - a * b[0])
# plot the line, the points, and the nearest vectors to the plane
plt.plot(xx, yy, 'k-')
plt.plot(xx, yy_down, 'k--')
plt.plot(xx, yy_up, 'k--')
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1],
s=80, facecolors='none')
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.axis('tight')
plt.show()
|
bsd-3-clause
|
NirBenTalLab/proorigami-cde-package
|
cde-root/usr/lib64/python2.4/site-packages/numpy/lib/polynomial.py
|
2
|
35562
|
"""
Functions to operate on polynomials.
"""
__all__ = ['poly', 'roots', 'polyint', 'polyder', 'polyadd',
'polysub', 'polymul', 'polydiv', 'polyval', 'poly1d',
'polyfit', 'RankWarning']
import re
import warnings
import numpy.core.numeric as NX
from numpy.core import isscalar, abs, finfo, atleast_1d, hstack
from numpy.lib.twodim_base import diag, vander
from numpy.lib.function_base import trim_zeros, sort_complex
from numpy.lib.type_check import iscomplex, real, imag
from numpy.linalg import eigvals, lstsq
class RankWarning(UserWarning):
"""
Issued by `polyfit` when the Vandermonde matrix is rank deficient.
For more information, a way to suppress the warning, and an example of
`RankWarning` being issued, see `polyfit`.
"""
pass
def poly(seq_of_zeros):
"""
Find the coefficients of a polynomial with the given sequence of roots.
Returns the coefficients of the polynomial whose leading coefficient
is one for the given sequence of zeros (multiple roots must be included
in the sequence as many times as their multiplicity; see Examples).
A square matrix (or array, which will be treated as a matrix) can also
be given, in which case the coefficients of the characteristic polynomial
of the matrix are returned.
Parameters
----------
seq_of_zeros : array_like, shape (N,) or (N, N)
A sequence of polynomial roots, or a square array or matrix object.
Returns
-------
c : ndarray
1D array of polynomial coefficients from highest to lowest degree:
``c[0] * x**(N) + c[1] * x**(N-1) + ... + c[N-1] * x + c[N]``
where c[0] always equals 1.
Raises
------
ValueError
If input is the wrong shape (the input must be a 1-D or square
2-D array).
See Also
--------
polyval : Evaluate a polynomial at a point.
roots : Return the roots of a polynomial.
polyfit : Least squares polynomial fit.
poly1d : A one-dimensional polynomial class.
Notes
-----
Specifying the roots of a polynomial still leaves one degree of
freedom, typically represented by an undetermined leading
coefficient. [1]_ In the case of this function, that coefficient -
the first one in the returned array - is always taken as one. (If
for some reason you have one other point, the only automatic way
presently to leverage that information is to use ``polyfit``.)
The characteristic polynomial, :math:`p_a(t)`, of an `n`-by-`n`
matrix **A** is given by
:math:`p_a(t) = \\mathrm{det}(t\\, \\mathbf{I} - \\mathbf{A})`,
where **I** is the `n`-by-`n` identity matrix. [2]_
References
----------
    .. [1] M. Sullivan and M. Sullivan, III, "Algebra and Trigonometry,
       Enhanced With Graphing Utilities," Prentice-Hall, pg. 318, 1996.
.. [2] G. Strang, "Linear Algebra and Its Applications, 2nd Edition,"
Academic Press, pg. 182, 1980.
Examples
--------
Given a sequence of a polynomial's zeros:
>>> np.poly((0, 0, 0)) # Multiple root example
array([1, 0, 0, 0]) # i.e., z**3 + 0*z**2 + 0*z + 0
>>> np.poly((-1./2, 0, 1./2))
array([ 1. , 0. , -0.25, 0. ]) # z**3 - z/4
    >>> np.poly((np.random.random(1)[0], 0, np.random.random(1)[0]))
array([ 1. , -0.77086955, 0.08618131, 0. ])
Given a square array object:
>>> P = np.array([[0, 1./3], [-1./2, 0]])
>>> np.poly(P)
array([ 1. , 0. , 0.16666667])
Or a square matrix object:
>>> np.poly(np.matrix(P))
array([ 1. , 0. , 0.16666667])
Note how in all cases the leading coefficient is always 1.
"""
seq_of_zeros = atleast_1d(seq_of_zeros)
sh = seq_of_zeros.shape
if len(sh) == 2 and sh[0] == sh[1]:
seq_of_zeros = eigvals(seq_of_zeros)
elif len(sh) ==1:
pass
else:
raise ValueError, "input must be 1d or square 2d array."
if len(seq_of_zeros) == 0:
return 1.0
a = [1]
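    # Expand prod_k (x - z_k) one root at a time: multiplying polynomials
    # corresponds to convolving their coefficient sequences.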
for k in range(len(seq_of_zeros)):
a = NX.convolve(a, [1, -seq_of_zeros[k]], mode='full')
if issubclass(a.dtype.type, NX.complexfloating):
# if complex roots are all complex conjugates, the roots are real.
roots = NX.asarray(seq_of_zeros, complex)
pos_roots = sort_complex(NX.compress(roots.imag > 0, roots))
neg_roots = NX.conjugate(sort_complex(
NX.compress(roots.imag < 0,roots)))
if (len(pos_roots) == len(neg_roots) and
NX.alltrue(neg_roots == pos_roots)):
a = a.real.copy()
return a
def roots(p):
"""
Return the roots of a polynomial with coefficients given in p.
The values in the rank-1 array `p` are coefficients of a polynomial.
If the length of `p` is n+1 then the polynomial is described by
p[0] * x**n + p[1] * x**(n-1) + ... + p[n-1]*x + p[n]
Parameters
----------
p : array_like of shape(M,)
Rank-1 array of polynomial co-efficients.
Returns
-------
out : ndarray
An array containing the complex roots of the polynomial.
Raises
------
ValueError:
When `p` cannot be converted to a rank-1 array.
See also
--------
poly : Find the coefficients of a polynomial with
a given sequence of roots.
polyval : Evaluate a polynomial at a point.
polyfit : Least squares polynomial fit.
poly1d : A one-dimensional polynomial class.
Notes
-----
The algorithm relies on computing the eigenvalues of the
companion matrix [1]_.
References
----------
.. [1] Wikipedia, "Companion matrix",
http://en.wikipedia.org/wiki/Companion_matrix
Examples
--------
>>> coeff = [3.2, 2, 1]
>>> np.roots(coeff)
array([-0.3125+0.46351241j, -0.3125-0.46351241j])
"""
# If input is scalar, this makes it an array
p = atleast_1d(p)
if len(p.shape) != 1:
raise ValueError,"Input must be a rank-1 array."
# find non-zero array entries
non_zero = NX.nonzero(NX.ravel(p))[0]
# Return an empty array if polynomial is all zeros
if len(non_zero) == 0:
return NX.array([])
# find the number of trailing zeros -- this is the number of roots at 0.
trailing_zeros = len(p) - non_zero[-1] - 1
# strip leading and trailing zeros
p = p[int(non_zero[0]):int(non_zero[-1])+1]
# casting: if incoming array isn't floating point, make it floating point.
if not issubclass(p.dtype.type, (NX.floating, NX.complexfloating)):
p = p.astype(float)
N = len(p)
if N > 1:
# build companion matrix and find its eigenvalues (the roots)
A = diag(NX.ones((N-2,), p.dtype), -1)
A[0, :] = -p[1:] / p[0]
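        # A is the companion matrix of the monic polynomial p / p[0]: ones
        # on the first subdiagonal and -p[1:]/p[0] in the first row. Its
        # characteristic polynomial is p / p[0], so its eigenvalues are
        # exactly the roots of p.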
roots = eigvals(A)
else:
roots = NX.array([])
# tack any zeros onto the back of the array
roots = hstack((roots, NX.zeros(trailing_zeros, roots.dtype)))
return roots
def polyint(p, m=1, k=None):
"""
Return an antiderivative (indefinite integral) of a polynomial.
The returned order `m` antiderivative `P` of polynomial `p` satisfies
:math:`\\frac{d^m}{dx^m}P(x) = p(x)` and is defined up to `m - 1`
integration constants `k`. The constants determine the low-order
polynomial part
.. math:: \\frac{k_{m-1}}{0!} x^0 + \\ldots + \\frac{k_0}{(m-1)!}x^{m-1}
of `P` so that :math:`P^{(j)}(0) = k_{m-j-1}`.
Parameters
----------
p : {array_like, poly1d}
Polynomial to differentiate.
A sequence is interpreted as polynomial coefficients, see `poly1d`.
m : int, optional
Order of the antiderivative. (Default: 1)
k : {None, list of `m` scalars, scalar}, optional
Integration constants. They are given in the order of integration:
those corresponding to highest-order terms come first.
If ``None`` (default), all constants are assumed to be zero.
If `m = 1`, a single scalar can be given instead of a list.
See Also
--------
polyder : derivative of a polynomial
poly1d.integ : equivalent method
Examples
--------
The defining property of the antiderivative:
>>> p = np.poly1d([1,1,1])
    >>> P = np.polyint(p)
    >>> P
    poly1d([ 0.33333333,  0.5       ,  1.        ,  0.        ])
>>> np.polyder(P) == p
True
The integration constants default to zero, but can be specified:
>>> P = np.polyint(p, 3)
>>> P(0)
0.0
>>> np.polyder(P)(0)
0.0
>>> np.polyder(P, 2)(0)
0.0
>>> P = np.polyint(p, 3, k=[6,5,3])
>>> P
poly1d([ 0.01666667, 0.04166667, 0.16666667, 3., 5., 3. ])
Note that 3 = 6 / 2!, and that the constants are given in the order of
integrations. Constant of the highest-order polynomial term comes first:
>>> np.polyder(P, 2)(0)
6.0
>>> np.polyder(P, 1)(0)
5.0
>>> P(0)
3.0
"""
m = int(m)
if m < 0:
raise ValueError, "Order of integral must be positive (see polyder)"
if k is None:
k = NX.zeros(m, float)
k = atleast_1d(k)
if len(k) == 1 and m > 1:
k = k[0]*NX.ones(m, float)
if len(k) < m:
raise ValueError, \
"k must be a scalar or a rank-1 array of length 1 or >m."
truepoly = isinstance(p, poly1d)
p = NX.asarray(p)
if m == 0:
if truepoly:
return poly1d(p)
return p
else:
# Note: this must work also with object and integer arrays
y = NX.concatenate((p.__truediv__(NX.arange(len(p), 0, -1)), [k[0]]))
val = polyint(y, m - 1, k=k[1:])
if truepoly:
return poly1d(val)
return val
def polyder(p, m=1):
"""
Return the derivative of the specified order of a polynomial.
Parameters
----------
p : poly1d or sequence
Polynomial to differentiate.
A sequence is interpreted as polynomial coefficients, see `poly1d`.
m : int, optional
Order of differentiation (default: 1)
Returns
-------
der : poly1d
A new polynomial representing the derivative.
See Also
--------
polyint : Anti-derivative of a polynomial.
poly1d : Class for one-dimensional polynomials.
Examples
--------
The derivative of the polynomial :math:`x^3 + x^2 + x^1 + 1` is:
>>> p = np.poly1d([1,1,1,1])
>>> p2 = np.polyder(p)
>>> p2
poly1d([3, 2, 1])
which evaluates to:
>>> p2(2.)
17.0
We can verify this, approximating the derivative with
``(f(x + h) - f(x))/h``:
>>> (p(2. + 0.001) - p(2.)) / 0.001
17.007000999997857
    The second and third derivatives:
    >>> np.polyder(p, 2)
    poly1d([6, 2])
    >>> np.polyder(p, 3)
    poly1d([6])
    The fourth-order derivative of a 3rd-order polynomial is zero:
    >>> np.polyder(p, 4)
    poly1d([ 0.])
"""
m = int(m)
truepoly = isinstance(p, poly1d)
p = NX.asarray(p)
n = len(p)-1
y = p[:-1] * NX.arange(n, 0, -1)
if m < 0:
raise ValueError, "Order of derivative must be positive (see polyint)"
if m == 0:
return p
else:
val = polyder(y, m-1)
if truepoly:
val = poly1d(val)
return val
def polyfit(x, y, deg, rcond=None, full=False):
"""
Least squares polynomial fit.
Fit a polynomial ``p(x) = p[0] * x**deg + ... + p[deg]`` of degree `deg`
to points `(x, y)`. Returns a vector of coefficients `p` that minimises
the squared error.
Parameters
----------
x : array_like, shape (M,)
x-coordinates of the M sample points ``(x[i], y[i])``.
y : array_like, shape (M,) or (M, K)
y-coordinates of the sample points. Several data sets of sample
points sharing the same x-coordinates can be fitted at once by
passing in a 2D-array that contains one dataset per column.
deg : int
Degree of the fitting polynomial
rcond : float, optional
Relative condition number of the fit. Singular values smaller than this
relative to the largest singular value will be ignored. The default
value is len(x)*eps, where eps is the relative precision of the float
type, about 2e-16 in most cases.
full : bool, optional
Switch determining nature of return value. When it is
False (the default) just the coefficients are returned, when True
diagnostic information from the singular value decomposition is also
returned.
Returns
-------
p : ndarray, shape (M,) or (M, K)
Polynomial coefficients, highest power first.
If `y` was 2-D, the coefficients for `k`-th data set are in ``p[:,k]``.
residuals, rank, singular_values, rcond : present only if `full` = True
Residuals of the least-squares fit, the effective rank of the scaled
Vandermonde coefficient matrix, its singular values, and the specified
value of `rcond`. For more details, see `linalg.lstsq`.
Warns
-----
RankWarning
The rank of the coefficient matrix in the least-squares fit is
deficient. The warning is only raised if `full` = False.
The warnings can be turned off by
>>> import warnings
>>> warnings.simplefilter('ignore', np.RankWarning)
See Also
--------
polyval : Computes polynomial values.
linalg.lstsq : Computes a least-squares fit.
scipy.interpolate.UnivariateSpline : Computes spline fits.
Notes
-----
The solution minimizes the squared error
.. math ::
E = \\sum_{j=0}^k |p(x_j) - y_j|^2
in the equations::
x[0]**n * p[n] + ... + x[0] * p[1] + p[0] = y[0]
x[1]**n * p[n] + ... + x[1] * p[1] + p[0] = y[1]
...
x[k]**n * p[n] + ... + x[k] * p[1] + p[0] = y[k]
The coefficient matrix of the coefficients `p` is a Vandermonde matrix.
`polyfit` issues a `RankWarning` when the least-squares fit is badly
conditioned. This implies that the best fit is not well-defined due
to numerical error. The results may be improved by lowering the polynomial
degree or by replacing `x` by `x` - `x`.mean(). The `rcond` parameter
can also be set to a value smaller than its default, but the resulting
fit may be spurious: including contributions from the small singular
values can add numerical noise to the result.
Note that fitting polynomial coefficients is inherently badly conditioned
when the degree of the polynomial is large or the interval of sample points
is badly centered. The quality of the fit should always be checked in these
cases. When polynomial fits are not satisfactory, splines may be a good
alternative.
References
----------
.. [1] Wikipedia, "Curve fitting",
http://en.wikipedia.org/wiki/Curve_fitting
.. [2] Wikipedia, "Polynomial interpolation",
http://en.wikipedia.org/wiki/Polynomial_interpolation
Examples
--------
>>> x = np.array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0])
>>> y = np.array([0.0, 0.8, 0.9, 0.1, -0.8, -1.0])
    >>> z = np.polyfit(x, y, 3)
    >>> z
    array([ 0.08703704, -0.81349206,  1.69312169, -0.03968254])
It is convenient to use `poly1d` objects for dealing with polynomials:
>>> p = np.poly1d(z)
>>> p(0.5)
0.6143849206349179
>>> p(3.5)
-0.34732142857143039
>>> p(10)
22.579365079365115
High-order polynomials may oscillate wildly:
>>> p30 = np.poly1d(np.polyfit(x, y, 30))
/... RankWarning: Polyfit may be poorly conditioned...
>>> p30(4)
-0.80000000000000204
>>> p30(5)
-0.99999999999999445
>>> p30(4.5)
-0.10547061179440398
Illustration:
>>> import matplotlib.pyplot as plt
>>> xp = np.linspace(-2, 6, 100)
>>> plt.plot(x, y, '.', xp, p(xp), '-', xp, p30(xp), '--')
>>> plt.ylim(-2,2)
>>> plt.show()
"""
order = int(deg) + 1
x = NX.asarray(x) + 0.0
y = NX.asarray(y) + 0.0
# check arguments.
if deg < 0 :
raise ValueError, "expected deg >= 0"
if x.ndim != 1:
raise TypeError, "expected 1D vector for x"
if x.size == 0:
raise TypeError, "expected non-empty vector for x"
if y.ndim < 1 or y.ndim > 2 :
raise TypeError, "expected 1D or 2D array for y"
if x.shape[0] != y.shape[0] :
raise TypeError, "expected x and y to have same length"
# set rcond
if rcond is None :
rcond = len(x)*finfo(x.dtype).eps
# scale x to improve condition number
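    # Dividing x by max|x| keeps the Vandermonde columns (successive powers
    # of x) at comparable magnitudes; the coefficients are scaled back below.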
scale = abs(x).max()
if scale != 0 :
x /= scale
# solve least squares equation for powers of x
v = vander(x, order)
c, resids, rank, s = lstsq(v, y, rcond)
# warn on rank reduction, which indicates an ill conditioned matrix
if rank != order and not full:
msg = "Polyfit may be poorly conditioned"
warnings.warn(msg, RankWarning)
# scale returned coefficients
if scale != 0 :
if c.ndim == 1 :
c /= vander([scale], order)[0]
else :
c /= vander([scale], order).T
if full :
return c, resids, rank, s, rcond
else :
return c
def polyval(p, x):
"""
Evaluate a polynomial at specific values.
If `p` is of length N, this function returns the value:
``p[0]*x**(N-1) + p[1]*x**(N-2) + ... + p[N-2]*x + p[N-1]``
If `x` is a sequence, then `p(x)` is returned for each element of `x`.
If `x` is another polynomial then the composite polynomial `p(x(t))`
is returned.
Parameters
----------
p : array_like or poly1d object
1D array of polynomial coefficients (including coefficients equal
to zero) from highest degree to the constant term, or an
instance of poly1d.
x : array_like or poly1d object
A number, a 1D array of numbers, or an instance of poly1d, "at"
which to evaluate `p`.
Returns
-------
values : ndarray or poly1d
If `x` is a poly1d instance, the result is the composition of the two
polynomials, i.e., `x` is "substituted" in `p` and the simplified
result is returned. In addition, the type of `x` - array_like or
poly1d - governs the type of the output: `x` array_like => `values`
array_like, `x` a poly1d object => `values` is also.
See Also
--------
poly1d: A polynomial class.
Notes
-----
Horner's scheme [1]_ is used to evaluate the polynomial. Even so,
for polynomials of high degree the values may be inaccurate due to
rounding errors. Use carefully.
References
----------
.. [1] I. N. Bronshtein, K. A. Semendyayev, and K. A. Hirsch (Eng.
trans. Ed.), *Handbook of Mathematics*, New York, Van Nostrand
Reinhold Co., 1985, pg. 720.
Examples
--------
>>> np.polyval([3,0,1], 5) # 3 * 5**2 + 0 * 5**1 + 1
76
>>> np.polyval([3,0,1], np.poly1d(5))
poly1d([ 76.])
>>> np.polyval(np.poly1d([3,0,1]), 5)
76
>>> np.polyval(np.poly1d([3,0,1]), np.poly1d(5))
poly1d([ 76.])
"""
p = NX.asarray(p)
if isinstance(x, poly1d):
y = 0
else:
x = NX.asarray(x)
y = NX.zeros_like(x)
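    # Horner's scheme: y = (...(p[0]*x + p[1])*x + ...)*x + p[-1],
    # one multiply and one add per coefficient.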
for i in range(len(p)):
y = x * y + p[i]
return y
def polyadd(a1, a2):
"""
Find the sum of two polynomials.
Returns the polynomial resulting from the sum of two input polynomials.
Each input must be either a poly1d object or a 1D sequence of polynomial
coefficients, from highest to lowest degree.
Parameters
----------
a1, a2 : array_like or poly1d object
Input polynomials.
Returns
-------
out : ndarray or poly1d object
The sum of the inputs. If either input is a poly1d object, then the
output is also a poly1d object. Otherwise, it is a 1D array of
polynomial coefficients from highest to lowest degree.
See Also
--------
poly1d : A one-dimensional polynomial class.
poly, polyadd, polyder, polydiv, polyfit, polyint, polysub, polyval
Examples
--------
>>> np.polyadd([1, 2], [9, 5, 4])
array([9, 6, 6])
Using poly1d objects:
>>> p1 = np.poly1d([1, 2])
>>> p2 = np.poly1d([9, 5, 4])
>>> print p1
1 x + 2
>>> print p2
2
9 x + 5 x + 4
>>> print np.polyadd(p1, p2)
2
9 x + 6 x + 6
"""
truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
a1 = atleast_1d(a1)
a2 = atleast_1d(a2)
diff = len(a2) - len(a1)
if diff == 0:
val = a1 + a2
elif diff > 0:
zr = NX.zeros(diff, a1.dtype)
val = NX.concatenate((zr, a1)) + a2
else:
zr = NX.zeros(abs(diff), a2.dtype)
val = a1 + NX.concatenate((zr, a2))
if truepoly:
val = poly1d(val)
return val
def polysub(a1, a2):
"""
Difference (subtraction) of two polynomials.
Given two polynomials `a1` and `a2`, returns ``a1 - a2``.
`a1` and `a2` can be either array_like sequences of the polynomials'
coefficients (including coefficients equal to zero), or `poly1d` objects.
Parameters
----------
a1, a2 : array_like or poly1d
Minuend and subtrahend polynomials, respectively.
Returns
-------
out : ndarray or poly1d
Array or `poly1d` object of the difference polynomial's coefficients.
See Also
--------
polyval, polydiv, polymul, polyadd
Examples
--------
.. math:: (2 x^2 + 10 x - 2) - (3 x^2 + 10 x -4) = (-x^2 + 2)
>>> np.polysub([2, 10, -2], [3, 10, -4])
array([-1, 0, 2])
"""
truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
a1 = atleast_1d(a1)
a2 = atleast_1d(a2)
diff = len(a2) - len(a1)
if diff == 0:
val = a1 - a2
elif diff > 0:
zr = NX.zeros(diff, a1.dtype)
val = NX.concatenate((zr, a1)) - a2
else:
zr = NX.zeros(abs(diff), a2.dtype)
val = a1 - NX.concatenate((zr, a2))
if truepoly:
val = poly1d(val)
return val
def polymul(a1, a2):
"""
Find the product of two polynomials.
Finds the polynomial resulting from the multiplication of the two input
polynomials. Each input must be either a poly1d object or a 1D sequence
of polynomial coefficients, from highest to lowest degree.
Parameters
----------
a1, a2 : array_like or poly1d object
Input polynomials.
Returns
-------
out : ndarray or poly1d object
The polynomial resulting from the multiplication of the inputs. If
either inputs is a poly1d object, then the output is also a poly1d
object. Otherwise, it is a 1D array of polynomial coefficients from
highest to lowest degree.
See Also
--------
poly1d : A one-dimensional polynomial class.
poly, polyadd, polyder, polydiv, polyfit, polyint, polysub,
polyval
Examples
--------
>>> np.polymul([1, 2, 3], [9, 5, 1])
array([ 9, 23, 38, 17, 3])
Using poly1d objects:
>>> p1 = np.poly1d([1, 2, 3])
>>> p2 = np.poly1d([9, 5, 1])
>>> print p1
2
1 x + 2 x + 3
>>> print p2
2
9 x + 5 x + 1
>>> print np.polymul(p1, p2)
4 3 2
9 x + 23 x + 38 x + 17 x + 3
"""
truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
a1,a2 = poly1d(a1),poly1d(a2)
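    # The coefficients of a polynomial product are the convolution of the
    # factors' coefficient sequences.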
val = NX.convolve(a1, a2)
if truepoly:
val = poly1d(val)
return val
def polydiv(u, v):
"""
Returns the quotient and remainder of polynomial division.
The input arrays are the coefficients (including any coefficients
equal to zero) of the "numerator" (dividend) and "denominator"
(divisor) polynomials, respectively.
Parameters
----------
u : array_like or poly1d
Dividend polynomial's coefficients.
v : array_like or poly1d
Divisor polynomial's coefficients.
Returns
-------
q : ndarray
Coefficients, including those equal to zero, of the quotient.
r : ndarray
Coefficients, including those equal to zero, of the remainder.
See Also
--------
poly, polyadd, polyder, polydiv, polyfit, polyint, polymul, polysub,
polyval
Notes
-----
Both `u` and `v` must be 0-d or 1-d (ndim = 0 or 1), but `u.ndim` need
not equal `v.ndim`. In other words, all four possible combinations -
``u.ndim = v.ndim = 0``, ``u.ndim = v.ndim = 1``,
``u.ndim = 1, v.ndim = 0``, and ``u.ndim = 0, v.ndim = 1`` - work.
Examples
--------
.. math:: \\frac{3x^2 + 5x + 2}{2x + 1} = 1.5x + 1.75, remainder 0.25
>>> x = np.array([3.0, 5.0, 2.0])
>>> y = np.array([2.0, 1.0])
    >>> np.polydiv(x, y)
    (array([ 1.5 ,  1.75]), array([ 0.25]))
"""
    truepoly = (isinstance(u, poly1d) or isinstance(v, poly1d))
u = atleast_1d(u) + 0.0
v = atleast_1d(v) + 0.0
# w has the common type
w = u[0] + v[0]
m = len(u) - 1
n = len(v) - 1
scale = 1. / v[0]
q = NX.zeros((max(m - n + 1, 1),), w.dtype)
r = u.copy()
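    # Synthetic long division: each pass cancels the current leading term of
    # the remainder and records the corresponding quotient coefficient.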
for k in range(0, m-n+1):
d = scale * r[k]
q[k] = d
r[k:k+n+1] -= d*v
while NX.allclose(r[0], 0, rtol=1e-14) and (r.shape[-1] > 1):
r = r[1:]
if truepoly:
return poly1d(q), poly1d(r)
return q, r
_poly_mat = re.compile(r"[*][*]([0-9]*)")
def _raise_power(astr, wrap=70):
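    # Pretty-printing helper for poly1d.__str__: rewrites each '**<power>' so
    # the exponent is raised onto the line above its term, wrapping long
    # polynomials at `wrap` characters.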
n = 0
line1 = ''
line2 = ''
output = ' '
while 1:
mat = _poly_mat.search(astr, n)
if mat is None:
break
span = mat.span()
power = mat.groups()[0]
partstr = astr[n:span[0]]
n = span[1]
toadd2 = partstr + ' '*(len(power)-1)
toadd1 = ' '*(len(partstr)-1) + power
if ((len(line2)+len(toadd2) > wrap) or \
(len(line1)+len(toadd1) > wrap)):
output += line1 + "\n" + line2 + "\n "
line1 = toadd1
line2 = toadd2
else:
line2 += partstr + ' '*(len(power)-1)
line1 += ' '*(len(partstr)-1) + power
output += line1 + "\n" + line2
return output + astr[n:]
class poly1d(object):
"""
A one-dimensional polynomial class.
A convenience class, used to encapsulate "natural" operations on
polynomials so that said operations may take on their customary
form in code (see Examples).
Parameters
----------
c_or_r : array_like
The polynomial's coefficients, in decreasing powers, or if
the value of the second parameter is True, the polynomial's
roots (values where the polynomial evaluates to 0). For example,
``poly1d([1, 2, 3])`` returns an object that represents
:math:`x^2 + 2x + 3`, whereas ``poly1d([1, 2, 3], True)`` returns
one that represents :math:`(x-1)(x-2)(x-3) = x^3 - 6x^2 + 11x -6`.
r : bool, optional
If True, `c_or_r` specifies the polynomial's roots; the default
is False.
variable : str, optional
Changes the variable used when printing `p` from `x` to `variable`
(see Examples).
Examples
--------
Construct the polynomial :math:`x^2 + 2x + 3`:
>>> p = np.poly1d([1, 2, 3])
>>> print np.poly1d(p)
2
1 x + 2 x + 3
Evaluate the polynomial at :math:`x = 0.5`:
>>> p(0.5)
4.25
Find the roots:
>>> p.r
array([-1.+1.41421356j, -1.-1.41421356j])
>>> p(p.r)
array([ -4.44089210e-16+0.j, -4.44089210e-16+0.j]) # i.e., (0, 0)
Show the coefficients:
>>> p.c
array([1, 2, 3])
Display the order (the leading zero-coefficients are removed):
>>> p.order
2
    Show the coefficient of the k-th power in the polynomial
    (which is equivalent to ``p.c[-(k+1)]``):
>>> p[1]
2
Polynomials can be added, subtracted, multiplied, and divided
(returns quotient and remainder):
>>> p * p
poly1d([ 1, 4, 10, 12, 9])
>>> (p**3 + 4) / p
(poly1d([ 1., 4., 10., 12., 9.]), poly1d([4]))
``asarray(p)`` gives the coefficient array, so polynomials can be
used in all functions that accept arrays:
>>> p**2 # square of polynomial
poly1d([ 1, 4, 10, 12, 9])
>>> np.square(p) # square of individual coefficients
array([1, 4, 9])
The variable used in the string representation of `p` can be modified,
using the `variable` parameter:
>>> p = np.poly1d([1,2,3], variable='z')
>>> print p
2
1 z + 2 z + 3
Construct a polynomial from its roots:
>>> np.poly1d([1, 2], True)
poly1d([ 1, -3, 2])
This is the same polynomial as obtained by:
>>> np.poly1d([1, -1]) * np.poly1d([1, -2])
poly1d([ 1, -3, 2])
"""
coeffs = None
order = None
variable = None
def __init__(self, c_or_r, r=0, variable=None):
if isinstance(c_or_r, poly1d):
for key in c_or_r.__dict__.keys():
self.__dict__[key] = c_or_r.__dict__[key]
if variable is not None:
self.__dict__['variable'] = variable
return
if r:
c_or_r = poly(c_or_r)
c_or_r = atleast_1d(c_or_r)
if len(c_or_r.shape) > 1:
raise ValueError, "Polynomial must be 1d only."
c_or_r = trim_zeros(c_or_r, trim='f')
if len(c_or_r) == 0:
c_or_r = NX.array([0.])
self.__dict__['coeffs'] = c_or_r
self.__dict__['order'] = len(c_or_r) - 1
if variable is None:
variable = 'x'
self.__dict__['variable'] = variable
def __array__(self, t=None):
if t:
return NX.asarray(self.coeffs, t)
else:
return NX.asarray(self.coeffs)
def __repr__(self):
vals = repr(self.coeffs)
vals = vals[6:-1]
return "poly1d(%s)" % vals
def __len__(self):
return self.order
def __str__(self):
thestr = "0"
var = self.variable
# Remove leading zeros
coeffs = self.coeffs[NX.logical_or.accumulate(self.coeffs != 0)]
N = len(coeffs)-1
def fmt_float(q):
s = '%.4g' % q
if s.endswith('.0000'):
s = s[:-5]
return s
for k in range(len(coeffs)):
if not iscomplex(coeffs[k]):
coefstr = fmt_float(real(coeffs[k]))
elif real(coeffs[k]) == 0:
coefstr = '%sj' % fmt_float(imag(coeffs[k]))
else:
coefstr = '(%s + %sj)' % (fmt_float(real(coeffs[k])),
fmt_float(imag(coeffs[k])))
power = (N-k)
if power == 0:
if coefstr != '0':
newstr = '%s' % (coefstr,)
else:
if k == 0:
newstr = '0'
else:
newstr = ''
elif power == 1:
if coefstr == '0':
newstr = ''
elif coefstr == 'b':
newstr = var
else:
newstr = '%s %s' % (coefstr, var)
else:
if coefstr == '0':
newstr = ''
elif coefstr == 'b':
newstr = '%s**%d' % (var, power,)
else:
newstr = '%s %s**%d' % (coefstr, var, power)
if k > 0:
if newstr != '':
if newstr.startswith('-'):
thestr = "%s - %s" % (thestr, newstr[1:])
else:
thestr = "%s + %s" % (thestr, newstr)
else:
thestr = newstr
return _raise_power(thestr)
def __call__(self, val):
return polyval(self.coeffs, val)
def __neg__(self):
return poly1d(-self.coeffs)
def __pos__(self):
return self
def __mul__(self, other):
if isscalar(other):
return poly1d(self.coeffs * other)
else:
other = poly1d(other)
return poly1d(polymul(self.coeffs, other.coeffs))
def __rmul__(self, other):
if isscalar(other):
return poly1d(other * self.coeffs)
else:
other = poly1d(other)
return poly1d(polymul(self.coeffs, other.coeffs))
def __add__(self, other):
other = poly1d(other)
return poly1d(polyadd(self.coeffs, other.coeffs))
def __radd__(self, other):
other = poly1d(other)
return poly1d(polyadd(self.coeffs, other.coeffs))
def __pow__(self, val):
if not isscalar(val) or int(val) != val or val < 0:
raise ValueError, "Power to non-negative integers only."
res = [1]
for _ in range(val):
res = polymul(self.coeffs, res)
return poly1d(res)
def __sub__(self, other):
other = poly1d(other)
return poly1d(polysub(self.coeffs, other.coeffs))
def __rsub__(self, other):
other = poly1d(other)
return poly1d(polysub(other.coeffs, self.coeffs))
def __div__(self, other):
if isscalar(other):
return poly1d(self.coeffs/other)
else:
other = poly1d(other)
return polydiv(self, other)
def __rdiv__(self, other):
if isscalar(other):
return poly1d(other/self.coeffs)
else:
other = poly1d(other)
return polydiv(other, self)
def __eq__(self, other):
return NX.alltrue(self.coeffs == other.coeffs)
def __ne__(self, other):
return NX.any(self.coeffs != other.coeffs)
def __setattr__(self, key, val):
raise ValueError, "Attributes cannot be changed this way."
def __getattr__(self, key):
if key in ['r', 'roots']:
return roots(self.coeffs)
elif key in ['c','coef','coefficients']:
return self.coeffs
elif key in ['o']:
return self.order
else:
try:
return self.__dict__[key]
except KeyError:
raise AttributeError("'%s' has no attribute '%s'" % (self.__class__, key))
def __getitem__(self, val):
ind = self.order - val
if val > self.order:
return 0
if val < 0:
return 0
return self.coeffs[ind]
def __setitem__(self, key, val):
ind = self.order - key
if key < 0:
raise ValueError, "Does not support negative powers."
if key > self.order:
zr = NX.zeros(key-self.order, self.coeffs.dtype)
self.__dict__['coeffs'] = NX.concatenate((zr, self.coeffs))
self.__dict__['order'] = key
ind = 0
self.__dict__['coeffs'][ind] = val
return
def __iter__(self):
return iter(self.coeffs)
def integ(self, m=1, k=0):
"""
Return an antiderivative (indefinite integral) of this polynomial.
Refer to `polyint` for full documentation.
See Also
--------
polyint : equivalent function
"""
return poly1d(polyint(self.coeffs, m=m, k=k))
def deriv(self, m=1):
"""
Return a derivative of this polynomial.
Refer to `polyder` for full documentation.
See Also
--------
polyder : equivalent function
"""
return poly1d(polyder(self.coeffs, m=m))
# Stuff to do on module import
warnings.simplefilter('always',RankWarning)
|
mit
|
PlayUAV/MissionPlanner
|
Lib/site-packages/numpy/lib/npyio.py
|
53
|
59490
|
__all__ = ['savetxt', 'loadtxt', 'genfromtxt', 'ndfromtxt', 'mafromtxt',
'recfromtxt', 'recfromcsv', 'load', 'loads', 'save', 'savez',
'savez_compressed', 'packbits', 'unpackbits', 'fromregex', 'DataSource']
import numpy as np
import format
import sys
import os
import itertools
import warnings
from operator import itemgetter
from cPickle import load as _cload, loads
from _datasource import DataSource
if sys.platform != 'cli':
from _compiled_base import packbits, unpackbits
else:
def packbits(*args, **kw):
raise NotImplementedError()
def unpackbits(*args, **kw):
raise NotImplementedError()
from _iotools import LineSplitter, NameValidator, StringConverter, \
ConverterError, ConverterLockError, ConversionWarning, \
_is_string_like, has_nested_fields, flatten_dtype, \
easy_dtype, _bytes_to_name
from numpy.compat import asbytes, asstr, asbytes_nested, bytes
if sys.version_info[0] >= 3:
from io import BytesIO
else:
from cStringIO import StringIO as BytesIO
_string_like = _is_string_like
def seek_gzip_factory(f):
"""Use this factory to produce the class so that we can do a lazy
import on gzip.
"""
import gzip
class GzipFile(gzip.GzipFile):
def seek(self, offset, whence=0):
# figure out new position (we can only seek forwards)
if whence == 1:
offset = self.offset + offset
if whence not in [0, 1]:
raise IOError, "Illegal argument"
if offset < self.offset:
# for negative seek, rewind and do positive seek
self.rewind()
count = offset - self.offset
for i in range(count // 1024):
self.read(1024)
self.read(count % 1024)
def tell(self):
return self.offset
if isinstance(f, str):
f = GzipFile(f)
elif isinstance(f, gzip.GzipFile):
# cast to our GzipFile if its already a gzip.GzipFile
g = GzipFile(fileobj=f.fileobj)
g.name = f.name
g.mode = f.mode
f = g
return f
class BagObj(object):
"""
BagObj(obj)
Convert attribute look-ups to getitems on the object passed in.
Parameters
----------
obj : class instance
Object on which attribute look-up is performed.
Examples
--------
>>> from numpy.lib.npyio import BagObj as BO
>>> class BagDemo(object):
... def __getitem__(self, key): # An instance of BagObj(BagDemo)
... # will call this method when any
... # attribute look-up is required
... result = "Doesn't matter what you want, "
... return result + "you're gonna get this"
...
>>> demo_obj = BagDemo()
>>> bagobj = BO(demo_obj)
>>> bagobj.hello_there
"Doesn't matter what you want, you're gonna get this"
>>> bagobj.I_can_be_anything
"Doesn't matter what you want, you're gonna get this"
"""
def __init__(self, obj):
self._obj = obj
def __getattribute__(self, key):
try:
return object.__getattribute__(self, '_obj')[key]
except KeyError:
raise AttributeError, key
def zipfile_factory(*args, **kwargs):
import zipfile
if sys.version_info >= (2, 5):
kwargs['allowZip64'] = True
return zipfile.ZipFile(*args, **kwargs)
class NpzFile(object):
"""
NpzFile(fid)
A dictionary-like object with lazy-loading of files in the zipped
archive provided on construction.
`NpzFile` is used to load files in the NumPy ``.npz`` data archive
format. It assumes that files in the archive have a ".npy" extension,
other files are ignored.
The arrays and file strings are lazily loaded on either
getitem access using ``obj['key']`` or attribute lookup using
``obj.f.key``. A list of all files (without ".npy" extensions) can
be obtained with ``obj.files`` and the ZipFile object itself using
``obj.zip``.
Attributes
----------
files : list of str
List of all files in the archive with a ".npy" extension.
zip : ZipFile instance
The ZipFile object initialized with the zipped archive.
    f : BagObj instance
        An object on which attribute look-up can be performed as an
        alternative to getitem access on the `NpzFile` instance itself.
Parameters
----------
fid : file or str
The zipped archive to open. This is either a file-like object
or a string containing the path to the archive.
own_fid : bool, optional
Whether NpzFile should close the file handle.
Requires that `fid` is a file-like object.
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> y = np.sin(x)
>>> np.savez(outfile, x=x, y=y)
>>> outfile.seek(0)
>>> npz = np.load(outfile)
>>> isinstance(npz, np.lib.io.NpzFile)
True
>>> npz.files
['y', 'x']
>>> npz['x'] # getitem access
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> npz.f.x # attribute lookup
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
def __init__(self, fid, own_fid=False):
# Import is postponed to here since zipfile depends on gzip, an optional
# component of the so-called standard library.
_zip = zipfile_factory(fid)
self._files = _zip.namelist()
self.files = []
for x in self._files:
if x.endswith('.npy'):
self.files.append(x[:-4])
else:
self.files.append(x)
self.zip = _zip
self.f = BagObj(self)
if own_fid:
self.fid = fid
else:
self.fid = None
def close(self):
"""
Close the file.
"""
if self.zip is not None:
self.zip.close()
self.zip = None
if self.fid is not None:
self.fid.close()
self.fid = None
def __del__(self):
self.close()
def __getitem__(self, key):
# FIXME: This seems like it will copy strings around
# more than is strictly necessary. The zipfile
# will read the string and then
# the format.read_array will copy the string
# to another place in memory.
# It would be better if the zipfile could read
# (or at least uncompress) the data
# directly into the array memory.
member = 0
if key in self._files:
member = 1
elif key in self.files:
member = 1
key += '.npy'
if member:
bytes = self.zip.read(key)
if bytes.startswith(format.MAGIC_PREFIX):
value = BytesIO(bytes)
return format.read_array(value)
else:
return bytes
else:
raise KeyError, "%s is not a file in the archive" % key
def __iter__(self):
return iter(self.files)
def items(self):
"""
Return a list of tuples, with each tuple (filename, array in file).
"""
return [(f, self[f]) for f in self.files]
def iteritems(self):
"""Generator that returns tuples (filename, array in file)."""
for f in self.files:
yield (f, self[f])
def keys(self):
"""Return files in the archive with a ".npy" extension."""
return self.files
def iterkeys(self):
"""Return an iterator over the files in the archive."""
return self.__iter__()
def __contains__(self, key):
return self.files.__contains__(key)
def load(file, mmap_mode=None):
"""
Load a pickled, ``.npy``, or ``.npz`` binary file.
Parameters
----------
file : file-like object or string
The file to read. It must support ``seek()`` and ``read()`` methods.
If the filename extension is ``.gz``, the file is first decompressed.
    mmap_mode : {None, 'r+', 'r', 'w+', 'c'}, optional
If not None, then memory-map the file, using the given mode
(see `numpy.memmap`). The mode has no effect for pickled or
zipped files.
A memory-mapped array is stored on disk, and not directly loaded
into memory. However, it can be accessed and sliced like any
ndarray. Memory mapping is especially useful for accessing
small fragments of large files without reading the entire file
into memory.
Returns
-------
result : array, tuple, dict, etc.
Data stored in the file.
Raises
------
IOError
If the input file does not exist or cannot be read.
See Also
--------
save, savez, loadtxt
memmap : Create a memory-map to an array stored in a file on disk.
Notes
-----
- If the file contains pickle data, then whatever is stored in the
pickle is returned.
- If the file is a ``.npy`` file, then an array is returned.
- If the file is a ``.npz`` file, then a dictionary-like object is
returned, containing ``{filename: array}`` key-value pairs, one for
each file in the archive.
Examples
--------
Store data to disk, and load it again:
>>> np.save('/tmp/123', np.array([[1, 2, 3], [4, 5, 6]]))
>>> np.load('/tmp/123.npy')
array([[1, 2, 3],
[4, 5, 6]])
Mem-map the stored array, and then access the second row
directly from disk:
>>> X = np.load('/tmp/123.npy', mmap_mode='r')
>>> X[1, :]
memmap([4, 5, 6])
"""
import gzip
own_fid = False
if isinstance(file, basestring):
fid = open(file, "rb")
own_fid = True
elif isinstance(file, gzip.GzipFile):
fid = seek_gzip_factory(file)
own_fid = True
else:
fid = file
try:
        # Code to distinguish between NumPy binary files and pickles.
_ZIP_PREFIX = asbytes('PK\x03\x04')
N = len(format.MAGIC_PREFIX)
magic = fid.read(N)
fid.seek(-N, 1) # back-up
if magic.startswith(_ZIP_PREFIX): # zip-file (assume .npz)
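            # Ownership of fid passes to the NpzFile, so suppress the
            # close in the finally clause below.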
own_fid = False
return NpzFile(fid, own_fid=True)
elif magic == format.MAGIC_PREFIX: # .npy file
if mmap_mode:
return format.open_memmap(file, mode=mmap_mode)
else:
return format.read_array(fid)
else: # Try a pickle
try:
return _cload(fid)
except:
raise IOError, \
"Failed to interpret file %s as a pickle" % repr(file)
finally:
if own_fid:
fid.close()
def save(file, arr):
"""
Save an array to a binary file in NumPy ``.npy`` format.
Parameters
----------
file : file or str
File or filename to which the data is saved. If file is a file-object,
then the filename is unchanged. If file is a string, a ``.npy``
extension will be appended to the file name if it does not already
have one.
arr : array_like
Array data to be saved.
See Also
--------
savez : Save several arrays into a ``.npz`` archive
savetxt, load
Notes
-----
For a description of the ``.npy`` format, see `format`.
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> np.save(outfile, x)
>>> outfile.seek(0) # Only needed here to simulate closing & reopening file
>>> np.load(outfile)
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
own_fid = False
if isinstance(file, basestring):
if not file.endswith('.npy'):
file = file + '.npy'
fid = open(file, "wb")
own_fid = True
else:
fid = file
try:
arr = np.asanyarray(arr)
format.write_array(fid, arr)
finally:
if own_fid:
fid.close()
def savez(file, *args, **kwds):
"""
Save several arrays into a single file in uncompressed ``.npz`` format.
If arguments are passed in with no keywords, the corresponding variable
names, in the .npz file, are 'arr_0', 'arr_1', etc. If keyword arguments
are given, the corresponding variable names, in the ``.npz`` file will
match the keyword names.
Parameters
----------
file : str or file
Either the file name (string) or an open file (file-like object)
where the data will be saved. If file is a string, the ``.npz``
extension will be appended to the file name if it is not already there.
*args : Arguments, optional
Arrays to save to the file. Since it is not possible for Python to
know the names of the arrays outside `savez`, the arrays will be saved
with names "arr_0", "arr_1", and so on. These arguments can be any
expression.
**kwds : Keyword arguments, optional
Arrays to save to the file. Arrays will be saved in the file with the
keyword names.
Returns
-------
None
See Also
--------
save : Save a single array to a binary file in NumPy format.
    savetxt : Save an array to a file as plain text.
    savez_compressed : Save several arrays into a compressed ``.npz`` archive.
Notes
-----
The ``.npz`` file format is a zipped archive of files named after the
variables they contain. The archive is not compressed and each file
in the archive contains one variable in ``.npy`` format. For a
description of the ``.npy`` format, see `format`.
When opening the saved ``.npz`` file with `load` a `NpzFile` object is
returned. This is a dictionary-like object which can be queried for
its list of arrays (with the ``.files`` attribute), and for the arrays
themselves.
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> y = np.sin(x)
Using `savez` with *args, the arrays are saved with default names.
>>> np.savez(outfile, x, y)
>>> outfile.seek(0) # Only needed here to simulate closing & reopening file
>>> npzfile = np.load(outfile)
>>> npzfile.files
['arr_1', 'arr_0']
>>> npzfile['arr_0']
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
Using `savez` with **kwds, the arrays are saved with the keyword names.
>>> outfile = TemporaryFile()
>>> np.savez(outfile, x=x, y=y)
>>> outfile.seek(0)
>>> npzfile = np.load(outfile)
>>> npzfile.files
['y', 'x']
>>> npzfile['x']
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
_savez(file, args, kwds, False)
def savez_compressed(file, *args, **kwds):
"""
Save several arrays into a single file in compressed ``.npz`` format.
If keyword arguments are given, then filenames are taken from the keywords.
If arguments are passed in with no keywords, then stored file names are
arr_0, arr_1, etc.
Parameters
----------
file : string
File name of .npz file.
args : Arguments
Function arguments.
kwds : Keyword arguments
Keywords.
See Also
--------
numpy.savez : Save several arrays into an uncompressed .npz file format
"""
_savez(file, args, kwds, True)
def _savez(file, args, kwds, compress):
# Import is postponed to here since zipfile depends on gzip, an optional
# component of the so-called standard library.
import zipfile
# Import deferred for startup time improvement
import tempfile
if isinstance(file, basestring):
if not file.endswith('.npz'):
file = file + '.npz'
namedict = kwds
for i, val in enumerate(args):
key = 'arr_%d' % i
if key in namedict.keys():
raise ValueError, "Cannot use un-named variables and keyword %s" % key
namedict[key] = val
if compress:
compression = zipfile.ZIP_DEFLATED
else:
compression = zipfile.ZIP_STORED
zip = zipfile_factory(file, mode="w", compression=compression)
# Stage arrays in a temporary file on disk, before writing to zip.
fd, tmpfile = tempfile.mkstemp(suffix='-numpy.npy')
os.close(fd)
try:
for key, val in namedict.iteritems():
fname = key + '.npy'
fid = open(tmpfile, 'wb')
try:
format.write_array(fid, np.asanyarray(val))
fid.close()
fid = None
zip.write(tmpfile, arcname=fname)
finally:
if fid:
fid.close()
finally:
os.remove(tmpfile)
zip.close()
# Adapted from matplotlib
def _getconv(dtype):
typ = dtype.type
if issubclass(typ, np.bool_):
return lambda x: bool(int(x))
if issubclass(typ, np.integer):
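        # parse via float first so strings like '1.0' or '1e3' still convert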
return lambda x: int(float(x))
elif issubclass(typ, np.floating):
return float
elif issubclass(typ, np.complex):
return complex
elif issubclass(typ, np.bytes_):
return bytes
else:
return str
def loadtxt(fname, dtype=float, comments='#', delimiter=None,
converters=None, skiprows=0, usecols=None, unpack=False):
"""
Load data from a text file.
Each row in the text file must have the same number of values.
Parameters
----------
fname : file or str
File or filename to read. If the filename extension is ``.gz`` or
``.bz2``, the file is first decompressed.
dtype : data-type, optional
Data-type of the resulting array; default: float. If this is a record
data-type, the resulting array will be 1-dimensional, and each row
will be interpreted as an element of the array. In this case, the
number of columns used must match the number of fields in the
data-type.
comments : str, optional
The character used to indicate the start of a comment; default: '#'.
delimiter : str, optional
The string used to separate values. By default, this is any
whitespace.
converters : dict, optional
A dictionary mapping column number to a function that will convert
that column to a float. E.g., if column 0 is a date string:
``converters = {0: datestr2num}``. Converters can also be used to
provide a default value for missing data:
``converters = {3: lambda s: float(s or 0)}``. Default: None.
skiprows : int, optional
Skip the first `skiprows` lines; default: 0.
usecols : sequence, optional
Which columns to read, with 0 being the first. For example,
``usecols = (1,4,5)`` will extract the 2nd, 5th and 6th columns.
The default, None, results in all columns being read.
unpack : bool, optional
If True, the returned array is transposed, so that arguments may be
unpacked using ``x, y, z = loadtxt(...)``. The default is False.
Returns
-------
out : ndarray
Data read from the text file.
See Also
--------
load, fromstring, fromregex
genfromtxt : Load data with missing values handled as specified.
scipy.io.loadmat : reads MATLAB data files
Notes
-----
This function aims to be a fast reader for simply formatted files. The
`genfromtxt` function provides more sophisticated handling of, e.g.,
lines with missing values.
Examples
--------
>>> from StringIO import StringIO # StringIO behaves like a file object
>>> c = StringIO("0 1\\n2 3")
>>> np.loadtxt(c)
array([[ 0., 1.],
[ 2., 3.]])
>>> d = StringIO("M 21 72\\nF 35 58")
>>> np.loadtxt(d, dtype={'names': ('gender', 'age', 'weight'),
... 'formats': ('S1', 'i4', 'f4')})
array([('M', 21, 72.0), ('F', 35, 58.0)],
dtype=[('gender', '|S1'), ('age', '<i4'), ('weight', '<f4')])
>>> c = StringIO("1,0,2\\n3,0,4")
>>> x, y = np.loadtxt(c, delimiter=',', usecols=(0, 2), unpack=True)
>>> x
array([ 1., 3.])
>>> y
array([ 2., 4.])
"""
# Type conversions for Py3 convenience
comments = asbytes(comments)
if delimiter is not None:
delimiter = asbytes(delimiter)
user_converters = converters
if usecols is not None:
usecols = list(usecols)
own_fh = False
if _is_string_like(fname):
own_fh = True
if fname.endswith('.gz'):
fh = seek_gzip_factory(fname)
elif fname.endswith('.bz2'):
import bz2
fh = bz2.BZ2File(fname)
else:
fh = open(fname, 'U')
elif hasattr(fname, 'readline'):
fh = fname
else:
raise ValueError('fname must be a string or file handle')
X = []
def flatten_dtype(dt):
"""Unpack a structured data-type."""
if dt.names is None:
# If the dtype is flattened, return.
# If the dtype has a shape, the dtype occurs
# in the list more than once.
return [dt.base] * int(np.prod(dt.shape))
else:
types = []
for field in dt.names:
tp, bytes = dt.fields[field]
flat_dt = flatten_dtype(tp)
types.extend(flat_dt)
return types
def split_line(line):
"""Chop off comments, strip, and split at delimiter."""
line = asbytes(line).split(comments)[0].strip()
if line:
return line.split(delimiter)
else:
return []
try:
# Make sure we're dealing with a proper dtype
dtype = np.dtype(dtype)
defconv = _getconv(dtype)
# Skip the first `skiprows` lines
for i in xrange(skiprows):
fh.readline()
# Read until we find a line with some values, and use
# it to estimate the number of columns, N.
first_vals = None
while not first_vals:
first_line = fh.readline()
if not first_line: # EOF reached
raise IOError('End-of-file reached before encountering data.')
first_vals = split_line(first_line)
N = len(usecols or first_vals)
dtype_types = flatten_dtype(dtype)
if len(dtype_types) > 1:
# We're dealing with a structured array, each field of
# the dtype matches a column
converters = [_getconv(dt) for dt in dtype_types]
else:
# All fields have the same dtype
converters = [defconv for i in xrange(N)]
# By preference, use the converters specified by the user
for i, conv in (user_converters or {}).iteritems():
if usecols:
try:
i = usecols.index(i)
except ValueError:
# Unused converter specified
continue
converters[i] = conv
# Parse each line, including the first
for i, line in enumerate(itertools.chain([first_line], fh)):
vals = split_line(line)
if len(vals) == 0:
continue
if usecols:
vals = [vals[i] for i in usecols]
# Convert each value according to its column and store
X.append(tuple([conv(val) for (conv, val) in zip(converters, vals)]))
finally:
if own_fh:
fh.close()
if len(dtype_types) > 1:
# We're dealing with a structured array, with a dtype such as
# [('x', int), ('y', [('s', int), ('t', float)])]
#
# First, create the array using a flattened dtype:
# [('x', int), ('s', int), ('t', float)]
#
# Then, view the array using the specified dtype.
try:
X = np.array(X, dtype=np.dtype([('', t) for t in dtype_types]))
X = X.view(dtype)
except TypeError:
# In the case we have an object dtype
X = np.array(X, dtype=dtype)
else:
X = np.array(X, dtype)
X = np.squeeze(X)
if unpack:
return X.T
else:
return X
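# --- Hedged sketch (not part of the original module): the flatten-then-view
# strategy loadtxt uses above for structured dtypes, shown in isolation. The
# helper name `_demo_nested_dtype_view` is illustrative only.
def _demo_nested_dtype_view():
    # Build rows with a flat anonymous dtype, then reinterpret them with the
    # nested dtype -- exactly what loadtxt does after parsing.
    nested = np.dtype([('x', int), ('y', [('s', int), ('t', float)])])
    flat_types = [int, int, float]  # what flatten_dtype(nested) would return
    rows = [(1, 2, 3.0), (4, 5, 6.0)]
    X = np.array(rows, dtype=np.dtype([('', t) for t in flat_types]))
    return X.view(nested)  # e.g. X.view(nested)['y']['t'] -> array([ 3.,  6.])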
def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n'):
"""
Save an array to a text file.
Parameters
----------
fname : filename or file handle
If the filename ends in ``.gz``, the file is automatically saved in
compressed gzip format. `loadtxt` understands gzipped files
transparently.
X : array_like
Data to be saved to a text file.
fmt : str or sequence of strs
A single format (%10.5f), a sequence of formats, or a
multi-format string, e.g. 'Iteration %d -- %10.5f', in which
case `delimiter` is ignored.
delimiter : str
Character separating columns.
newline : str
        Character separating lines.

        .. versionadded:: 1.5.0
See Also
--------
save : Save an array to a binary file in NumPy ``.npy`` format
savez : Save several arrays into a ``.npz`` compressed archive
Notes
-----
Further explanation of the `fmt` parameter
(``%[flag]width[.precision]specifier``):
flags:
``-`` : left justify
        ``+`` : Forces the result to be preceded with + or -.
``0`` : Left pad the number with zeros instead of space (see width).
width:
Minimum number of characters to be printed. The value is not truncated
if it has more characters.
precision:
- For integer specifiers (eg. ``d,i,o,x``), the minimum number of
digits.
- For ``e, E`` and ``f`` specifiers, the number of digits to print
after the decimal point.
- For ``g`` and ``G``, the maximum number of significant digits.
- For ``s``, the maximum number of characters.
specifiers:
``c`` : character
``d`` or ``i`` : signed decimal integer
``e`` or ``E`` : scientific notation with ``e`` or ``E``.
``f`` : decimal floating point
``g,G`` : use the shorter of ``e,E`` or ``f``
``o`` : signed octal
``s`` : string of characters
``u`` : unsigned decimal integer
``x,X`` : unsigned hexadecimal integer
    This explanation of ``fmt`` is not complete; for an exhaustive
    specification see [1]_.
References
----------
.. [1] `Format Specification Mini-Language
<http://docs.python.org/library/string.html#
format-specification-mini-language>`_, Python Documentation.
Examples
--------
>>> x = y = z = np.arange(0.0,5.0,1.0)
    >>> np.savetxt('test.out', x, delimiter=',')   # x is an array
>>> np.savetxt('test.out', (x,y,z)) # x,y,z equal sized 1D arrays
>>> np.savetxt('test.out', x, fmt='%1.4e') # use exponential notation
"""
# Py3 conversions first
if isinstance(fmt, bytes):
fmt = asstr(fmt)
delimiter = asstr(delimiter)
own_fh = False
if _is_string_like(fname):
own_fh = True
if fname.endswith('.gz'):
import gzip
fh = gzip.open(fname, 'wb')
else:
if sys.version_info[0] >= 3:
fh = open(fname, 'wb')
else:
fh = open(fname, 'w')
elif hasattr(fname, 'seek'):
fh = fname
else:
raise ValueError('fname must be a string or file handle')
try:
X = np.asarray(X)
# Handle 1-dimensional arrays
if X.ndim == 1:
# Common case -- 1d array of numbers
if X.dtype.names is None:
X = np.atleast_2d(X).T
ncol = 1
# Complex dtype -- each field indicates a separate column
else:
ncol = len(X.dtype.descr)
else:
ncol = X.shape[1]
# `fmt` can be a string with multiple insertion points or a
        # list of formats. E.g. '%10.5f\t%10d' or ('%10.5f', '%10d')
if type(fmt) in (list, tuple):
if len(fmt) != ncol:
raise AttributeError('fmt has wrong shape. %s' % str(fmt))
format = asstr(delimiter).join(map(asstr, fmt))
elif type(fmt) is str:
if fmt.count('%') == 1:
fmt = [fmt, ]*ncol
format = delimiter.join(fmt)
elif fmt.count('%') != ncol:
raise AttributeError('fmt has wrong number of %% formats. %s'
% fmt)
else:
format = fmt
for row in X:
fh.write(asbytes(format % tuple(row) + newline))
finally:
if own_fh:
fh.close()
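# --- Hedged sketch (not part of the original module): savetxt with a
# multi-insertion-point `fmt`, the branch above where the whole string becomes
# the row format and `delimiter` is ignored. 'demo.txt' is illustrative.
def _demo_savetxt_multiformat():
    X = np.column_stack((np.arange(3.0), np.arange(3.0) ** 2))
    # fmt.count('%') == ncol, so each row renders as 'Iteration <d> -- <f>'.
    savetxt('demo.txt', X, fmt='Iteration %d -- %10.5f')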
import re
def fromregex(file, regexp, dtype):
"""
Construct an array from a text file, using regular expression parsing.
The returned array is always a structured array, and is constructed from
all matches of the regular expression in the file. Groups in the regular
expression are converted to fields of the structured array.
Parameters
----------
file : str or file
File name or file object to read.
regexp : str or regexp
Regular expression used to parse the file.
Groups in the regular expression correspond to fields in the dtype.
dtype : dtype or list of dtypes
Dtype for the structured array.
Returns
-------
output : ndarray
The output array, containing the part of the content of `file` that
was matched by `regexp`. `output` is always a structured array.
Raises
------
TypeError
When `dtype` is not a valid dtype for a structured array.
See Also
--------
fromstring, loadtxt
Notes
-----
Dtypes for structured arrays can be specified in several forms, but all
forms specify at least the data type and field name. For details see
`doc.structured_arrays`.
Examples
--------
>>> f = open('test.dat', 'w')
>>> f.write("1312 foo\\n1534 bar\\n444 qux")
>>> f.close()
>>> regexp = r"(\\d+)\\s+(...)" # match [digits, whitespace, anything]
>>> output = np.fromregex('test.dat', regexp,
... [('num', np.int64), ('key', 'S3')])
>>> output
array([(1312L, 'foo'), (1534L, 'bar'), (444L, 'qux')],
dtype=[('num', '<i8'), ('key', '|S3')])
>>> output['num']
array([1312, 1534, 444], dtype=int64)
"""
own_fh = False
if not hasattr(file, "read"):
file = open(file, 'rb')
own_fh = True
try:
if not hasattr(regexp, 'match'):
regexp = re.compile(asbytes(regexp))
if not isinstance(dtype, np.dtype):
dtype = np.dtype(dtype)
seq = regexp.findall(file.read())
if seq and not isinstance(seq[0], tuple):
# Only one group is in the regexp.
# Create the new array as a single data-type and then
# re-interpret as a single-field structured array.
newdtype = np.dtype(dtype[dtype.names[0]])
output = np.array(seq, dtype=newdtype)
output.dtype = dtype
else:
output = np.array(seq, dtype=dtype)
return output
finally:
if own_fh:
            file.close()
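# --- Hedged sketch (not part of the original module): the single-group branch
# of fromregex above, where one regexp group yields a plain array that is then
# reinterpreted as a one-field structured array. Uses a throwaway temp file.
def _demo_fromregex_single_group():
    import tempfile
    with tempfile.NamedTemporaryFile(mode='w', suffix='.dat',
                                     delete=False) as f:
        f.write("id=10\nid=20\n")
        path = f.name
    out = fromregex(path, r"id=(\d+)", [('id', np.int64)])
    return out  # array([(10,), (20,)], dtype=[('id', '<i8')])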
#####--------------------------------------------------------------------------
#---- --- ASCII functions ---
#####--------------------------------------------------------------------------
def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
skiprows=0, skip_header=0, skip_footer=0, converters=None,
missing='', missing_values=None, filling_values=None,
usecols=None, names=None,
excludelist=None, deletechars=None, replace_space='_',
autostrip=False, case_sensitive=True, defaultfmt="f%i",
unpack=None, usemask=False, loose=True, invalid_raise=True):
"""
Load data from a text file, with missing values handled as specified.
Each line past the first `skiprows` lines is split at the `delimiter`
character, and characters following the `comments` character are discarded.
Parameters
----------
fname : file or str
File or filename to read. If the filename extension is `.gz` or
`.bz2`, the file is first decompressed.
dtype : dtype, optional
Data type of the resulting array.
If None, the dtypes will be determined by the contents of each
column, individually.
comments : str, optional
The character used to indicate the start of a comment.
        All the characters occurring on a line after a comment are discarded.
delimiter : str, int, or sequence, optional
The string used to separate values. By default, any consecutive
whitespaces act as delimiter. An integer or sequence of integers
can also be provided as width(s) of each field.
    skip_header : int, optional
        The number of lines to skip at the beginning of the file.
    skip_footer : int, optional
        The number of lines to skip at the end of the file.
converters : variable or None, optional
The set of functions that convert the data of a column to a value.
The converters can also be used to provide a default value
for missing data: ``converters = {3: lambda s: float(s or 0)}``.
missing_values : variable or None, optional
The set of strings corresponding to missing data.
filling_values : variable or None, optional
The set of values to be used as default when the data are missing.
usecols : sequence or None, optional
Which columns to read, with 0 being the first. For example,
``usecols = (1, 4, 5)`` will extract the 2nd, 5th and 6th columns.
names : {None, True, str, sequence}, optional
        If `names` is True, the field names are read from the first valid line
        after the first `skip_header` lines.
        If `names` is a sequence or a single string of comma-separated names,
the names will be used to define the field names in a structured dtype.
If `names` is None, the names of the dtype fields will be used, if any.
excludelist : sequence, optional
A list of names to exclude. This list is appended to the default list
        ['return','file','print']. Excluded names have an underscore appended:
for example, `file` would become `file_`.
deletechars : str, optional
A string combining invalid characters that must be deleted from the
names.
defaultfmt : str, optional
A format used to define default field names, such as "f%i" or "f_%02i".
autostrip : bool, optional
Whether to automatically strip white spaces from the variables.
replace_space : char, optional
        Character(s) used to replace white spaces in the variable names.
        By default, use a '_'.
case_sensitive : {True, False, 'upper', 'lower'}, optional
If True, field names are case sensitive.
If False or 'upper', field names are converted to upper case.
If 'lower', field names are converted to lower case.
unpack : bool, optional
If True, the returned array is transposed, so that arguments may be
        unpacked using ``x, y, z = genfromtxt(...)``.
usemask : bool, optional
If True, return a masked array.
If False, return a regular array.
invalid_raise : bool, optional
If True, an exception is raised if an inconsistency is detected in the
number of columns.
If False, a warning is emitted and the offending lines are skipped.
Returns
-------
out : ndarray
Data read from the text file. If `usemask` is True, this is a
masked array.
See Also
--------
numpy.loadtxt : equivalent function when no data is missing.
Notes
-----
* When spaces are used as delimiters, or when no delimiter has been given
as input, there should not be any missing data between two fields.
    * When the variables are named (either by a flexible dtype or with `names`),
there must not be any header in the file (else a ValueError
exception is raised).
* Individual values are not stripped of spaces by default.
When using a custom converter, make sure the function does remove spaces.
Examples
    --------
>>> from StringIO import StringIO
>>> import numpy as np
Comma delimited file with mixed dtype
>>> s = StringIO("1,1.3,abcde")
>>> data = np.genfromtxt(s, dtype=[('myint','i8'),('myfloat','f8'),
... ('mystring','S5')], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
Using dtype = None
>>> s.seek(0) # needed for StringIO example only
>>> data = np.genfromtxt(s, dtype=None,
... names = ['myint','myfloat','mystring'], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
Specifying dtype and names
>>> s.seek(0)
>>> data = np.genfromtxt(s, dtype="i8,f8,S5",
... names=['myint','myfloat','mystring'], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
An example with fixed-width columns
>>> s = StringIO("11.3abcde")
>>> data = np.genfromtxt(s, dtype=None, names=['intvar','fltvar','strvar'],
... delimiter=[1,3,5])
>>> data
array((1, 1.3, 'abcde'),
dtype=[('intvar', '<i8'), ('fltvar', '<f8'), ('strvar', '|S5')])
"""
# Py3 data conversions to bytes, for convenience
comments = asbytes(comments)
if isinstance(delimiter, unicode):
delimiter = asbytes(delimiter)
if isinstance(missing, unicode):
missing = asbytes(missing)
if isinstance(missing_values, (unicode, list, tuple)):
missing_values = asbytes_nested(missing_values)
#
if usemask:
from numpy.ma import MaskedArray, make_mask_descr
# Check the input dictionary of converters
user_converters = converters or {}
if not isinstance(user_converters, dict):
errmsg = "The input argument 'converter' should be a valid dictionary "\
"(got '%s' instead)"
raise TypeError(errmsg % type(user_converters))
# Initialize the filehandle, the LineSplitter and the NameValidator
own_fhd = False
if isinstance(fname, basestring):
fhd = np.lib._datasource.open(fname, 'U')
own_fhd = True
elif not hasattr(fname, 'read'):
raise TypeError("The input should be a string or a filehandle. "\
"(got %s instead)" % type(fname))
else:
fhd = fname
split_line = LineSplitter(delimiter=delimiter, comments=comments,
autostrip=autostrip)._handyman
validate_names = NameValidator(excludelist=excludelist,
deletechars=deletechars,
case_sensitive=case_sensitive,
replace_space=replace_space)
# Get the first valid lines after the first skiprows ones ..
if skiprows:
warnings.warn("The use of `skiprows` is deprecated.\n"\
"Please use `skip_header` instead.",
DeprecationWarning)
skip_header = skiprows
# Skip the first `skip_header` rows
for i in xrange(skip_header):
fhd.readline()
# Keep on until we find the first valid values
first_values = None
while not first_values:
first_line = fhd.readline()
if not first_line:
raise IOError('End-of-file reached before encountering data.')
if names is True:
if comments in first_line:
first_line = asbytes('').join(first_line.split(comments)[1:])
first_values = split_line(first_line)
# Should we take the first values as names ?
if names is True:
fval = first_values[0].strip()
if fval in comments:
del first_values[0]
# Check the columns to use: make sure `usecols` is a list
if usecols is not None:
try:
usecols = [_.strip() for _ in usecols.split(",")]
except AttributeError:
try:
usecols = list(usecols)
except TypeError:
usecols = [usecols, ]
nbcols = len(usecols or first_values)
# Check the names and overwrite the dtype.names if needed
if names is True:
names = validate_names([_bytes_to_name(_.strip())
for _ in first_values])
first_line = asbytes('')
elif _is_string_like(names):
names = validate_names([_.strip() for _ in names.split(',')])
elif names:
names = validate_names(names)
# Get the dtype
if dtype is not None:
dtype = easy_dtype(dtype, defaultfmt=defaultfmt, names=names)
# Make sure the names is a list (for 2.5)
if names is not None:
names = list(names)
if usecols:
for (i, current) in enumerate(usecols):
# if usecols is a list of names, convert to a list of indices
if _is_string_like(current):
usecols[i] = names.index(current)
elif current < 0:
usecols[i] = current + len(first_values)
# If the dtype is not None, make sure we update it
if (dtype is not None) and (len(dtype) > nbcols):
descr = dtype.descr
dtype = np.dtype([descr[_] for _ in usecols])
names = list(dtype.names)
# If `names` is not None, update the names
elif (names is not None) and (len(names) > nbcols):
names = [names[_] for _ in usecols]
elif (names is not None) and (dtype is not None):
names = dtype.names
# Process the missing values ...............................
# Rename missing_values for convenience
user_missing_values = missing_values or ()
# Define the list of missing_values (one column: one list)
missing_values = [list([asbytes('')]) for _ in range(nbcols)]
# We have a dictionary: process it field by field
if isinstance(user_missing_values, dict):
# Loop on the items
for (key, val) in user_missing_values.items():
# Is the key a string ?
if _is_string_like(key):
try:
# Transform it into an integer
key = names.index(key)
except ValueError:
# We couldn't find it: the name must have been dropped, then
continue
# Redefine the key as needed if it's a column number
if usecols:
try:
key = usecols.index(key)
except ValueError:
pass
# Transform the value as a list of string
if isinstance(val, (list, tuple)):
val = [str(_) for _ in val]
else:
val = [str(val), ]
# Add the value(s) to the current list of missing
if key is None:
# None acts as default
for miss in missing_values:
miss.extend(val)
else:
missing_values[key].extend(val)
# We have a sequence : each item matches a column
elif isinstance(user_missing_values, (list, tuple)):
for (value, entry) in zip(user_missing_values, missing_values):
value = str(value)
if value not in entry:
entry.append(value)
# We have a string : apply it to all entries
elif isinstance(user_missing_values, bytes):
user_value = user_missing_values.split(asbytes(","))
for entry in missing_values:
entry.extend(user_value)
# We have something else: apply it to all entries
else:
for entry in missing_values:
entry.extend([str(user_missing_values)])
# Process the deprecated `missing`
if missing != asbytes(''):
warnings.warn("The use of `missing` is deprecated.\n"\
"Please use `missing_values` instead.",
DeprecationWarning)
values = [str(_) for _ in missing.split(asbytes(","))]
for entry in missing_values:
entry.extend(values)
# Process the filling_values ...............................
# Rename the input for convenience
user_filling_values = filling_values or []
# Define the default
filling_values = [None] * nbcols
# We have a dictionary : update each entry individually
if isinstance(user_filling_values, dict):
for (key, val) in user_filling_values.items():
if _is_string_like(key):
try:
# Transform it into an integer
key = names.index(key)
except ValueError:
# We couldn't find it: the name must have been dropped, then
continue
# Redefine the key if it's a column number and usecols is defined
if usecols:
try:
key = usecols.index(key)
except ValueError:
pass
# Add the value to the list
filling_values[key] = val
# We have a sequence : update on a one-to-one basis
elif isinstance(user_filling_values, (list, tuple)):
n = len(user_filling_values)
if (n <= nbcols):
filling_values[:n] = user_filling_values
else:
filling_values = user_filling_values[:nbcols]
# We have something else : use it for all entries
else:
filling_values = [user_filling_values] * nbcols
# Initialize the converters ................................
if dtype is None:
# Note: we can't use a [...]*nbcols, as we would have 3 times the same
# ... converter, instead of 3 different converters.
converters = [StringConverter(None, missing_values=miss, default=fill)
for (miss, fill) in zip(missing_values, filling_values)]
else:
dtype_flat = flatten_dtype(dtype, flatten_base=True)
# Initialize the converters
if len(dtype_flat) > 1:
# Flexible type : get a converter from each dtype
zipit = zip(dtype_flat, missing_values, filling_values)
converters = [StringConverter(dt, locked=True,
missing_values=miss, default=fill)
for (dt, miss, fill) in zipit]
else:
# Set to a default converter (but w/ different missing values)
zipit = zip(missing_values, filling_values)
converters = [StringConverter(dtype, locked=True,
missing_values=miss, default=fill)
for (miss, fill) in zipit]
# Update the converters to use the user-defined ones
uc_update = []
for (i, conv) in user_converters.items():
# If the converter is specified by column names, use the index instead
if _is_string_like(i):
try:
i = names.index(i)
except ValueError:
continue
elif usecols:
try:
i = usecols.index(i)
except ValueError:
# Unused converter specified
continue
# Find the value to test:
if len(first_line):
testing_value = first_values[i]
else:
testing_value = None
converters[i].update(conv, locked=True,
testing_value=testing_value,
default=filling_values[i],
missing_values=missing_values[i],)
uc_update.append((i, conv))
# Make sure we have the corrected keys in user_converters...
user_converters.update(uc_update)
miss_chars = [_.missing_values for _ in converters]
# Initialize the output lists ...
# ... rows
rows = []
append_to_rows = rows.append
# ... masks
if usemask:
masks = []
append_to_masks = masks.append
# ... invalid
invalid = []
append_to_invalid = invalid.append
# Parse each line
for (i, line) in enumerate(itertools.chain([first_line, ], fhd)):
values = split_line(line)
nbvalues = len(values)
# Skip an empty line
if nbvalues == 0:
continue
# Select only the columns we need
if usecols:
try:
values = [values[_] for _ in usecols]
except IndexError:
append_to_invalid((i + skip_header + 1, nbvalues))
continue
elif nbvalues != nbcols:
append_to_invalid((i + skip_header + 1, nbvalues))
continue
# Store the values
append_to_rows(tuple(values))
if usemask:
append_to_masks(tuple([v.strip() in m
for (v, m) in zip(values, missing_values)]))
if own_fhd:
fhd.close()
# Upgrade the converters (if needed)
if dtype is None:
for (i, converter) in enumerate(converters):
current_column = map(itemgetter(i), rows)
try:
converter.iterupgrade(current_column)
except ConverterLockError:
errmsg = "Converter #%i is locked and cannot be upgraded: " % i
current_column = itertools.imap(itemgetter(i), rows)
for (j, value) in enumerate(current_column):
try:
converter.upgrade(value)
except (ConverterError, ValueError):
errmsg += "(occurred line #%i for value '%s')"
errmsg %= (j + 1 + skip_header, value)
raise ConverterError(errmsg)
# Check that we don't have invalid values
nbinvalid = len(invalid)
if nbinvalid > 0:
nbrows = len(rows) + nbinvalid - skip_footer
# Construct the error message
template = " Line #%%i (got %%i columns instead of %i)" % nbcols
if skip_footer > 0:
nbinvalid_skipped = len([_ for _ in invalid
if _[0] > nbrows + skip_header])
invalid = invalid[:nbinvalid - nbinvalid_skipped]
skip_footer -= nbinvalid_skipped
#
# nbrows -= skip_footer
# errmsg = [template % (i, nb)
# for (i, nb) in invalid if i < nbrows]
# else:
errmsg = [template % (i, nb)
for (i, nb) in invalid]
if len(errmsg):
errmsg.insert(0, "Some errors were detected !")
errmsg = "\n".join(errmsg)
# Raise an exception ?
if invalid_raise:
raise ValueError(errmsg)
# Issue a warning ?
else:
warnings.warn(errmsg, ConversionWarning)
# Strip the last skip_footer data
if skip_footer > 0:
rows = rows[:-skip_footer]
if usemask:
masks = masks[:-skip_footer]
# Convert each value according to the converter:
# We want to modify the list in place to avoid creating a new one...
# if loose:
# conversionfuncs = [conv._loose_call for conv in converters]
# else:
# conversionfuncs = [conv._strict_call for conv in converters]
# for (i, vals) in enumerate(rows):
# rows[i] = tuple([convert(val)
# for (convert, val) in zip(conversionfuncs, vals)])
if loose:
rows = zip(*[map(converter._loose_call, map(itemgetter(i), rows))
for (i, converter) in enumerate(converters)])
else:
rows = zip(*[map(converter._strict_call, map(itemgetter(i), rows))
for (i, converter) in enumerate(converters)])
# Reset the dtype
data = rows
if dtype is None:
# Get the dtypes from the types of the converters
column_types = [conv.type for conv in converters]
# Find the columns with strings...
strcolidx = [i for (i, v) in enumerate(column_types)
if v in (type('S'), np.string_)]
# ... and take the largest number of chars.
for i in strcolidx:
column_types[i] = "|S%i" % max(len(row[i]) for row in data)
#
if names is None:
# If the dtype is uniform, don't define names, else use ''
base = set([c.type for c in converters if c._checked])
if len(base) == 1:
(ddtype, mdtype) = (list(base)[0], np.bool)
else:
ddtype = [(defaultfmt % i, dt)
for (i, dt) in enumerate(column_types)]
if usemask:
mdtype = [(defaultfmt % i, np.bool)
for (i, dt) in enumerate(column_types)]
else:
ddtype = zip(names, column_types)
mdtype = zip(names, [np.bool] * len(column_types))
output = np.array(data, dtype=ddtype)
if usemask:
outputmask = np.array(masks, dtype=mdtype)
else:
# Overwrite the initial dtype names if needed
if names and dtype.names:
dtype.names = names
# Case 1. We have a structured type
if len(dtype_flat) > 1:
# Nested dtype, eg [('a', int), ('b', [('b0', int), ('b1', 'f4')])]
# First, create the array using a flattened dtype:
# [('a', int), ('b1', int), ('b2', float)]
# Then, view the array using the specified dtype.
if 'O' in (_.char for _ in dtype_flat):
if has_nested_fields(dtype):
errmsg = "Nested fields involving objects "\
"are not supported..."
raise NotImplementedError(errmsg)
else:
output = np.array(data, dtype=dtype)
else:
rows = np.array(data, dtype=[('', _) for _ in dtype_flat])
output = rows.view(dtype)
# Now, process the rowmasks the same way
if usemask:
rowmasks = np.array(masks,
dtype=np.dtype([('', np.bool)
for t in dtype_flat]))
# Construct the new dtype
mdtype = make_mask_descr(dtype)
outputmask = rowmasks.view(mdtype)
# Case #2. We have a basic dtype
else:
# We used some user-defined converters
if user_converters:
ishomogeneous = True
descr = []
for (i, ttype) in enumerate([conv.type for conv in converters]):
# Keep the dtype of the current converter
if i in user_converters:
ishomogeneous &= (ttype == dtype.type)
if ttype == np.string_:
ttype = "|S%i" % max(len(row[i]) for row in data)
descr.append(('', ttype))
else:
descr.append(('', dtype))
# So we changed the dtype ?
if not ishomogeneous:
# We have more than one field
if len(descr) > 1:
dtype = np.dtype(descr)
# We have only one field: drop the name if not needed.
else:
dtype = np.dtype(ttype)
#
output = np.array(data, dtype)
if usemask:
if dtype.names:
mdtype = [(_, np.bool) for _ in dtype.names]
else:
mdtype = np.bool
outputmask = np.array(masks, dtype=mdtype)
# Try to take care of the missing data we missed
names = output.dtype.names
if usemask and names:
for (name, conv) in zip(names or (), converters):
missing_values = [conv(_) for _ in conv.missing_values
if _ != asbytes('')]
for mval in missing_values:
outputmask[name] |= (output[name] == mval)
# Construct the final array
if usemask:
output = output.view(MaskedArray)
output._mask = outputmask
if unpack:
return output.squeeze().T
return output.squeeze()
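# --- Hedged sketch (not part of the original module): per-column
# filling_values, the central genfromtxt feature above. Assumes a bytes
# stream is accepted, matching this module's bytes-based parsing.
def _demo_genfromtxt_filling():
    from io import BytesIO
    raw = BytesIO(b"1,,3\n4,5,\n")
    # Empty fields are missing by default; fill column 1 with -999, column 2
    # with 0, giving array([[1., -999., 3.], [4., 5., 0.]]).
    return genfromtxt(raw, delimiter=',',
                      filling_values={1: -999.0, 2: 0.0})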
def ndfromtxt(fname, **kwargs):
"""
Load ASCII data stored in a file and return it as a single array.
Complete description of all the optional input parameters is available in
the docstring of the `genfromtxt` function.
See Also
--------
numpy.genfromtxt : generic function.
"""
kwargs['usemask'] = False
return genfromtxt(fname, **kwargs)
def mafromtxt(fname, **kwargs):
"""
Load ASCII data stored in a text file and return a masked array.
For a complete description of all the input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function to load ASCII data.
"""
kwargs['usemask'] = True
return genfromtxt(fname, **kwargs)
def recfromtxt(fname, **kwargs):
"""
Load ASCII data from a file and return it in a record array.
If ``usemask=False`` a standard `recarray` is returned,
if ``usemask=True`` a MaskedRecords array is returned.
Complete description of all the optional input parameters is available in
the docstring of the `genfromtxt` function.
See Also
--------
numpy.genfromtxt : generic function
Notes
-----
By default, `dtype` is None, which means that the data-type of the output
array will be determined from the data.
"""
kwargs.update(dtype=kwargs.get('dtype', None))
usemask = kwargs.get('usemask', False)
output = genfromtxt(fname, **kwargs)
if usemask:
from numpy.ma.mrecords import MaskedRecords
output = output.view(MaskedRecords)
else:
output = output.view(np.recarray)
return output
def recfromcsv(fname, **kwargs):
"""
Load ASCII data stored in a comma-separated file.
The returned array is a record array (if ``usemask=False``, see
`recarray`) or a masked record array (if ``usemask=True``,
see `ma.mrecords.MaskedRecords`).
For a complete description of all the input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function to load ASCII data.
"""
case_sensitive = kwargs.get('case_sensitive', "lower") or "lower"
names = kwargs.get('names', True)
if names is None:
names = True
    kwargs.update(dtype=kwargs.get('dtype', None),
delimiter=kwargs.get('delimiter', ",") or ",",
names=names,
case_sensitive=case_sensitive)
usemask = kwargs.get("usemask", False)
output = genfromtxt(fname, **kwargs)
if usemask:
from numpy.ma.mrecords import MaskedRecords
output = output.view(MaskedRecords)
else:
output = output.view(np.recarray)
return output
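# --- Hedged sketch (not part of the original module): recfromcsv defaults as
# wired above -- dtype=None, names read from the header, lower-cased fields.
def _demo_recfromcsv():
    from io import BytesIO
    raw = BytesIO(b"A,B\n1,2.5\n3,4.0\n")
    rec = recfromcsv(raw)
    return rec.a, rec.b  # (array([1, 3]), array([ 2.5,  4. ]))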
|
gpl-3.0
|
wlamond/scikit-learn
|
sklearn/calibration.py
|
37
|
20332
|
"""Calibration of predicted probabilities."""
# Author: Alexandre Gramfort <[email protected]>
# Balazs Kegl <[email protected]>
# Jan Hendrik Metzen <[email protected]>
# Mathieu Blondel <[email protected]>
#
# License: BSD 3 clause
from __future__ import division
import warnings
from math import log
import numpy as np
from scipy.optimize import fmin_bfgs
from sklearn.preprocessing import LabelEncoder
from .base import BaseEstimator, ClassifierMixin, RegressorMixin, clone
from .preprocessing import label_binarize, LabelBinarizer
from .utils import check_X_y, check_array, indexable, column_or_1d
from .utils.validation import check_is_fitted, check_consistent_length
from .utils.fixes import signature
from .isotonic import IsotonicRegression
from .svm import LinearSVC
from .model_selection import check_cv
from .metrics.classification import _check_binary_probabilistic_predictions
class CalibratedClassifierCV(BaseEstimator, ClassifierMixin):
"""Probability calibration with isotonic regression or sigmoid.
With this class, the base_estimator is fit on the train set of the
cross-validation generator and the test set is used for calibration.
The probabilities for each of the folds are then averaged
    for prediction. If cv="prefit" is passed to __init__,
it is assumed that base_estimator has been fitted already and all
data is used for calibration. Note that data for fitting the
classifier and for calibrating it must be disjoint.
Read more in the :ref:`User Guide <calibration>`.
Parameters
----------
base_estimator : instance BaseEstimator
The classifier whose output decision function needs to be calibrated
to offer more accurate predict_proba outputs. If cv=prefit, the
classifier must have been fit already on data.
method : 'sigmoid' or 'isotonic'
The method to use for calibration. Can be 'sigmoid' which
corresponds to Platt's method or 'isotonic' which is a
non-parametric approach. It is not advised to use isotonic calibration
with too few calibration samples ``(<<1000)`` since it tends to
overfit.
Use sigmoids (Platt's calibration) in this case.
cv : integer, cross-validation generator, iterable or "prefit", optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if ``y`` is binary or multiclass,
:class:`sklearn.model_selection.StratifiedKFold` is used. If ``y`` is
neither binary nor multiclass, :class:`sklearn.model_selection.KFold`
is used.
        Refer to the :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
If "prefit" is passed, it is assumed that base_estimator has been
fitted already and all data is used for calibration.
Attributes
----------
classes_ : array, shape (n_classes)
The class labels.
calibrated_classifiers_ : list (len() equal to cv or 1 if cv == "prefit")
        The list of calibrated classifiers, one for each cross-validation fold,
which has been fitted on all but the validation fold and calibrated
on the validation fold.
References
----------
.. [1] Obtaining calibrated probability estimates from decision trees
and naive Bayesian classifiers, B. Zadrozny & C. Elkan, ICML 2001
.. [2] Transforming Classifier Scores into Accurate Multiclass
Probability Estimates, B. Zadrozny & C. Elkan, (KDD 2002)
.. [3] Probabilistic Outputs for Support Vector Machines and Comparisons to
Regularized Likelihood Methods, J. Platt, (1999)
.. [4] Predicting Good Probabilities with Supervised Learning,
A. Niculescu-Mizil & R. Caruana, ICML 2005
"""
def __init__(self, base_estimator=None, method='sigmoid', cv=3):
self.base_estimator = base_estimator
self.method = method
self.cv = cv
def fit(self, X, y, sample_weight=None):
"""Fit the calibrated model
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,)
Target values.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted.
Returns
-------
self : object
Returns an instance of self.
"""
X, y = check_X_y(X, y, accept_sparse=['csc', 'csr', 'coo'],
force_all_finite=False)
X, y = indexable(X, y)
le = LabelBinarizer().fit(y)
self.classes_ = le.classes_
# Check that each cross-validation fold can have at least one
# example per class
n_folds = self.cv if isinstance(self.cv, int) \
else self.cv.n_folds if hasattr(self.cv, "n_folds") else None
if n_folds and \
np.any([np.sum(y == class_) < n_folds for class_ in
self.classes_]):
raise ValueError("Requesting %d-fold cross-validation but provided"
" less than %d examples for at least one class."
% (n_folds, n_folds))
self.calibrated_classifiers_ = []
if self.base_estimator is None:
# we want all classifiers that don't expose a random_state
# to be deterministic (and we don't want to expose this one).
base_estimator = LinearSVC(random_state=0)
else:
base_estimator = self.base_estimator
if self.cv == "prefit":
calibrated_classifier = _CalibratedClassifier(
base_estimator, method=self.method)
if sample_weight is not None:
calibrated_classifier.fit(X, y, sample_weight)
else:
calibrated_classifier.fit(X, y)
self.calibrated_classifiers_.append(calibrated_classifier)
else:
cv = check_cv(self.cv, y, classifier=True)
fit_parameters = signature(base_estimator.fit).parameters
estimator_name = type(base_estimator).__name__
if (sample_weight is not None
and "sample_weight" not in fit_parameters):
warnings.warn("%s does not support sample_weight. Samples"
" weights are only used for the calibration"
" itself." % estimator_name)
base_estimator_sample_weight = None
else:
if sample_weight is not None:
sample_weight = check_array(sample_weight, ensure_2d=False)
check_consistent_length(y, sample_weight)
base_estimator_sample_weight = sample_weight
for train, test in cv.split(X, y):
this_estimator = clone(base_estimator)
if base_estimator_sample_weight is not None:
this_estimator.fit(
X[train], y[train],
sample_weight=base_estimator_sample_weight[train])
else:
this_estimator.fit(X[train], y[train])
calibrated_classifier = _CalibratedClassifier(
this_estimator, method=self.method,
classes=self.classes_)
if sample_weight is not None:
calibrated_classifier.fit(X[test], y[test],
sample_weight[test])
else:
calibrated_classifier.fit(X[test], y[test])
self.calibrated_classifiers_.append(calibrated_classifier)
return self
def predict_proba(self, X):
"""Posterior probabilities of classification
This function returns posterior probabilities of classification
according to each class on an array of test vectors X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The samples.
Returns
-------
C : array, shape (n_samples, n_classes)
The predicted probas.
"""
check_is_fitted(self, ["classes_", "calibrated_classifiers_"])
X = check_array(X, accept_sparse=['csc', 'csr', 'coo'],
force_all_finite=False)
# Compute the arithmetic mean of the predictions of the calibrated
# classifiers
mean_proba = np.zeros((X.shape[0], len(self.classes_)))
for calibrated_classifier in self.calibrated_classifiers_:
proba = calibrated_classifier.predict_proba(X)
mean_proba += proba
mean_proba /= len(self.calibrated_classifiers_)
return mean_proba
def predict(self, X):
"""Predict the target of new samples. Can be different from the
prediction of the uncalibrated classifier.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The samples.
Returns
-------
C : array, shape (n_samples,)
The predicted class.
"""
check_is_fitted(self, ["classes_", "calibrated_classifiers_"])
return self.classes_[np.argmax(self.predict_proba(X), axis=1)]
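# --- Hedged sketch (not part of the original module): end-to-end use of the
# class above on synthetic data. make_classification is assumed importable
# from the surrounding scikit-learn package.
def _demo_calibrated_classifier_cv():
    from sklearn.datasets import make_classification
    X, y = make_classification(n_samples=200, random_state=0)
    clf = CalibratedClassifierCV(LinearSVC(random_state=0),
                                 method='sigmoid', cv=3)
    clf.fit(X, y)
    proba = clf.predict_proba(X)
    # Fold-averaged calibrated probabilities sum to one per sample.
    assert np.allclose(proba.sum(axis=1), 1.0)
    return proba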
class _CalibratedClassifier(object):
"""Probability calibration with isotonic regression or sigmoid.
It assumes that base_estimator has already been fit, and trains the
calibration on the input set of the fit function. Note that this class
should not be used as an estimator directly. Use CalibratedClassifierCV
with cv="prefit" instead.
Parameters
----------
base_estimator : instance BaseEstimator
The classifier whose output decision function needs to be calibrated
to offer more accurate predict_proba outputs. No default value since
it has to be an already fitted estimator.
method : 'sigmoid' | 'isotonic'
The method to use for calibration. Can be 'sigmoid' which
corresponds to Platt's method or 'isotonic' which is a
non-parametric approach based on isotonic regression.
classes : array-like, shape (n_classes,), optional
Contains unique classes used to fit the base estimator.
if None, then classes is extracted from the given target values
in fit().
References
----------
.. [1] Obtaining calibrated probability estimates from decision trees
and naive Bayesian classifiers, B. Zadrozny & C. Elkan, ICML 2001
.. [2] Transforming Classifier Scores into Accurate Multiclass
Probability Estimates, B. Zadrozny & C. Elkan, (KDD 2002)
.. [3] Probabilistic Outputs for Support Vector Machines and Comparisons to
Regularized Likelihood Methods, J. Platt, (1999)
.. [4] Predicting Good Probabilities with Supervised Learning,
A. Niculescu-Mizil & R. Caruana, ICML 2005
"""
def __init__(self, base_estimator, method='sigmoid', classes=None):
self.base_estimator = base_estimator
self.method = method
self.classes = classes
def _preproc(self, X):
n_classes = len(self.classes_)
if hasattr(self.base_estimator, "decision_function"):
df = self.base_estimator.decision_function(X)
if df.ndim == 1:
df = df[:, np.newaxis]
elif hasattr(self.base_estimator, "predict_proba"):
df = self.base_estimator.predict_proba(X)
if n_classes == 2:
df = df[:, 1:]
else:
raise RuntimeError('classifier has no decision_function or '
'predict_proba method.')
idx_pos_class = self.label_encoder_.\
transform(self.base_estimator.classes_)
return df, idx_pos_class
def fit(self, X, y, sample_weight=None):
"""Calibrate the fitted model
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,)
Target values.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted.
Returns
-------
self : object
Returns an instance of self.
"""
self.label_encoder_ = LabelEncoder()
if self.classes is None:
self.label_encoder_.fit(y)
else:
self.label_encoder_.fit(self.classes)
self.classes_ = self.label_encoder_.classes_
Y = label_binarize(y, self.classes_)
df, idx_pos_class = self._preproc(X)
self.calibrators_ = []
for k, this_df in zip(idx_pos_class, df.T):
if self.method == 'isotonic':
calibrator = IsotonicRegression(out_of_bounds='clip')
elif self.method == 'sigmoid':
calibrator = _SigmoidCalibration()
else:
raise ValueError('method should be "sigmoid" or '
'"isotonic". Got %s.' % self.method)
calibrator.fit(this_df, Y[:, k], sample_weight)
self.calibrators_.append(calibrator)
return self
def predict_proba(self, X):
"""Posterior probabilities of classification
This function returns posterior probabilities of classification
according to each class on an array of test vectors X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The samples.
Returns
-------
C : array, shape (n_samples, n_classes)
The predicted probas. Can be exact zeros.
"""
n_classes = len(self.classes_)
proba = np.zeros((X.shape[0], n_classes))
df, idx_pos_class = self._preproc(X)
for k, this_df, calibrator in \
zip(idx_pos_class, df.T, self.calibrators_):
if n_classes == 2:
k += 1
proba[:, k] = calibrator.predict(this_df)
# Normalize the probabilities
if n_classes == 2:
proba[:, 0] = 1. - proba[:, 1]
else:
proba /= np.sum(proba, axis=1)[:, np.newaxis]
# XXX : for some reason all probas can be 0
proba[np.isnan(proba)] = 1. / n_classes
# Deal with cases where the predicted probability minimally exceeds 1.0
proba[(1.0 < proba) & (proba <= 1.0 + 1e-5)] = 1.0
return proba
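# --- Hedged sketch (not part of the original module): the cv="prefit" path,
# which wraps an already-fitted estimator via _CalibratedClassifier above.
# Uses disjoint fit/calibration splits, as the class docstrings require.
def _demo_prefit_calibration():
    from sklearn.datasets import make_classification
    X, y = make_classification(n_samples=200, random_state=1)
    base = LinearSVC(random_state=0).fit(X[:100], y[:100])
    clf = CalibratedClassifierCV(base, method='sigmoid', cv='prefit')
    clf.fit(X[100:], y[100:])
    return clf.predict_proba(X[:5])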
def _sigmoid_calibration(df, y, sample_weight=None):
"""Probability Calibration with sigmoid method (Platt 2000)
Parameters
----------
df : ndarray, shape (n_samples,)
The decision function or predict proba for the samples.
y : ndarray, shape (n_samples,)
The targets.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted.
Returns
-------
a : float
The slope.
b : float
The intercept.
References
----------
Platt, "Probabilistic Outputs for Support Vector Machines"
"""
df = column_or_1d(df)
y = column_or_1d(y)
F = df # F follows Platt's notations
tiny = np.finfo(np.float).tiny # to avoid division by 0 warning
# Bayesian priors (see Platt end of section 2.2)
prior0 = float(np.sum(y <= 0))
prior1 = y.shape[0] - prior0
T = np.zeros(y.shape)
T[y > 0] = (prior1 + 1.) / (prior1 + 2.)
T[y <= 0] = 1. / (prior0 + 2.)
T1 = 1. - T
def objective(AB):
# From Platt (beginning of Section 2.2)
E = np.exp(AB[0] * F + AB[1])
P = 1. / (1. + E)
l = -(T * np.log(P + tiny) + T1 * np.log(1. - P + tiny))
if sample_weight is not None:
return (sample_weight * l).sum()
else:
return l.sum()
def grad(AB):
# gradient of the objective function
E = np.exp(AB[0] * F + AB[1])
P = 1. / (1. + E)
TEP_minus_T1P = P * (T * E - T1)
if sample_weight is not None:
TEP_minus_T1P *= sample_weight
dA = np.dot(TEP_minus_T1P, F)
dB = np.sum(TEP_minus_T1P)
return np.array([dA, dB])
AB0 = np.array([0., log((prior0 + 1.) / (prior1 + 1.))])
AB_ = fmin_bfgs(objective, AB0, fprime=grad, disp=False)
return AB_[0], AB_[1]
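# --- Hedged numeric sketch (not part of the original module): on separable
# scores, Platt scaling should learn a negative slope `a`, so that
# 1. / (1. + np.exp(a * f + b)) increases with the decision value f.
def _demo_sigmoid_calibration():
    df = np.array([-2., -1., 1., 2.])
    y = np.array([0, 0, 1, 1])
    a, b = _sigmoid_calibration(df, y)
    probs = 1. / (1. + np.exp(a * df + b))
    assert np.all(np.diff(probs) > 0)  # monotonically increasing in df
    return a, b, probs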
class _SigmoidCalibration(BaseEstimator, RegressorMixin):
"""Sigmoid regression model.
Attributes
----------
a_ : float
The slope.
b_ : float
The intercept.
"""
def fit(self, X, y, sample_weight=None):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape (n_samples,)
Training data.
y : array-like, shape (n_samples,)
Training target.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted.
Returns
-------
self : object
Returns an instance of self.
"""
X = column_or_1d(X)
y = column_or_1d(y)
X, y = indexable(X, y)
self.a_, self.b_ = _sigmoid_calibration(X, y, sample_weight)
return self
def predict(self, T):
"""Predict new data by linear interpolation.
Parameters
----------
T : array-like, shape (n_samples,)
Data to predict from.
Returns
-------
T_ : array, shape (n_samples,)
The predicted data.
"""
T = column_or_1d(T)
return 1. / (1. + np.exp(self.a_ * T + self.b_))
def calibration_curve(y_true, y_prob, normalize=False, n_bins=5):
"""Compute true and predicted probabilities for a calibration curve.
Calibration curves may also be referred to as reliability diagrams.
Read more in the :ref:`User Guide <calibration>`.
Parameters
----------
y_true : array, shape (n_samples,)
True targets.
y_prob : array, shape (n_samples,)
Probabilities of the positive class.
normalize : bool, optional, default=False
        Whether y_prob needs to be normalized into the interval [0, 1], i.e. is not
a proper probability. If True, the smallest value in y_prob is mapped
onto 0 and the largest one onto 1.
n_bins : int
Number of bins. A bigger number requires more data.
Returns
-------
prob_true : array, shape (n_bins,)
The true probability in each bin (fraction of positives).
prob_pred : array, shape (n_bins,)
The mean predicted probability in each bin.
References
----------
Alexandru Niculescu-Mizil and Rich Caruana (2005) Predicting Good
Probabilities With Supervised Learning, in Proceedings of the 22nd
International Conference on Machine Learning (ICML).
See section 4 (Qualitative Analysis of Predictions).
"""
y_true = column_or_1d(y_true)
y_prob = column_or_1d(y_prob)
if normalize: # Normalize predicted values into interval [0, 1]
y_prob = (y_prob - y_prob.min()) / (y_prob.max() - y_prob.min())
elif y_prob.min() < 0 or y_prob.max() > 1:
raise ValueError("y_prob has values outside [0, 1] and normalize is "
"set to False.")
y_true = _check_binary_probabilistic_predictions(y_true, y_prob)
bins = np.linspace(0., 1. + 1e-8, n_bins + 1)
binids = np.digitize(y_prob, bins) - 1
bin_sums = np.bincount(binids, weights=y_prob, minlength=len(bins))
bin_true = np.bincount(binids, weights=y_true, minlength=len(bins))
bin_total = np.bincount(binids, minlength=len(bins))
nonzero = bin_total != 0
prob_true = (bin_true[nonzero] / bin_total[nonzero])
prob_pred = (bin_sums[nonzero] / bin_total[nonzero])
return prob_true, prob_pred
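# --- Hedged sketch (not part of the original module): calibration_curve on a
# tiny hand-checkable input with two bins.
def _demo_calibration_curve():
    y_true = np.array([0, 0, 0, 1, 1, 1])
    y_prob = np.array([0.1, 0.2, 0.3, 0.7, 0.8, 0.9])
    prob_true, prob_pred = calibration_curve(y_true, y_prob, n_bins=2)
    # Bin [0, 0.5): no positives, mean prediction 0.2;
    # bin [0.5, 1]: all positives, mean prediction 0.8.
    return prob_true, prob_pred  # (array([ 0., 1.]), array([ 0.2, 0.8]))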
|
bsd-3-clause
|
zfrenchee/pandas
|
pandas/core/reshape/concat.py
|
1
|
21480
|
"""
concat routines
"""
import numpy as np
from pandas import compat, DataFrame, Series, Index, MultiIndex
from pandas.core.index import (_get_objs_combined_axis,
_ensure_index, _get_consensus_names,
_all_indexes_same)
from pandas.core.categorical import (_factorize_from_iterable,
_factorize_from_iterables)
from pandas.core.internals import concatenate_block_managers
from pandas.core import common as com
from pandas.core.generic import NDFrame
import pandas.core.dtypes.concat as _concat
# ---------------------------------------------------------------------
# Concatenate DataFrame objects
def concat(objs, axis=0, join='outer', join_axes=None, ignore_index=False,
keys=None, levels=None, names=None, verify_integrity=False,
copy=True):
"""
Concatenate pandas objects along a particular axis with optional set logic
along the other axes.
Can also add a layer of hierarchical indexing on the concatenation axis,
which may be useful if the labels are the same (or overlapping) on
the passed axis number.
Parameters
----------
objs : a sequence or mapping of Series, DataFrame, or Panel objects
If a dict is passed, the sorted keys will be used as the `keys`
argument, unless it is passed, in which case the values will be
        selected (see below). Any None objects will be dropped silently unless
        they are all None, in which case a ValueError will be raised.
axis : {0/'index', 1/'columns'}, default 0
The axis to concatenate along
join : {'inner', 'outer'}, default 'outer'
How to handle indexes on other axis(es)
join_axes : list of Index objects
Specific indexes to use for the other n - 1 axes instead of performing
inner/outer set logic
ignore_index : boolean, default False
If True, do not use the index values along the concatenation axis. The
resulting axis will be labeled 0, ..., n - 1. This is useful if you are
concatenating objects where the concatenation axis does not have
meaningful indexing information. Note the index values on the other
axes are still respected in the join.
keys : sequence, default None
If multiple levels passed, should contain tuples. Construct
hierarchical index using the passed keys as the outermost level
levels : list of sequences, default None
Specific levels (unique values) to use for constructing a
MultiIndex. Otherwise they will be inferred from the keys
names : list, default None
Names for the levels in the resulting hierarchical index
verify_integrity : boolean, default False
Check whether the new concatenated axis contains duplicates. This can
be very expensive relative to the actual data concatenation
copy : boolean, default True
If False, do not copy data unnecessarily
Returns
-------
concatenated : object, type of objs
When concatenating all ``Series`` along the index (axis=0), a
``Series`` is returned. When ``objs`` contains at least one
``DataFrame``, a ``DataFrame`` is returned. When concatenating along
the columns (axis=1), a ``DataFrame`` is returned.
Notes
-----
The keys, levels, and names arguments are all optional.
A walkthrough of how this method fits in with other tools for combining
pandas objects can be found `here
<http://pandas.pydata.org/pandas-docs/stable/merging.html>`__.
See Also
--------
Series.append
DataFrame.append
DataFrame.join
DataFrame.merge
Examples
--------
Combine two ``Series``.
>>> s1 = pd.Series(['a', 'b'])
>>> s2 = pd.Series(['c', 'd'])
>>> pd.concat([s1, s2])
0 a
1 b
0 c
1 d
dtype: object
Clear the existing index and reset it in the result
by setting the ``ignore_index`` option to ``True``.
>>> pd.concat([s1, s2], ignore_index=True)
0 a
1 b
2 c
3 d
dtype: object
Add a hierarchical index at the outermost level of
the data with the ``keys`` option.
>>> pd.concat([s1, s2], keys=['s1', 's2',])
s1 0 a
1 b
s2 0 c
1 d
dtype: object
Label the index keys you create with the ``names`` option.
>>> pd.concat([s1, s2], keys=['s1', 's2'],
... names=['Series name', 'Row ID'])
Series name Row ID
s1 0 a
1 b
s2 0 c
1 d
dtype: object
Combine two ``DataFrame`` objects with identical columns.
>>> df1 = pd.DataFrame([['a', 1], ['b', 2]],
... columns=['letter', 'number'])
>>> df1
letter number
0 a 1
1 b 2
>>> df2 = pd.DataFrame([['c', 3], ['d', 4]],
... columns=['letter', 'number'])
>>> df2
letter number
0 c 3
1 d 4
>>> pd.concat([df1, df2])
letter number
0 a 1
1 b 2
0 c 3
1 d 4
Combine ``DataFrame`` objects with overlapping columns
and return everything. Columns outside the intersection will
be filled with ``NaN`` values.
>>> df3 = pd.DataFrame([['c', 3, 'cat'], ['d', 4, 'dog']],
... columns=['letter', 'number', 'animal'])
>>> df3
letter number animal
0 c 3 cat
1 d 4 dog
>>> pd.concat([df1, df3])
animal letter number
0 NaN a 1
1 NaN b 2
0 cat c 3
1 dog d 4
Combine ``DataFrame`` objects with overlapping columns
and return only those that are shared by passing ``inner`` to
the ``join`` keyword argument.
>>> pd.concat([df1, df3], join="inner")
letter number
0 a 1
1 b 2
0 c 3
1 d 4
Combine ``DataFrame`` objects horizontally along the x axis by
passing in ``axis=1``.
>>> df4 = pd.DataFrame([['bird', 'polly'], ['monkey', 'george']],
... columns=['animal', 'name'])
>>> pd.concat([df1, df4], axis=1)
letter number animal name
0 a 1 bird polly
1 b 2 monkey george
Prevent the result from including duplicate index values with the
``verify_integrity`` option.
>>> df5 = pd.DataFrame([1], index=['a'])
>>> df5
0
a 1
>>> df6 = pd.DataFrame([2], index=['a'])
>>> df6
0
a 2
>>> pd.concat([df5, df6], verify_integrity=True)
Traceback (most recent call last):
...
ValueError: Indexes have overlapping values: ['a']
"""
op = _Concatenator(objs, axis=axis, join_axes=join_axes,
ignore_index=ignore_index, join=join,
keys=keys, levels=levels, names=names,
verify_integrity=verify_integrity,
copy=copy)
return op.get_result()
class _Concatenator(object):
"""
Orchestrates a concatenation operation for BlockManagers
"""
def __init__(self, objs, axis=0, join='outer', join_axes=None,
keys=None, levels=None, names=None,
ignore_index=False, verify_integrity=False, copy=True):
if isinstance(objs, (NDFrame, compat.string_types)):
raise TypeError('first argument must be an iterable of pandas '
'objects, you passed an object of type '
'"{name}"'.format(name=type(objs).__name__))
if join == 'outer':
self.intersect = False
elif join == 'inner':
self.intersect = True
else: # pragma: no cover
            raise ValueError('Can only inner (intersect) or outer (union) '
                             'join the other axis')
if isinstance(objs, dict):
if keys is None:
keys = sorted(objs)
objs = [objs[k] for k in keys]
else:
objs = list(objs)
if len(objs) == 0:
raise ValueError('No objects to concatenate')
if keys is None:
objs = list(com._not_none(*objs))
else:
# #1649
clean_keys = []
clean_objs = []
for k, v in zip(keys, objs):
if v is None:
continue
clean_keys.append(k)
clean_objs.append(v)
objs = clean_objs
name = getattr(keys, 'name', None)
keys = Index(clean_keys, name=name)
if len(objs) == 0:
raise ValueError('All objects passed were None')
# consolidate data & figure out what our result ndim is going to be
ndims = set()
for obj in objs:
if not isinstance(obj, NDFrame):
msg = ('cannot concatenate object of type "{0}";'
' only pd.Series, pd.DataFrame, and pd.Panel'
' (deprecated) objs are valid'.format(type(obj)))
raise TypeError(msg)
# consolidate
obj._consolidate(inplace=True)
ndims.add(obj.ndim)
# get the sample
# want the highest ndim that we have, and must be non-empty
# unless all objs are empty
sample = None
if len(ndims) > 1:
max_ndim = max(ndims)
for obj in objs:
if obj.ndim == max_ndim and np.sum(obj.shape):
sample = obj
break
else:
            # filter out the empties if we have no multi-index possibilities
            # note: keep empty Series, as they affect the result columns / name
non_empties = [obj for obj in objs
if sum(obj.shape) > 0 or isinstance(obj, Series)]
if (len(non_empties) and (keys is None and names is None and
levels is None and
join_axes is None and
not self.intersect)):
objs = non_empties
sample = objs[0]
if sample is None:
sample = objs[0]
self.objs = objs
# Standardize axis parameter to int
if isinstance(sample, Series):
axis = DataFrame()._get_axis_number(axis)
else:
axis = sample._get_axis_number(axis)
# Need to flip BlockManager axis in the DataFrame special case
self._is_frame = isinstance(sample, DataFrame)
if self._is_frame:
axis = 1 if axis == 0 else 0
self._is_series = isinstance(sample, Series)
if not 0 <= axis <= sample.ndim:
raise AssertionError("axis must be between 0 and {ndim}, input was"
" {axis}".format(ndim=sample.ndim, axis=axis))
# if we have mixed ndims, then convert to highest ndim
# creating column numbers as needed
if len(ndims) > 1:
current_column = 0
max_ndim = sample.ndim
self.objs, objs = [], self.objs
for obj in objs:
ndim = obj.ndim
if ndim == max_ndim:
pass
elif ndim != max_ndim - 1:
raise ValueError("cannot concatenate unaligned mixed "
"dimensional NDFrame objects")
else:
name = getattr(obj, 'name', None)
if ignore_index or name is None:
name = current_column
current_column += 1
# doing a row-wise concatenation so need everything
# to line up
if self._is_frame and axis == 1:
name = 0
obj = sample._constructor({name: obj})
self.objs.append(obj)
# note: this is the BlockManager axis (since DataFrame is transposed)
self.axis = axis
self.join_axes = join_axes
self.keys = keys
self.names = names or getattr(keys, 'names', None)
self.levels = levels
self.ignore_index = ignore_index
self.verify_integrity = verify_integrity
self.copy = copy
self.new_axes = self._get_new_axes()
def get_result(self):
# series only
if self._is_series:
# stack blocks
if self.axis == 0:
name = com._consensus_name_attr(self.objs)
mgr = self.objs[0]._data.concat([x._data for x in self.objs],
self.new_axes)
cons = _concat._get_series_result_type(mgr, self.objs)
return cons(mgr, name=name).__finalize__(self, method='concat')
# combine as columns in a frame
else:
data = dict(zip(range(len(self.objs)), self.objs))
cons = _concat._get_series_result_type(data)
index, columns = self.new_axes
df = cons(data, index=index)
df.columns = columns
return df.__finalize__(self, method='concat')
# combine block managers
else:
mgrs_indexers = []
for obj in self.objs:
mgr = obj._data
indexers = {}
for ax, new_labels in enumerate(self.new_axes):
if ax == self.axis:
# Suppress reindexing on concat axis
continue
obj_labels = mgr.axes[ax]
if not new_labels.equals(obj_labels):
indexers[ax] = obj_labels.reindex(new_labels)[1]
mgrs_indexers.append((obj._data, indexers))
new_data = concatenate_block_managers(
mgrs_indexers, self.new_axes, concat_axis=self.axis,
copy=self.copy)
if not self.copy:
new_data._consolidate_inplace()
cons = _concat._get_frame_result_type(new_data, self.objs)
return (cons._from_axes(new_data, self.new_axes)
.__finalize__(self, method='concat'))
def _get_result_dim(self):
if self._is_series and self.axis == 1:
return 2
else:
return self.objs[0].ndim
def _get_new_axes(self):
ndim = self._get_result_dim()
new_axes = [None] * ndim
if self.join_axes is None:
for i in range(ndim):
if i == self.axis:
continue
new_axes[i] = self._get_comb_axis(i)
else:
if len(self.join_axes) != ndim - 1:
raise AssertionError("length of join_axes must not be equal "
"to {length}".format(length=ndim - 1))
# ufff...
indices = compat.lrange(ndim)
indices.remove(self.axis)
for i, ax in zip(indices, self.join_axes):
new_axes[i] = ax
new_axes[self.axis] = self._get_concat_axis()
return new_axes
def _get_comb_axis(self, i):
data_axis = self.objs[0]._get_block_manager_axis(i)
try:
return _get_objs_combined_axis(self.objs, axis=data_axis,
intersect=self.intersect)
except IndexError:
types = [type(x).__name__ for x in self.objs]
raise TypeError("Cannot concatenate list of {types}"
.format(types=types))
def _get_concat_axis(self):
"""
Return index to be used along concatenation axis.
"""
if self._is_series:
if self.axis == 0:
indexes = [x.index for x in self.objs]
elif self.ignore_index:
idx = com._default_index(len(self.objs))
return idx
elif self.keys is None:
names = [None] * len(self.objs)
num = 0
has_names = False
for i, x in enumerate(self.objs):
if not isinstance(x, Series):
raise TypeError("Cannot concatenate type 'Series' "
"with object of type {type!r}"
.format(type=type(x).__name__))
if x.name is not None:
names[i] = x.name
has_names = True
else:
names[i] = num
num += 1
if has_names:
return Index(names)
else:
return com._default_index(len(self.objs))
else:
return _ensure_index(self.keys)
else:
indexes = [x._data.axes[self.axis] for x in self.objs]
if self.ignore_index:
idx = com._default_index(sum(len(i) for i in indexes))
return idx
if self.keys is None:
concat_axis = _concat_indexes(indexes)
else:
concat_axis = _make_concat_multiindex(indexes, self.keys,
self.levels, self.names)
self._maybe_check_integrity(concat_axis)
return concat_axis
def _maybe_check_integrity(self, concat_index):
if self.verify_integrity:
if not concat_index.is_unique:
overlap = concat_index.get_duplicates()
raise ValueError('Indexes have overlapping values: '
'{overlap!s}'.format(overlap=overlap))
def _concat_indexes(indexes):
return indexes[0].append(indexes[1:])
def _make_concat_multiindex(indexes, keys, levels=None, names=None):
if ((levels is None and isinstance(keys[0], tuple)) or
(levels is not None and len(levels) > 1)):
zipped = compat.lzip(*keys)
if names is None:
names = [None] * len(zipped)
if levels is None:
_, levels = _factorize_from_iterables(zipped)
else:
levels = [_ensure_index(x) for x in levels]
else:
zipped = [keys]
if names is None:
names = [None]
if levels is None:
levels = [_ensure_index(keys)]
else:
levels = [_ensure_index(x) for x in levels]
if not _all_indexes_same(indexes):
label_list = []
# things are potentially different sizes, so compute the exact labels
# for each level and pass those to MultiIndex.from_arrays
for hlevel, level in zip(zipped, levels):
to_concat = []
for key, index in zip(hlevel, indexes):
try:
i = level.get_loc(key)
except KeyError:
raise ValueError('Key {key!s} not in level {level!s}'
.format(key=key, level=level))
to_concat.append(np.repeat(i, len(index)))
label_list.append(np.concatenate(to_concat))
concat_index = _concat_indexes(indexes)
# these go at the end
if isinstance(concat_index, MultiIndex):
levels.extend(concat_index.levels)
label_list.extend(concat_index.labels)
else:
codes, categories = _factorize_from_iterable(concat_index)
levels.append(categories)
label_list.append(codes)
if len(names) == len(levels):
names = list(names)
else:
# make sure that all of the passed indices have the same nlevels
if not len({idx.nlevels for idx in indexes}) == 1:
raise AssertionError("Cannot concat indices that do"
" not have the same number of levels")
# also copies
names = names + _get_consensus_names(indexes)
return MultiIndex(levels=levels, labels=label_list, names=names,
verify_integrity=False)
new_index = indexes[0]
n = len(new_index)
kpieces = len(indexes)
# also copies
new_names = list(names)
new_levels = list(levels)
# construct labels
new_labels = []
# do something a bit more speedy
for hlevel, level in zip(zipped, levels):
hlevel = _ensure_index(hlevel)
mapped = level.get_indexer(hlevel)
mask = mapped == -1
if mask.any():
raise ValueError('Values not found in passed level: {hlevel!s}'
.format(hlevel=hlevel[mask]))
new_labels.append(np.repeat(mapped, n))
if isinstance(new_index, MultiIndex):
new_levels.extend(new_index.levels)
new_labels.extend([np.tile(lab, kpieces) for lab in new_index.labels])
else:
new_levels.append(new_index)
new_labels.append(np.tile(np.arange(n), kpieces))
if len(new_names) < len(new_levels):
new_names.extend(new_index.names)
return MultiIndex(levels=new_levels, labels=new_labels, names=new_names,
verify_integrity=False)
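# Illustrative sketch (not part of the original module) of the behaviour
# implemented above: concatenating with ``keys`` builds a MultiIndex whose
# outer level comes from the keys and whose inner level is the original
# index of each piece.
#
#   s1 = pd.Series([1, 2])
#   s2 = pd.Series([3, 4])
#   pd.concat([s1, s2], keys=["a", "b"])
#   # a  0    1
#   #    1    2
#   # b  0    3
#   #    1    4
#   # dtype: int64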
|
bsd-3-clause
|
ndardenne/pymatgen
|
pymatgen/io/abinit/abitimer.py
|
2
|
26477
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module provides objects for extracting timing data from the ABINIT output files.
It also provides tools to analyze and to visualize the parallel efficiency.
"""
from __future__ import unicode_literals, division
import sys
import collections
import numpy as np
from six.moves import zip
from monty.string import is_string, list_strings
from pymatgen.util.num_utils import minloc
from pymatgen.util.plotting_utils import add_fig_kwargs, get_ax_fig_plt
import logging
logger = logging.getLogger(__name__)
def alternate(*iterables):
"""
[a[0], b[0], ... , a[1], b[1], ..., a[n], b[n] ...]
>>> alternate([1,4], [2,5], [3,6])
[1, 2, 3, 4, 5, 6]
"""
items = []
for tup in zip(*iterables):
        items.extend(tup)
return items
class AbinitTimerParserError(Exception):
"""Errors raised by AbinitTimerParser"""
class AbinitTimerParser(collections.Iterable):
"""
Responsible for parsing a list of output files, and managing the parsed database.
"""
# The markers enclosing the data.
BEGIN_TAG = "-<BEGIN_TIMER"
END_TAG = "-<END_TIMER>"
Error = AbinitTimerParserError
#DEFAULT_MPI_RANK = "0"
def __init__(self):
# List of files that have been parsed.
self._filenames = []
# timers[filename][mpi_rank]
# contains the timer extracted from the file filename associated to the MPI rank mpi_rank.
self._timers = collections.OrderedDict()
def __iter__(self):
return self._timers.__iter__()
def __len__(self):
return len(self._timers)
def parse(self, filenames):
"""
Read and parse a filename or a list of filenames.
        Files that cannot be opened are ignored.
Return list of successfully read files.
"""
filenames = list_strings(filenames)
read_ok = []
for fname in filenames:
try:
fh = open(fname)
except IOError:
logger.warning("Cannot open file %s" % fname)
continue
try:
self._read(fh, fname)
read_ok.append(fname)
except self.Error as e:
logger.warning("exception while parsing file %s:\n%s" % (fname, str(e)))
continue
finally:
fh.close()
# Add read_ok to the list of files that have been parsed.
self._filenames.extend(read_ok)
return read_ok
def _read(self, fh, fname):
"""Parse the TIMER section"""
if fname in self._timers:
raise self.Error("Cannot overwrite timer associated to: %s " % fname)
data = {}
def parse_line(line):
name, vals = line[:25], line[25:].split()
ctime, cfract, wtime, wfract, ncalls, gflops = vals
return AbinitTimerSection(name, ctime, cfract, wtime, wfract, ncalls, gflops)
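        # parse_line assumes a fixed-width record: the first 25 characters
        # hold the section name, followed by six whitespace-separated numeric
        # fields. A hypothetical data line, for illustration only:
        #   fourwf%(pot)             12.3  41.2  12.5  41.8  530  1.2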
inside, has_timer = 0, False
for line in fh:
#print(line.strip())
if line.startswith(self.BEGIN_TAG):
has_timer = True
sections = []
info = {}
inside = 1
line = line[len(self.BEGIN_TAG):].strip()[:-1]
info["fname"] = fname
for tok in line.split(","):
(key, val) = [s.strip() for s in tok.split("=")]
info[key] = val
elif line.startswith(self.END_TAG):
inside = 0
timer = AbinitTimer(sections, info, cpu_time, wall_time)
mpi_rank = info["mpi_rank"]
data[mpi_rank] = timer
elif inside:
inside += 1
line = line[1:].strip()
if inside == 2:
d = dict()
for tok in line.split(","):
(key, val) = [s.strip() for s in tok.split("=")]
d[key] = float(val)
cpu_time, wall_time = d["cpu_time"], d["wall_time"]
elif inside > 5:
sections.append(parse_line(line))
            else:
                # Lines between the header and the data block are expected to
                # be empty; if one unexpectedly parses as a timer record, the
                # file layout is not what this parser assumes.
                parser_failed = False
                try:
                    parse_line(line)
                except Exception:
                    parser_failed = True
                if not parser_failed:
                    raise self.Error("line should be empty: " + str(inside) + line)
if not has_timer:
raise self.Error("%s: No timer section found" % fname)
# Add it to the dict
self._timers[fname] = data
#def set_default_mpi_rank(mpi_rank): self._default_mpi_rank = mpi_rank
#def get_default_mpi_rank(mpi_rank): return self._default_mpi_rank
def timers(self, filename=None, mpi_rank="0"):
"""Return the list of timers associated to the given filename and MPI rank mpi_rank."""
if filename is not None:
timers = [self._timers[filename][mpi_rank]]
else:
timers = [self._timers[filename][mpi_rank] for filename in self._filenames]
return timers
def section_names(self, ordkey="wall_time"):
"""Return the names of sections ordered by ordkey."""
section_names = [] # Avoid UnboundLocalError
# FIXME this is not trivial
for idx, timer in enumerate(self.timers()):
if idx == 0:
section_names = [s.name for s in timer.order_sections(ordkey)]
#check = section_names
#else:
# new_set = set( [s.name for s in timer.order_sections(ordkey)])
# section_names.intersection_update(new_set)
# check = check.union(new_set)
#if check != section_names:
# print("sections", section_names)
# print("check",check)
return section_names
def get_sections(self, section_name):
"""
        Return the list of sections stored in self.timers() whose name is section_name.
        A fake section is returned if the timer does not have section_name.
"""
sections = []
for timer in self.timers():
for sect in timer.sections:
if sect.name == section_name:
sections.append(sect)
break
else:
sections.append(AbinitTimerSection.fake())
return sections
def pefficiency(self):
"""
Analyze the parallel efficiency.
"""
timers = self.timers()
# Number of CPUs employed in each calculation.
ncpus = [timer.ncpus for timer in timers]
# Find the minimum number of cpus used and its index in timers.
min_idx = minloc(ncpus)
min_ncpus = ncpus[min_idx]
# Reference timer
ref_t = timers[min_idx]
        # Compute the parallel efficiency (total efficiency and the efficiency of each section)
peff = {}
        ctime_peff = [(min_ncpus * ref_t.cpu_time) / (t.cpu_time * ncp) for (t, ncp) in zip(timers, ncpus)]
        wtime_peff = [(min_ncpus * ref_t.wall_time) / (t.wall_time * ncp) for (t, ncp) in zip(timers, ncpus)]
n = len(timers)
peff["total"] = {}
peff["total"]["cpu_time"] = ctime_peff
peff["total"]["wall_time"] = wtime_peff
peff["total"]["cpu_fract"] = n * [100]
peff["total"]["wall_fract"] = n * [100]
for sect_name in self.section_names():
#print(sect_name)
ref_sect = ref_t.get_section(sect_name)
sects = [t.get_section(sect_name) for t in timers]
try:
ctime_peff = [(min_ncpus * ref_sect.cpu_time) / (s.cpu_time * ncp) for (s, ncp) in zip(sects, ncpus)]
wtime_peff = [(min_ncpus * ref_sect.wall_time) / (s.wall_time * ncp) for (s, ncp) in zip(sects, ncpus)]
except ZeroDivisionError:
ctime_peff = n * [-1]
wtime_peff = n * [-1]
assert sect_name not in peff
peff[sect_name] = {}
peff[sect_name]["cpu_time"] = ctime_peff
peff[sect_name]["wall_time"] = wtime_peff
peff[sect_name]["cpu_fract"] = [s.cpu_fract for s in sects]
peff[sect_name]["wall_fract"] = [s.wall_fract for s in sects]
return ParallelEfficiency(self._filenames, min_idx, peff)
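    # Worked example of the efficiency formula used above (illustrative
    # numbers): if the reference run takes 100 s of wall time on 1 CPU and
    # another run takes 30 s on 4 CPUs, its efficiency is
    # (1 * 100) / (30 * 4) ~= 0.83, i.e. 83% of ideal linear scaling.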
def summarize(self, **kwargs):
"""
        Return a pandas DataFrame with one row per parsed timer.
"""
import pandas as pd
colnames = ["fname", "wall_time", "cpu_time", "mpi_nprocs", "omp_nthreads", "mpi_rank"]
frame = pd.DataFrame(columns=colnames)
for i, timer in enumerate(self.timers()):
frame = frame.append({k: getattr(timer, k) for k in colnames}, ignore_index=True)
frame["tot_ncpus"] = frame["mpi_nprocs"] * frame["omp_nthreads"]
# Compute parallel efficiency (use the run with min number of cpus to normalize).
i = frame["tot_ncpus"].idxmin()
        ref_wtime = frame.loc[i, "wall_time"]
        ref_ncpus = frame.loc[i, "tot_ncpus"]
frame["peff"] = (ref_ncpus * ref_wtime) / (frame["wall_time"] * frame["tot_ncpus"])
return frame
@add_fig_kwargs
def plot_efficiency(self, key="wall_time", what="gb", nmax=5, ax=None, **kwargs):
ax, fig, plt = get_ax_fig_plt(ax=ax)
timers = self.timers()
peff = self.pefficiency()
# Table with the parallel efficiency for all the sections.
#pprint_table(peff.totable())
n = len(timers)
xx = np.arange(n)
ax.set_color_cycle(['g', 'b', 'c', 'm', 'y', 'k'])
legend_entries = []
# Plot sections with good efficiency.
lines = []
if "g" in what:
good = peff.good_sections(key=key, nmax=nmax)
for g in good:
#print(g, peff[g])
yy = peff[g][key]
line, = ax.plot(xx, yy, "-->", linewidth=3.0, markersize=10)
lines.append(line)
legend_entries.append(g)
# Plot sections with bad efficiency.
if "b" in what:
bad = peff.bad_sections(key=key, nmax=nmax)
for b in bad:
#print(b, peff[b])
yy = peff[b][key]
line, = ax.plot(xx, yy, "-.<", linewidth=3.0, markersize=10)
lines.append(line)
legend_entries.append(b)
if "total" not in legend_entries:
yy = peff["total"][key]
total_line, = ax.plot(xx, yy, "r", linewidth=3.0, markersize=10)
lines.append(total_line)
legend_entries.append("total")
ax.legend(lines, legend_entries, loc="best", shadow=True)
#ax.set_title(title)
ax.set_xlabel('Total_NCPUs')
ax.set_ylabel('Efficiency')
ax.grid(True)
# Set xticks and labels.
labels = ["MPI = %d, OMP = %d" % (t.mpi_nprocs, t.omp_nthreads) for t in timers]
ax.set_xticks(xx)
ax.set_xticklabels(labels, fontdict=None, minor=False, rotation=15)
return fig
@add_fig_kwargs
def plot_pie(self, key="wall_time", minfract=0.05, ax=None, **kwargs):
"""Pie charts of the different timers."""
ax, fig, plt = get_ax_fig_plt(ax=ax)
timers = self.timers()
n = len(timers)
# Make square figures and axes
the_grid = plt.GridSpec(n, 1)
fig = plt.figure(1, figsize=(6, 6))
for idx, timer in enumerate(timers):
plt.subplot(the_grid[idx, 0])
plt.title(str(timer))
timer.pie(key=key, minfract=minfract)
return fig
@add_fig_kwargs
def plot_stacked_hist(self, key="wall_time", nmax=5, ax=None, **kwargs):
"""Stacked histogram of the different timers."""
ax, fig, plt = get_ax_fig_plt(ax=ax)
mpi_rank = "0"
timers = self.timers(mpi_rank=mpi_rank)
n = len(timers)
names, values = [], []
rest = np.zeros(n)
for idx, sname in enumerate(self.section_names(ordkey=key)):
sections = self.get_sections(sname)
svals = np.asarray([s.__dict__[key] for s in sections])
if idx < nmax:
names.append(sname)
values.append(svals)
else:
rest += svals
names.append("others (nmax = %d)" % nmax)
values.append(rest)
#for (n, vals) in zip(names, values): print(n, vals)
# The dataset is stored in values.
# Now create the stacked histogram.
ind = np.arange(n) # the locations for the groups
width = 0.35 # the width of the bars
# this does not work with matplotlib < 1.0
#plt.rcParams['axes.color_cycle'] = ['r', 'g', 'b', 'c']
colors = nmax * ['r', 'g', 'b', 'c', 'k', 'y', 'm']
bars = []
bottom = np.zeros(n)
for idx, vals in enumerate(values):
color = colors[idx]
bar = plt.bar(ind, vals, width, color=color, bottom=bottom)
bars.append(bar)
bottom += vals
ax.set_ylabel(key)
#ax.title("Stacked histogram for the %d most important sections" % nmax)
labels = ["MPI = %d, OMP = %d" % (t.mpi_nprocs, t.omp_nthreads) for t in timers]
plt.xticks(ind + width / 2.0, labels, rotation=15)
#plt.yticks(np.arange(0,81,10))
ax.legend([bar[0] for bar in bars], names, loc="best")
return fig
def plot_all(self, **kwargs):
figs = []; app = figs.append
app(self.plot_efficiency())
app(self.plot_pie())
app(self.plot_stacked_hist())
return figs
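# Minimal usage sketch for AbinitTimerParser (file names are hypothetical;
# the files must contain ABINIT TIMER sections):
#
#   parser = AbinitTimerParser()
#   parser.parse(["run_np1.abo", "run_np4.abo"])
#   peff = parser.pefficiency()
#   # peff["total"]["wall_time"][i] close to 1.0 means near-ideal scaling.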
class ParallelEfficiency(dict):
def __init__(self, filenames, ref_idx, *args, **kwargs):
self.update(*args, **kwargs)
self.filenames = filenames
self._ref_idx = ref_idx
def _order_by_peff(self, key, criterion, reverse=True):
estimators = {
"min": min,
"max": max,
"mean": lambda items: sum(items) / len(items)
}
self.estimator = estimators[criterion]
data = []
for (sect_name, peff) in self.items():
# Ignore values where we had a division by zero.
if all([v != -1 for v in peff[key]]):
values = peff[key][:]
#print(sect_name, values)
if len(values) > 1:
ref_value = values.pop(self._ref_idx)
assert ref_value == 1.0
data.append((sect_name, self.estimator(values)))
fsort = lambda t: t[1]
data.sort(key=fsort, reverse=reverse)
return tuple([sect_name for (sect_name, e) in data])
def totable(self, stop=None, reverse=True):
osects = self._order_by_peff("wall_time", criterion="mean", reverse=reverse)
n = len(self.filenames)
table = [["AbinitTimerSection"] + alternate(self.filenames, n * ["%"])]
for sect_name in osects:
peff = self[sect_name]["wall_time"]
fract = self[sect_name]["wall_fract"]
vals = alternate(peff, fract)
table.append([sect_name] + ["%.2f" % val for val in vals])
return table
def good_sections(self, key="wall_time", criterion="mean", nmax=5):
good_sections = self._order_by_peff(key, criterion=criterion)
return good_sections[:nmax]
def bad_sections(self, key="wall_time", criterion="mean", nmax=5):
bad_sections = self._order_by_peff(key, criterion=criterion, reverse=False)
return bad_sections[:nmax]
class AbinitTimerSection(object):
"""Record with the timing results associated to a section of code."""
STR_FIELDS = [
"name"
]
NUMERIC_FIELDS = [
"wall_time",
"wall_fract",
"cpu_time",
"cpu_fract",
"ncalls",
"gflops",
]
FIELDS = tuple(STR_FIELDS + NUMERIC_FIELDS)
@classmethod
def fake(cls):
return AbinitTimerSection("fake", 0.0, 0.0, 0.0, 0.0, -1, 0.0)
def __init__(self, name, cpu_time, cpu_fract, wall_time, wall_fract, ncalls, gflops):
self.name = name.strip()
self.cpu_time = float(cpu_time)
self.cpu_fract = float(cpu_fract)
self.wall_time = float(wall_time)
self.wall_fract = float(wall_fract)
self.ncalls = int(ncalls)
self.gflops = float(gflops)
def to_tuple(self):
return tuple([self.__dict__[at] for at in AbinitTimerSection.FIELDS])
def to_dict(self):
return {at: self.__dict__[at] for at in AbinitTimerSection.FIELDS}
def to_csvline(self, with_header=False):
"""Return a string with data in CSV format"""
string = ""
if with_header:
string += "# " + " ".join(at for at in AbinitTimerSection.FIELDS) + "\n"
string += ", ".join(str(v) for v in self.to_tuple()) + "\n"
return string
def __str__(self):
string = ""
        for a in AbinitTimerSection.FIELDS:
            string += a + " = " + str(self.__dict__[a]) + ","
return string[:-1]
class AbinitTimer(object):
"""Container class storing the timing results."""
def __init__(self, sections, info, cpu_time, wall_time):
# Store sections and names
self.sections = tuple(sections)
self.section_names = tuple([s.name for s in self.sections])
self.info = info
self.cpu_time = float(cpu_time)
self.wall_time = float(wall_time)
self.mpi_nprocs = int(info["mpi_nprocs"])
self.omp_nthreads = int(info["omp_nthreads"])
self.mpi_rank = info["mpi_rank"].strip()
self.fname = info["fname"].strip()
def __str__(self):
string = "file = %s, wall_time = %.1f, mpi_nprocs = %d, omp_nthreads = %d" % (
self.fname, self.wall_time, self.mpi_nprocs, self.omp_nthreads )
#string += ", rank = " + self.mpi_rank
return string
def __cmp__(self, other):
return cmp(self.wall_time, other.wall_time)
@property
def ncpus(self):
"""Total number of CPUs employed."""
return self.mpi_nprocs * self.omp_nthreads
def get_section(self, section_name):
        idx = self.section_names.index(section_name)
sect = self.sections[idx]
assert sect.name == section_name
return sect
def to_csv(self, fileobj=sys.stdout):
"""Write data on file fileobj using CSV format."""
openclose = is_string(fileobj)
if openclose:
fileobj = open(fileobj, "w")
for (idx, section) in enumerate(self.sections):
fileobj.write(section.to_csvline(with_header=(idx == 0)))
fileobj.flush()
if openclose:
fileobj.close()
def to_table(self, sort_key="wall_time", stop=None):
"""Return a table (list of lists) with timer data"""
table = [list(AbinitTimerSection.FIELDS), ]
ord_sections = self.order_sections(sort_key)
if stop is not None:
ord_sections = ord_sections[:stop]
for osect in ord_sections:
row = [str(item) for item in osect.to_tuple()]
table.append(row)
return table
# Maintain old API
totable = to_table
def get_dataframe(self, sort_key="wall_time", **kwargs):
"""
        Return a pandas DataFrame with one row per timer section.
"""
import pandas as pd
frame = pd.DataFrame(columns=AbinitTimerSection.FIELDS)
for osect in self.order_sections(sort_key):
frame = frame.append(osect.to_dict(), ignore_index=True)
# Monkey patch
frame.info = self.info
frame.cpu_time = self.cpu_time
frame.wall_time = self.wall_time
frame.mpi_nprocs = self.mpi_nprocs
frame.omp_nthreads = self.omp_nthreads
frame.mpi_rank = self.mpi_rank
frame.fname = self.fname
return frame
def get_values(self, keys):
"""Return a list of values associated to a particular list of keys"""
if is_string(keys):
return [s.__dict__[keys] for s in self.sections]
else:
values = []
for k in keys:
values.append([s.__dict__[k] for s in self.sections])
return values
def names_and_values(self, key, minval=None, minfract=None, sorted=True):
"""
        Select the entries whose value[key] is >= minval or whose fraction[key] is >= minfract.
        Return the names of the sections and the corresponding values.
"""
values = self.get_values(key)
names = self.get_values("name")
new_names, new_values = [], []
other_val = 0.0
if minval is not None:
assert minfract is None
for n, v in zip(names, values):
if v >= minval:
new_names.append(n)
new_values.append(v)
else:
other_val += v
new_names.append("below minval " + str(minval))
new_values.append(other_val)
elif minfract is not None:
assert minval is None
total = self.sum_sections(key)
for n, v in zip(names, values):
if v / total >= minfract:
new_names.append(n)
new_values.append(v)
else:
other_val += v
new_names.append("below minfract " + str(minfract))
new_values.append(other_val)
else:
# all values
new_names, new_values = names, values
if sorted:
# Sort new_values and rearrange new_names.
fsort = lambda t: t[1]
nandv = [nv for nv in zip(new_names, new_values)]
nandv.sort(key=fsort)
new_names, new_values = [n[0] for n in nandv], [n[1] for n in nandv]
return new_names, new_values
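    # For example, with key="wall_time" and minfract=0.05, every section whose
    # wall time is below 5% of the total is folded into a single
    # "below minfract 0.05" entry by the method above.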
def _reduce_sections(self, keys, operator):
return operator(self.get_values(keys))
def sum_sections(self, keys):
return self._reduce_sections(keys, sum)
def order_sections(self, key, reverse=True):
"""Sort sections according to the value of key."""
fsort = lambda s: s.__dict__[key]
return sorted(self.sections, key=fsort, reverse=reverse)
@add_fig_kwargs
def cpuwall_histogram(self, ax=None, **kwargs):
ax, fig, plt = get_ax_fig_plt(ax=ax)
nk = len(self.sections)
ind = np.arange(nk) # the x locations for the groups
width = 0.35 # the width of the bars
cpu_times = self.get_values("cpu_time")
rects1 = plt.bar(ind, cpu_times, width, color='r')
wall_times = self.get_values("wall_time")
rects2 = plt.bar(ind + width, wall_times, width, color='y')
        # Add ylabel and title
ax.set_ylabel('Time (s)')
#if title:
# plt.title(title)
#else:
# plt.title('CPU-time and Wall-time for the different sections of the code')
ticks = self.get_values("name")
ax.set_xticks(ind + width, ticks)
ax.legend((rects1[0], rects2[0]), ('CPU', 'Wall'), loc="best")
return fig
#def hist2(self, key1="wall_time", key2="cpu_time"):
# labels = self.get_values("name")
# vals1, vals2 = self.get_values([key1, key2])
# N = len(vals1)
# assert N == len(vals2)
# plt.figure(1)
# plt.subplot(2, 1, 1) # 2 rows, 1 column, figure 1
# n1, bins1, patches1 = plt.hist(vals1, N, facecolor="m")
# plt.xlabel(labels)
# plt.ylabel(key1)
# plt.subplot(2, 1, 2)
# n2, bins2, patches2 = plt.hist(vals2, N, facecolor="y")
# plt.xlabel(labels)
# plt.ylabel(key2)
# plt.show()
def pie(self, key="wall_time", minfract=0.05, title=None):
import matplotlib.pyplot as plt
        # Don't show sections whose value is less than minfract
labels, vals = self.names_and_values(key, minfract=minfract)
return plt.pie(vals, explode=None, labels=labels, autopct='%1.1f%%', shadow=True)
def scatter_hist(self, ax=None, **kwargs):
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
ax, fig, plt = get_ax_fig_plt(ax=ax)
#title = kwargs.pop("title", None)
#show = kwargs.pop("show", True)
#savefig = kwargs.pop("savefig", None)
#fig = plt.figure(1, figsize=(5.5, 5.5))
x = np.asarray(self.get_values("cpu_time"))
y = np.asarray(self.get_values("wall_time"))
# the scatter plot:
axScatter = plt.subplot(1, 1, 1)
axScatter.scatter(x, y)
axScatter.set_aspect("auto")
# create new axes on the right and on the top of the current axes
# The first argument of the new_vertical(new_horizontal) method is
# the height (width) of the axes to be created in inches.
divider = make_axes_locatable(axScatter)
axHistx = divider.append_axes("top", 1.2, pad=0.1, sharex=axScatter)
axHisty = divider.append_axes("right", 1.2, pad=0.1, sharey=axScatter)
# make some labels invisible
plt.setp(axHistx.get_xticklabels() + axHisty.get_yticklabels(), visible=False)
# now determine nice limits by hand:
binwidth = 0.25
xymax = np.max([np.max(np.fabs(x)), np.max(np.fabs(y))])
lim = (int(xymax / binwidth) + 1) * binwidth
bins = np.arange(-lim, lim + binwidth, binwidth)
axHistx.hist(x, bins=bins)
axHisty.hist(y, bins=bins, orientation='horizontal')
# the xaxis of axHistx and yaxis of axHisty are shared with axScatter,
# thus there is no need to manually adjust the xlim and ylim of these axis.
#axHistx.axis["bottom"].major_ticklabels.set_visible(False)
for tl in axHistx.get_xticklabels():
tl.set_visible(False)
axHistx.set_yticks([0, 50, 100])
#axHisty.axis["left"].major_ticklabels.set_visible(False)
for tl in axHisty.get_yticklabels():
tl.set_visible(False)
axHisty.set_xticks([0, 50, 100])
plt.draw()
return fig
|
mit
|
vshtanko/scikit-learn
|
sklearn/decomposition/pca.py
|
192
|
23117
|
""" Principal Component Analysis
"""
# Author: Alexandre Gramfort <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Denis A. Engemann <[email protected]>
# Michael Eickenberg <[email protected]>
#
# License: BSD 3 clause
from math import log, sqrt
import numpy as np
from scipy import linalg
from scipy.special import gammaln
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_random_state, as_float_array
from ..utils import check_array
from ..utils.extmath import fast_dot, fast_logdet, randomized_svd
from ..utils.validation import check_is_fitted
def _assess_dimension_(spectrum, rank, n_samples, n_features):
"""Compute the likelihood of a rank ``rank`` dataset
The dataset is assumed to be embedded in gaussian noise of shape(n,
dimf) having spectrum ``spectrum``.
Parameters
----------
spectrum: array of shape (n)
Data spectrum.
rank: int
Tested rank value.
n_samples: int
Number of samples.
n_features: int
Number of features.
Returns
-------
ll: float,
The log-likelihood
Notes
-----
This implements the method of `Thomas P. Minka:
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604`
"""
if rank > len(spectrum):
raise ValueError("The tested rank cannot exceed the rank of the"
" dataset")
pu = -rank * log(2.)
for i in range(rank):
pu += (gammaln((n_features - i) / 2.)
- log(np.pi) * (n_features - i) / 2.)
pl = np.sum(np.log(spectrum[:rank]))
pl = -pl * n_samples / 2.
if rank == n_features:
pv = 0
v = 1
else:
v = np.sum(spectrum[rank:]) / (n_features - rank)
pv = -np.log(v) * n_samples * (n_features - rank) / 2.
m = n_features * rank - rank * (rank + 1.) / 2.
pp = log(2. * np.pi) * (m + rank + 1.) / 2.
pa = 0.
spectrum_ = spectrum.copy()
spectrum_[rank:n_features] = v
for i in range(rank):
for j in range(i + 1, len(spectrum)):
pa += log((spectrum[i] - spectrum[j]) *
(1. / spectrum_[j] - 1. / spectrum_[i])) + log(n_samples)
ll = pu + pl + pv + pp - pa / 2. - rank * log(n_samples) / 2.
return ll
def _infer_dimension_(spectrum, n_samples, n_features):
"""Infers the dimension of a dataset of shape (n_samples, n_features)
The dataset is described by its spectrum `spectrum`.
"""
n_spectrum = len(spectrum)
ll = np.empty(n_spectrum)
for rank in range(n_spectrum):
ll[rank] = _assess_dimension_(spectrum, rank, n_samples, n_features)
return ll.argmax()
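# Illustrative call (synthetic spectrum, hand-picked values): a spectrum with
# two dominant eigenvalues should give an inferred rank of about 2.
#
#   spectrum = np.array([10., 8., 0.1, 0.09, 0.08])
#   _infer_dimension_(spectrum, n_samples=100, n_features=5)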
class PCA(BaseEstimator, TransformerMixin):
"""Principal component analysis (PCA)
Linear dimensionality reduction using Singular Value Decomposition of the
data and keeping only the most significant singular vectors to project the
data to a lower dimensional space.
This implementation uses the scipy.linalg implementation of the singular
value decomposition. It only works for dense arrays and is not scalable to
large dimensional data.
The time complexity of this implementation is ``O(n ** 3)`` assuming
n ~ n_samples ~ n_features.
Read more in the :ref:`User Guide <PCA>`.
Parameters
----------
n_components : int, None or string
Number of components to keep.
if n_components is not set all components are kept::
n_components == min(n_samples, n_features)
if n_components == 'mle', Minka\'s MLE is used to guess the dimension
if ``0 < n_components < 1``, select the number of components such that
the amount of variance that needs to be explained is greater than the
percentage specified by n_components
copy : bool
If False, data passed to fit are overwritten and running
fit(X).transform(X) will not yield the expected results,
use fit_transform(X) instead.
whiten : bool, optional
When True (False by default) the `components_` vectors are divided
by n_samples times singular values to ensure uncorrelated outputs
with unit component-wise variances.
Whitening will remove some information from the transformed signal
(the relative variance scales of the components) but can sometime
improve the predictive accuracy of the downstream estimators by
        making their data respect some hard-wired assumptions.
Attributes
----------
components_ : array, [n_components, n_features]
Principal axes in feature space, representing the directions of
maximum variance in the data.
explained_variance_ratio_ : array, [n_components]
Percentage of variance explained by each of the selected components.
If ``n_components`` is not set then all components are stored and the
sum of explained variances is equal to 1.0
mean_ : array, [n_features]
Per-feature empirical mean, estimated from the training set.
n_components_ : int
The estimated number of components. Relevant when n_components is set
to 'mle' or a number between 0 and 1 to select using explained
variance.
noise_variance_ : float
The estimated noise covariance following the Probabilistic PCA model
from Tipping and Bishop 1999. See "Pattern Recognition and
Machine Learning" by C. Bishop, 12.2.1 p. 574 or
http://www.miketipping.com/papers/met-mppca.pdf. It is required to
        compute the estimated data covariance and to score samples.
Notes
-----
For n_components='mle', this class uses the method of `Thomas P. Minka:
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604`
Implements the probabilistic PCA model from:
M. Tipping and C. Bishop, Probabilistic Principal Component Analysis,
Journal of the Royal Statistical Society, Series B, 61, Part 3, pp. 611-622
via the score and score_samples methods.
See http://www.miketipping.com/papers/met-mppca.pdf
Due to implementation subtleties of the Singular Value Decomposition (SVD),
which is used in this implementation, running fit twice on the same matrix
can lead to principal components with signs flipped (change in direction).
For this reason, it is important to always use the same estimator object to
transform data in a consistent fashion.
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import PCA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> pca = PCA(n_components=2)
>>> pca.fit(X)
PCA(copy=True, n_components=2, whiten=False)
>>> print(pca.explained_variance_ratio_) # doctest: +ELLIPSIS
[ 0.99244... 0.00755...]
See also
--------
RandomizedPCA
KernelPCA
SparsePCA
TruncatedSVD
"""
def __init__(self, n_components=None, copy=True, whiten=False):
self.n_components = n_components
self.copy = copy
self.whiten = whiten
def fit(self, X, y=None):
"""Fit the model with X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
self._fit(X)
return self
def fit_transform(self, X, y=None):
"""Fit the model with X and apply the dimensionality reduction on X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
U, S, V = self._fit(X)
U = U[:, :self.n_components_]
if self.whiten:
# X_new = X * V / S * sqrt(n_samples) = U * sqrt(n_samples)
U *= sqrt(X.shape[0])
else:
# X_new = X * V = U * S * V^T * V = U * S
U *= S[:self.n_components_]
return U
def _fit(self, X):
"""Fit the model on X
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
U, s, V : ndarrays
The SVD of the input data, copied and centered when
requested.
"""
X = check_array(X)
n_samples, n_features = X.shape
X = as_float_array(X, copy=self.copy)
# Center data
self.mean_ = np.mean(X, axis=0)
X -= self.mean_
U, S, V = linalg.svd(X, full_matrices=False)
explained_variance_ = (S ** 2) / n_samples
explained_variance_ratio_ = (explained_variance_ /
explained_variance_.sum())
components_ = V
n_components = self.n_components
if n_components is None:
n_components = n_features
elif n_components == 'mle':
if n_samples < n_features:
raise ValueError("n_components='mle' is only supported "
"if n_samples >= n_features")
n_components = _infer_dimension_(explained_variance_,
n_samples, n_features)
elif not 0 <= n_components <= n_features:
raise ValueError("n_components=%r invalid for n_features=%d"
% (n_components, n_features))
if 0 < n_components < 1.0:
# number of components for which the cumulated explained variance
# percentage is superior to the desired threshold
ratio_cumsum = explained_variance_ratio_.cumsum()
n_components = np.sum(ratio_cumsum < n_components) + 1
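            # e.g. with ratio_cumsum == [0.7, 0.9, 0.97, 1.0] and a requested
            # variance fraction of 0.95, np.sum(...) gives 2, so three
            # components are kept (illustrative numbers).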
# Compute noise covariance using Probabilistic PCA model
# The sigma2 maximum likelihood (cf. eq. 12.46)
if n_components < n_features:
self.noise_variance_ = explained_variance_[n_components:].mean()
else:
self.noise_variance_ = 0.
# store n_samples to revert whitening when getting covariance
self.n_samples_ = n_samples
self.components_ = components_[:n_components]
self.explained_variance_ = explained_variance_[:n_components]
explained_variance_ratio_ = explained_variance_ratio_[:n_components]
self.explained_variance_ratio_ = explained_variance_ratio_
self.n_components_ = n_components
return (U, S, V)
def get_covariance(self):
"""Compute data covariance with the generative model.
``cov = components_.T * S**2 * components_ + sigma2 * eye(n_features)``
where S**2 contains the explained variances.
Returns
-------
cov : array, shape=(n_features, n_features)
Estimated covariance of data.
"""
components_ = self.components_
exp_var = self.explained_variance_
if self.whiten:
components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
cov = np.dot(components_.T * exp_var_diff, components_)
cov.flat[::len(cov) + 1] += self.noise_variance_ # modify diag inplace
return cov
def get_precision(self):
"""Compute data precision matrix with the generative model.
Equals the inverse of the covariance but computed with
the matrix inversion lemma for efficiency.
Returns
-------
precision : array, shape=(n_features, n_features)
Estimated precision of data.
"""
n_features = self.components_.shape[1]
# handle corner cases first
if self.n_components_ == 0:
return np.eye(n_features) / self.noise_variance_
if self.n_components_ == n_features:
return linalg.inv(self.get_covariance())
# Get precision using matrix inversion lemma
components_ = self.components_
exp_var = self.explained_variance_
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
precision = np.dot(components_, components_.T) / self.noise_variance_
precision.flat[::len(precision) + 1] += 1. / exp_var_diff
precision = np.dot(components_.T,
np.dot(linalg.inv(precision), components_))
precision /= -(self.noise_variance_ ** 2)
precision.flat[::len(precision) + 1] += 1. / self.noise_variance_
return precision
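    # The computation above is an application of the Woodbury matrix
    # identity,
    #   inv(A + U C U^T) = inv(A) - inv(A) U inv(inv(C) + U^T inv(A) U) U^T inv(A)
    # with A = noise_variance_ * I, so only an (n_components, n_components)
    # matrix ever has to be inverted.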
def transform(self, X):
"""Apply the dimensionality reduction on X.
        X is projected on the first principal components previously extracted
from a training set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
check_is_fitted(self, 'mean_')
X = check_array(X)
if self.mean_ is not None:
X = X - self.mean_
X_transformed = fast_dot(X, self.components_.T)
if self.whiten:
X_transformed /= np.sqrt(self.explained_variance_)
return X_transformed
def inverse_transform(self, X):
"""Transform data back to its original space, i.e.,
return an input X_original whose transform would be X
Parameters
----------
X : array-like, shape (n_samples, n_components)
New data, where n_samples is the number of samples
and n_components is the number of components.
Returns
-------
        X_original : array-like, shape (n_samples, n_features)
"""
check_is_fitted(self, 'mean_')
if self.whiten:
return fast_dot(
X,
np.sqrt(self.explained_variance_[:, np.newaxis]) *
self.components_) + self.mean_
else:
return fast_dot(X, self.components_) + self.mean_
def score_samples(self, X):
"""Return the log-likelihood of each sample
See. "Pattern Recognition and Machine Learning"
by C. Bishop, 12.2.1 p. 574
or http://www.miketipping.com/papers/met-mppca.pdf
Parameters
----------
X: array, shape(n_samples, n_features)
The data.
Returns
-------
ll: array, shape (n_samples,)
Log-likelihood of each sample under the current model
"""
check_is_fitted(self, 'mean_')
X = check_array(X)
Xr = X - self.mean_
n_features = X.shape[1]
log_like = np.zeros(X.shape[0])
precision = self.get_precision()
log_like = -.5 * (Xr * (np.dot(Xr, precision))).sum(axis=1)
log_like -= .5 * (n_features * log(2. * np.pi)
- fast_logdet(precision))
return log_like
def score(self, X, y=None):
"""Return the average log-likelihood of all samples
See. "Pattern Recognition and Machine Learning"
by C. Bishop, 12.2.1 p. 574
or http://www.miketipping.com/papers/met-mppca.pdf
Parameters
----------
X: array, shape(n_samples, n_features)
The data.
Returns
-------
ll: float
Average log-likelihood of the samples under the current model
"""
return np.mean(self.score_samples(X))
class RandomizedPCA(BaseEstimator, TransformerMixin):
"""Principal component analysis (PCA) using randomized SVD
Linear dimensionality reduction using approximated Singular Value
Decomposition of the data and keeping only the most significant
singular vectors to project the data to a lower dimensional space.
Read more in the :ref:`User Guide <RandomizedPCA>`.
Parameters
----------
n_components : int, optional
Maximum number of components to keep. When not given or None, this
is set to n_features (the second dimension of the training data).
copy : bool
If False, data passed to fit are overwritten and running
fit(X).transform(X) will not yield the expected results,
use fit_transform(X) instead.
iterated_power : int, optional
Number of iterations for the power method. 3 by default.
whiten : bool, optional
When True (False by default) the `components_` vectors are divided
by the singular values to ensure uncorrelated outputs with unit
component-wise variances.
Whitening will remove some information from the transformed signal
(the relative variance scales of the components) but can sometime
improve the predictive accuracy of the downstream estimators by
making their data respect some hard-wired assumptions.
random_state : int or RandomState instance or None (default)
Pseudo Random Number generator seed control. If None, use the
numpy.random singleton.
Attributes
----------
components_ : array, [n_components, n_features]
Components with maximum variance.
explained_variance_ratio_ : array, [n_components]
        Percentage of variance explained by each of the selected components.
        If ``n_components`` is not set then all components are stored and the
        sum of explained variances is equal to 1.0
mean_ : array, [n_features]
Per-feature empirical mean, estimated from the training set.
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import RandomizedPCA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> pca = RandomizedPCA(n_components=2)
>>> pca.fit(X) # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
RandomizedPCA(copy=True, iterated_power=3, n_components=2,
random_state=None, whiten=False)
>>> print(pca.explained_variance_ratio_) # doctest: +ELLIPSIS
[ 0.99244... 0.00755...]
See also
--------
PCA
TruncatedSVD
References
----------
.. [Halko2009] `Finding structure with randomness: Stochastic algorithms
for constructing approximate matrix decompositions Halko, et al., 2009
        (arXiv:0909.4061)`
.. [MRT] `A randomized algorithm for the decomposition of matrices
Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert`
"""
def __init__(self, n_components=None, copy=True, iterated_power=3,
whiten=False, random_state=None):
self.n_components = n_components
self.copy = copy
self.iterated_power = iterated_power
self.whiten = whiten
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model with X by extracting the first principal components.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
self._fit(check_array(X))
return self
def _fit(self, X):
"""Fit the model to the data X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
X : ndarray, shape (n_samples, n_features)
The input data, copied, centered and whitened when requested.
"""
random_state = check_random_state(self.random_state)
X = np.atleast_2d(as_float_array(X, copy=self.copy))
n_samples = X.shape[0]
# Center data
self.mean_ = np.mean(X, axis=0)
X -= self.mean_
if self.n_components is None:
n_components = X.shape[1]
else:
n_components = self.n_components
U, S, V = randomized_svd(X, n_components,
n_iter=self.iterated_power,
random_state=random_state)
self.explained_variance_ = exp_var = (S ** 2) / n_samples
full_var = np.var(X, axis=0).sum()
self.explained_variance_ratio_ = exp_var / full_var
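        # Whitening rescales each principal direction by sqrt(n_samples) / S
        # so that the projected data has unit component-wise variance.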
if self.whiten:
self.components_ = V / S[:, np.newaxis] * sqrt(n_samples)
else:
self.components_ = V
return X
def transform(self, X, y=None):
"""Apply dimensionality reduction on X.
        X is projected on the first principal components previously extracted
from a training set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
            New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
check_is_fitted(self, 'mean_')
X = check_array(X)
if self.mean_ is not None:
X = X - self.mean_
X = fast_dot(X, self.components_.T)
return X
def fit_transform(self, X, y=None):
"""Fit the model with X and apply the dimensionality reduction on X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
            New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
X = check_array(X)
X = self._fit(X)
return fast_dot(X, self.components_.T)
def inverse_transform(self, X, y=None):
"""Transform data back to its original space.
Returns an array X_original whose transform would be X.
Parameters
----------
X : array-like, shape (n_samples, n_components)
            New data, where n_samples is the number of samples
and n_components is the number of components.
Returns
-------
        X_original : array-like, shape (n_samples, n_features)
Notes
-----
If whitening is enabled, inverse_transform does not compute the
exact inverse operation of transform.
"""
check_is_fitted(self, 'mean_')
X_original = fast_dot(X, self.components_)
if self.mean_ is not None:
X_original = X_original + self.mean_
return X_original
|
bsd-3-clause
|
IntelLabs/hpat
|
examples/series/series_quantile.py
|
1
|
1768
|
# *****************************************************************************
# Copyright (c) 2020, Intel Corporation All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# *****************************************************************************
import pandas as pd
from numba import njit
@njit
def series_quantile():
s = pd.Series([1, 2, 3, 4])
    median = .5  # the 0.5 quantile, i.e. the median
out_series = s.quantile(median)
return out_series # Expect median value == 2.5
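# For the four values [1, 2, 3, 4], the 0.5 quantile falls between 2 and 3;
# pandas' default linear interpolation therefore returns 2.5.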
print(series_quantile())
|
bsd-2-clause
|
laugustyniak/textlytics
|
examples/sentiment_process_test.py
|
1
|
16579
|
# -*- coding: utf-8 -*-
from pprint import pprint
from sklearn.linear_model import LogisticRegression
from textlytics.sentiment.document_preprocessing import \
DocumentPreprocessor
__author__ = 'Lukasz Augustyniak'
import glob
import pickle
import logging
import pandas as pd
from os import path
from datetime import datetime
from textlytics.sentiment.sentiment import Sentiment
from textlytics.sentiment.io_sentiment import results_to_pickle
logging.basicConfig(filename='processing.log',
level=logging.DEBUG,
format='%(asctime)s - %(levelname)s - %(message)s')
ALL_CLASSIFIERS = {
# 'BernoulliNB': BernoulliNB(),
# 'GaussianNB': GaussianNB(),
# 'MultinomialNB': MultinomialNB(),
# 'DecisionTreeClassifier': DecisionTreeClassifier(),
# 'RandomForestClassifier': RandomForestClassifier(),
'LogisticRegression': LogisticRegression(),
# 'LinearSVC': LinearSVC(),
# 'Perceptron': Perceptron(),
# 'SVC': SVC(),
# 'AdaBoostClassifier': AdaBoostClassifier(),
# 'SVR': SVR(),
# 'NuSVC': NuSVC(),
# 'NuSVR': NuSVR(),
# 'OneClassSVM': OneClassSVM(),
# 'ExtraTreeClassifier': ExtraTreeClassifier()
}
def test_ensemble(dataset, source):
# ############################# LEXICONS ##################################
# dictionary for all predicted values
lexicons_predictions = {}
sentiment = Sentiment()
    print datetime.now()
# lexicons_files = [
# 'AFINN-96.txt',
# 'AFINN-111.txt',
# # 'amazon_movies_25.txt',
# 'Bing-Liu.txt',
# 'enchantedlearning.com.txt',
# 'past_future_list.txt',
# 'past_future_list_plus.txt',
# 'simple_list.txt',
# 'simple_list_plus.txt',
# 'simplest.txt'
# ]
#
# category_lexicons = [
# 'amazon_automotive_5.txt',
# 'amazon_automotive_25.txt',
# 'amazon_books_5.txt',
# 'amazon_books_25.txt',
# 'amazon_electronics_5.txt',
# 'amazon_electronics_25.txt',
# 'amazon_health_5.txt',
# 'amazon_health_25.txt',
# 'amazon_movies_5.txt']
#
# for cl in category_lexicons:
# if cl.split('_')[1] in dataset.lower():
# lexicons_files.append(cl)
# print cl
# df, lexicon_prediction, lexicon_result, classes = \
# sentiment.lexicon_based_sentiment(
# f_name=dataset,
# sentiment_level='Document',
# lexicons_files=lexicons_files,
# words_stem=False)
# lexicons_predictions.update(lexicon_prediction)
# to_pickle(dataset, 'predictions', lexicon_prediction)
# to_pickle(dataset, 'lexicons', lexicon_result)
# pprint(lexicon_result)
# ############################# ENSEMBLE LEXICONS #########################
# ensemble_lexicons = SentimentEnsemble(classes=classes)
# ensemble_results = ensemble_lexicons.sentiment_ensemble_lexi_ml(
# lexicon_predictions=lexicons_predictions,
# ml_predictions={},
# classifiers=ALL_CLASSIFIERS,
# n_folds=2
# )
# to_pickle(dataset, 'ensemble-lexicons-only', ensemble_results)
# ############################# features_ngrams ############################
# all n grams to test
features_ngrams = {
'unigrams': (1, 1),
# 'bigrams': (2, 2),
# 'trigrams': (3, 3),
# 'n_grams_1_2': (1, 2),
# 'n_grams_1_3': (1, 3),
# 'n_grams_2_3': (2, 3)
}
logging.info(features_ngrams)
# dictionary for machine learning predictions (part of feature set for
# second step in ensemble approach)
ml_predictions = {}
############################# TfidfVectorizer ############################
# for n_gram_name, n_grams_range in features_ngrams.iteritems():
# print n_gram_name
# print 'TfidfVectorizer'
# f_name = n_gram_name + '_TfidfVectorizer'
# classes, ml_prediction, results_ml = sentiment.machine_learning_sentiment(
# file_name=dataset,
# worksheet_name='Arkusz1',
# n_gram_range=n_grams_range,
# n_folds=10,
# classifiers=ALL_CLASSIFIERS,
# # classifiers={'GaussianNB': GaussianNB()},
# # classifiers=None, # all classifier available in sentiment class
# amazon=True,
# lowercase=True,
# stop_words='english',
# max_df=1.0,
# min_df=0.0,
# max_features=None,
# results_filename=f_name,
# vectorizer='TfidfVectorizer',
# # tokenizer=document_preprocessor.tokenizer_with_stemming
# )
# # add all prediction dictionaries into feature set
# ml_predictions.update(ml_prediction)
# to_pickle(dataset, n_gram_name + '-' + f_name, results_ml)
# ############################# CountVectorizer ############################
for n_gram_name, n_grams_range in features_ngrams.iteritems():
print n_gram_name
print 'CountVectorizer'
f_name = n_gram_name + '_CountVectorizer'
classes, ml_prediction, results_ml = sentiment.supervised_sentiment(
dataset=dataset,
# worksheet_name='Arkusz1',
n_gram_range=n_grams_range,
n_folds=10,
# classifiers={'GaussianNB': GaussianNB()},
# classifiers=None, # all classifier available in sentiment class
classifiers=ALL_CLASSIFIERS,
# amazon=True,
lowercase=True,
stop_words='english',
max_df=1.0,
min_df=0.0,
max_features=None,
f_name_results=f_name,
vectorizer='CountVectorizer',
# tokenizer=document_preprocessor.tokenizer_with_stemming
source=source
)
ml_predictions.update(ml_prediction)
results_to_pickle(dataset, n_gram_name + '-' + f_name, results_ml)
pprint(results_ml)
# pprint(lexicons_predictions)
# pprint(ml_predictions)
# ############################# ENSEMBLE ###################################
# ensemble = SentimentEnsemble(classes=classes)
# ensemble_results = ensemble.sentiment_ensemble_lexi_ml(
# lexicon_predictions=lexicons_predictions,
# ml_predictions=ml_predictions,
# classifiers=ALL_CLASSIFIERS,
# n_folds=10
# )
# to_pickle(dataset, 'ensemble', ensemble_results)
# ############################# OTHER ######################################
# sentiment.machine_learning_sentiment(
# file_name='Amazon-500x150-balanced.xlsx',
# worksheet_name='Arkusz1',
# n_gram_range=(1, 3),
# # classifiers={'GaussianNB': GaussianNB()},
# # classifiers={},
# amazon=True)
#
# sentiment.machine_learning_sentiment(
# file_name='Amazon-500x150-balanced.xlsx',
# worksheet_name='Arkusz1',
# n_gram_range=(1, 2),
# classifiers={'GaussianNB': GaussianNB()},
# # classifiers={},
# amazon_dataset=True)
#
# sentiment.machine_learning_sentiment(
# file_name='Amazon-500x150-balanced.xlsx',
# worksheet_name='Arkusz1',
# n_gram_range=(1, 1),
# classifiers={'GaussianNB': GaussianNB()},
# # classifiers={},
# amazon_dataset=True)
#
# # tylko pozytywne i negatywne
# sentiment.machine_learning_sentiment(
# file_name=path.join('Amazon-4k-pos-neg.xls'),
# # file_name=path.join('Amazon-500x150-balanced.xlsx'),
# worksheet_name='Arkusz1',
# # classifiers={'GaussianNB': GaussianNB()},
# classifiers={},
# amazon_dataset=True,
# progress_interval=3)
# ############################# TEST RUNNING ###################################
#
# parser = argparse.ArgumentParser()
# parser.add_argument("dataset", help="path to dataset file")
# args = parser.parse_args()
# test_ensemble(dataset=args.dataset)
# test_ensemble(dataset='Amazon-500x150-balanced.xlsx')
# test_ensemble(dataset='Automotive9600.csv')
# test_ensemble(dataset='Books9600.csv')
# test_ensemble(dataset='Health & Personal Care9600.csv')
# test_ensemble(dataset='Movies & TV9600.csv')
# test_ensemble(dataset='Movies & TV3200.csv')
# test_ensemble(dataset='Movies_&_TV1200.csv')
# test_ensemble(dataset='Movies & TV-1-3-5-x-1000.csv')
# test_ensemble(dataset='Music9600.csv')
# test_ensemble(dataset='semeval2013.csv', source='semeval2013')
# test_ensemble(dataset='semeval2014.csv', source='semeval2014')
# test_ensemble(dataset='Automotive200.csv', source='amazon')
# test_ensemble(dataset='Amazon-7.xlsx')
# test_ensemble()
# ############################# chosen kfolds #################################
# @memory_profiler.profile
def get_dataset_with_kfolds_indexes_train(base_path='/home/engine/csv/',
dataset_filter='', n_reviews=2000,
source='amazon'):
train_test_path = path.join(base_path, 'train_test_subsets')
# pickle_pattern = path.join(train_test_path, '*%s*.pkl' % dataset_filter)
# train_test_pickles = glob.glob(pickle_pattern)
datasets = glob.glob(
path.join(base_path, '*%s*.txt.gz.csv' % dataset_filter))
print 'Datasets:'
pprint(datasets)
for dataset in datasets:
dataset_name = path.basename(dataset)
print 'Dataset name: %s' % dataset_name
dp = DocumentPreprocessor()
df = pd.DataFrame.from_csv(dataset, sep=';', index_col=False)
df, _ = dp.star_score_to_sentiment(df,
score_column='review/score')
# extract only Document and Sentiment columns
df['Document'] = df['review/text']
df = df[['Sentiment', 'Document']]
indexes_all = set(df.index)
print 'all indexes: %s' % len(indexes_all)
try:
# load train/test sets folds
f_path = path.join(train_test_path, 'train-test-%s-%s.pkl'
'' % (n_reviews, dataset_name))
with open(f_path, 'rb') as f:
train_test_indexes = pickle.load(f)
sentiment = Sentiment()
features_ngrams = {
'unigrams': (1, 1),
# 'n_grams_1_2': (1, 2),
# 'n_grams_1_3': (1, 3),
}
logging.info(features_ngrams)
ml_predictions = {}
# ############################# CountVectorizer ############################
for n_gram_name, n_grams_range in features_ngrams.iteritems():
print n_gram_name
print 'CountVectorizer'
f_name = n_gram_name + '_CountVectorizer'
classes, ml_prediction, results_ml = sentiment.supervised_sentiment(
dataset=df,
n_gram_range=n_grams_range,
classifiers=ALL_CLASSIFIERS,
lowercase=True,
stop_words='english',
max_df=1.0,
min_df=0.0,
max_features=None,
f_name_results=f_name,
vectorizer='CountVectorizer',
source=source,
kfolds_indexes=train_test_indexes,
dataset_name=dataset_name
)
ml_predictions.update(ml_prediction)
results_to_pickle(dataset=dataset_name, f_name=f_name,
obj=results_ml)
pprint(results_ml)
except IOError as err:
logging.error('%s not loaded' % dataset_name)
print str(err)
raise
# get_dataset_with_kfolds_indexes_train(
# # base_path='/mnt/sdc2/Lukasz/Datasets/amazon-cats/csv/',
# # base_path='C:/Datasets/Amazon/csv/',
# base_path='/home/engine/csv',
# # dataset_filter='ell',
# dataset_filter='Electr'
# )
# @memory_profiler.profile
def get_dataset_with_kfolds_indexes(base_path='/home/engine/csv/',
dataset_filter='', n_reviews=2000,
source='amazon',
features_ngrams={'unigrams': (1, 1)}):
datasets = glob.glob(path.join(
base_path, '*%s*.txt.gz.csv' % dataset_filter))
print 'Datasets:'
pprint(datasets)
for dataset in datasets:
dataset_name = path.basename(dataset)
print 'Dataset name: %s' % dataset_name
dp = DocumentPreprocessor()
df = pd.DataFrame.from_csv(dataset, sep=';', index_col=False)
df, _ = dp.star_score_to_sentiment(df, score_column='review/score')
# extract only Document and Sentiment columns
df['Document'] = df['review/text']
df = df[['Sentiment', 'Document']]
try:
sent = Sentiment()
logging.info(features_ngrams)
ml_predictions = {}
results = []
for n_gram_name, n_grams_range in features_ngrams.iteritems():
print n_gram_name
print 'CountVectorizer'
f_name = n_gram_name + '_CountVectorizer'
classes, ml_prediction, results_ml = sent.supervised_sentiment(
dataset=df,
n_gram_range=n_grams_range,
classifiers=ALL_CLASSIFIERS,
lowercase=True,
stop_words='english',
max_df=1.0,
min_df=0.0,
max_features=None,
f_name_results=f_name,
vectorizer='CountVectorizer',
source=source,
dataset_name=dataset_name,
n_folds=5
)
results.append(results_ml)
# ml_predictions.update(ml_prediction)
results_to_pickle(dataset=dataset_name, f_name=f_name,
obj=results)
pprint(results_ml)
except IOError as err:
logging.error('%s not loaded' % dataset_name)
print str(err)
raise
get_dataset_with_kfolds_indexes(
# base_path='/mnt/sdc2/Lukasz/Datasets/amazon-cats/csv/',
# base_path='C:/Datasets/Amazon/csv/',
base_path='/home/engine/csv',
# dataset_filter='ell',
dataset_filter='Automo',
features_ngrams={
'unigrams': (1, 1),
# 'n_grams_1_2': (1, 2),
# 'n_grams_1_3': (1, 3),
}
)
# @memory_profiler.profile
def lexicons_ensemble(dataset='semeval2013', source=None):
sentiment = Sentiment()
lexicons_predictions = {}
lexicons_files = [
'AFINN-96.txt',
'AFINN-111.txt',
# 'amazon_movies_25.txt',
'Bing-Liu.txt',
'enchantedlearning.com.txt',
'past_future_list.txt',
'past_future_list_plus.txt',
'simple_list.txt',
'simple_list_plus.txt',
'simplest.txt'
]
category_lexicons = [
'amazon_automotive_5.txt',
'amazon_automotive_25.txt',
'amazon_books_5.txt',
'amazon_books_25.txt',
'amazon_electronics_5.txt',
'amazon_electronics_25.txt',
'amazon_health_5.txt',
'amazon_health_25.txt',
'amazon_movies_5.txt']
for cl in category_lexicons:
if cl.split('_')[1] in dataset.lower():
lexicons_files.append(cl)
print cl
df, lexicon_prediction, lexicon_result, classes = \
sentiment.lexicon_based_sentiment(
dataset=dataset,
sentiment_level='Document',
lexicons_files=lexicons_files,
words_stem=False,
n_jobs=None
)
print
print 'results: '
pprint(lexicon_result)
print
lexicons_predictions.update(lexicon_prediction)
results_to_pickle(dataset, 'predictions', lexicon_prediction)
results_to_pickle(dataset, 'lexicons', lexicon_result)
# ############################ ENSEMBLE LEXICONS #########################
# ensemble_lexicons = SentimentEnsemble(classes=classes)
# ensemble_results = ensemble_lexicons.sentiment_ensemble_lexi_ml(
# lexicon_predictions=lexicons_predictions,
# ml_predictions={},
# classifiers=ALL_CLASSIFIERS,
# n_folds=2
# )
# results_to_pickle(dataset, 'ensemble-lexicons-only', ensemble_results)
# lexicons_ensemble()
|
mit
|
leggitta/mne-python
|
mne/commands/mne_browse_raw.py
|
18
|
3986
|
#!/usr/bin/env python
"""Browse raw data
You can, for example, do:
$ mne browse_raw --raw sample_audvis_raw.fif \
--proj sample_audvis_ecg_proj.fif \
--eve sample_audvis_raw-eve.fif
"""
# Authors : Eric Larson, PhD
import sys
import mne
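# Programmatic equivalent of the CLI example in the docstring above (a
# sketch only; the file names follow the MNE sample-data conventions and are
# assumptions, adjust to your own paths):
def _browse_raw_example():
    import matplotlib.pyplot as plt
    raw = mne.io.Raw('sample_audvis_raw.fif', preload=False)
    raw.info['projs'] = mne.read_proj('sample_audvis_ecg_proj.fif')
    events = mne.read_events('sample_audvis_raw-eve.fif')
    raw.plot(duration=10.0, start=0.0, n_channels=20, order='type',
             events=events)
    plt.show(block=True)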
def run():
import matplotlib.pyplot as plt
from mne.commands.utils import get_optparser
parser = get_optparser(__file__)
parser.add_option("--raw", dest="raw_in",
help="Input raw FIF file", metavar="FILE")
parser.add_option("--proj", dest="proj_in",
help="Projector file", metavar="FILE",
default='')
parser.add_option("--eve", dest="eve_in",
help="Events file", metavar="FILE",
default='')
parser.add_option("-d", "--duration", dest="duration", type="float",
help="Time window for plotting (sec)",
default=10.0)
parser.add_option("-t", "--start", dest="start", type="float",
help="Initial start time for plotting",
default=0.0)
parser.add_option("-n", "--n_channels", dest="n_channels", type="int",
help="Number of channels to plot at a time",
default=20)
parser.add_option("-o", "--order", dest="order",
help="Order for plotting ('type' or 'original')",
default='type')
parser.add_option("-p", "--preload", dest="preload",
help="Preload raw data (for faster navigaton)",
default=False)
parser.add_option("-s", "--show_options", dest="show_options",
help="Show projection options dialog",
default=False)
parser.add_option("--allowmaxshield", dest="maxshield",
help="Allow loading MaxShield processed data",
action="store_true")
parser.add_option("--highpass", dest="highpass", type="float",
help="Display high-pass filter corner frequency",
default=-1)
parser.add_option("--lowpass", dest="lowpass", type="float",
help="Display low-pass filter corner frequency",
default=-1)
parser.add_option("--filtorder", dest="filtorder", type="int",
help="Display filtering IIR order",
default=4)
parser.add_option("--clipping", dest="clipping",
help="Enable trace clipping mode, either 'clip' or "
"'transparent'", default=None)
options, args = parser.parse_args()
raw_in = options.raw_in
duration = options.duration
start = options.start
n_channels = options.n_channels
order = options.order
preload = options.preload
show_options = options.show_options
proj_in = options.proj_in
eve_in = options.eve_in
maxshield = options.maxshield
highpass = options.highpass
lowpass = options.lowpass
filtorder = options.filtorder
clipping = options.clipping
if raw_in is None:
parser.print_help()
sys.exit(1)
raw = mne.io.Raw(raw_in, preload=preload, allow_maxshield=maxshield)
if len(proj_in) > 0:
projs = mne.read_proj(proj_in)
raw.info['projs'] = projs
if len(eve_in) > 0:
events = mne.read_events(eve_in)
else:
events = None
highpass = None if highpass < 0 or filtorder <= 0 else highpass
lowpass = None if lowpass < 0 or filtorder <= 0 else lowpass
filtorder = 4 if filtorder <= 0 else filtorder
raw.plot(duration=duration, start=start, n_channels=n_channels,
order=order, show_options=show_options, events=events,
highpass=highpass, lowpass=lowpass, filtorder=filtorder,
clipping=clipping)
plt.show(block=True)
is_main = (__name__ == '__main__')
if is_main:
run()
|
bsd-3-clause
|
ssh0/growing-string
|
triangular_lattice/max_radius.py
|
1
|
1541
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
#
# written by Shotaro Fujimoto
# 2016-10-05
import time
import numpy as np
from tqdm import tqdm
from multiprocessing import Pool
from growing_string import Main
import matplotlib.pyplot as plt
max_dists = []
sample_num = 10
betas_num = 10
betas = np.linspace(0., 10., num=betas_num)
frames = 400
L = 1000
params = {
'Lx': L,
'Ly': L,
'frames': frames,
'size': [3,] * 1,
'plot': False,
'save_image': False,
'strings': [{'id': 1, 'x': L/4, 'y': L/2, 'vec': [0, 4]}],
}
def calc_max_radius(beta):
return max([_calc_max_radius(beta) for i in range(sample_num)])
def _calc_max_radius(beta):
main = Main(beta=beta, **params)
s = main.strings[0]
N = float(len(s.vec) + 1)
pos = list(s.pos.T)
x = main.lattice_X[pos]
y = main.lattice_Y[pos]
X = main.lattice_X[L / 4, L / 2]
Y = main.lattice_Y[L / 4, L / 2]
r = np.sqrt((x - X) ** 2 + (y - Y) ** 2)
return np.max(r)
pool = Pool(6)
ite = pool.imap(calc_max_radius, betas)
for ret in tqdm(ite, total=betas_num):
max_dists.append(ret)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(betas, max_dists)
ax.set_title("Max distance from the starting point")
ax.set_xlim(0, max(betas) + 1)
ax.set_ylim(min(max_dists) - 0.1, max(1., max(max_dists) + 0.1))
ax.set_xlabel(r'$\beta$')
ax.set_ylabel(r'$R$')
fn = "./results/img/max_radius/"
fn += "frames=%d" % frames + "_sample=%d" % sample_num
fn += time.strftime("_%y%m%d_%H%M%S")
fn += ".png"
fig.savefig(fn)
plt.close()
|
mit
|
ville-k/tensorflow
|
tensorflow/contrib/learn/python/learn/dataframe/dataframe.py
|
27
|
4836
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A DataFrame is a container for ingesting and preprocessing data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from .series import Series
from .transform import Transform
from tensorflow.python.util.deprecation import deprecated
class DataFrame(object):
"""A DataFrame is a container for ingesting and preprocessing data."""
@deprecated("2017-06-15", "contrib/learn/dataframe/** is deprecated.")
def __init__(self):
self._columns = {}
def columns(self):
"""Set of the column names."""
return frozenset(self._columns.keys())
def __len__(self):
"""The number of columns in the DataFrame."""
return len(self._columns)
def assign(self, **kwargs):
"""Adds columns to DataFrame.
Args:
**kwargs: assignments of the form key=value where key is a string
        and value is an `inflow.Series`, a zero-input `inflow.Transform`,
        or None (which removes the column).
Raises:
TypeError: keys are not strings.
      TypeError: values are not `inflow.Series`, zero-input
        `inflow.Transform`, or None.
TODO(jamieas): pandas assign method returns a new DataFrame. Consider
switching to this behavior, changing the name or adding in_place as an
argument.
"""
for k, v in kwargs.items():
if not isinstance(k, str):
raise TypeError("The only supported type for keys is string; got %s" %
type(k))
if v is None:
del self._columns[k]
elif isinstance(v, Series):
self._columns[k] = v
elif isinstance(v, Transform) and v.input_valency() == 0:
self._columns[k] = v()
else:
raise TypeError(
"Column in assignment must be an inflow.Series, inflow.Transform,"
" or None; got type '%s'." % type(v).__name__)
def select_columns(self, keys):
"""Returns a new DataFrame with a subset of columns.
Args:
keys: A list of strings. Each should be the name of a column in the
DataFrame.
Returns:
A new DataFrame containing only the specified columns.
"""
result = type(self)()
for key in keys:
result[key] = self._columns[key]
return result
def exclude_columns(self, exclude_keys):
"""Returns a new DataFrame with all columns not excluded via exclude_keys.
Args:
exclude_keys: A list of strings. Each should be the name of a column in
the DataFrame. These columns will be excluded from the result.
Returns:
A new DataFrame containing all columns except those specified.
"""
result = type(self)()
for key, value in self._columns.items():
if key not in exclude_keys:
result[key] = value
return result
def __getitem__(self, key):
"""Indexing functionality for DataFrames.
Args:
key: a string or an iterable of strings.
Returns:
A Series or list of Series corresponding to the given keys.
"""
if isinstance(key, str):
return self._columns[key]
elif isinstance(key, collections.Iterable):
for i in key:
if not isinstance(i, str):
raise TypeError("Expected a String; entry %s has type %s." %
(i, type(i).__name__))
return [self.__getitem__(i) for i in key]
raise TypeError(
"Invalid index: %s of type %s. Only strings or lists of strings are "
"supported." % (key, type(key)))
def __setitem__(self, key, value):
if isinstance(key, str):
key = [key]
if isinstance(value, Series):
value = [value]
self.assign(**dict(zip(key, value)))
def __delitem__(self, key):
if isinstance(key, str):
key = [key]
value = [None for _ in key]
self.assign(**dict(zip(key, value)))
def build(self, **kwargs):
# We do not allow passing a cache here, because that would encourage
# working around the rule that DataFrames cannot be expected to be
# synced with each other (e.g., they shuffle independently).
cache = {}
tensors = {name: c.build(cache, **kwargs)
for name, c in self._columns.items()}
return tensors
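# A minimal usage sketch (not from the TensorFlow sources): how assign,
# select_columns and __delitem__ compose. `_StubSeries` is hypothetical and
# assumes a `Series` subclass only needs to provide build().
def _dataframe_usage_sketch():
  class _StubSeries(Series):
    def build(self, cache, **kwargs):
      return None  # a real Series would return a Tensor here
  df = DataFrame()
  df.assign(a=_StubSeries(), b=_StubSeries())
  assert df.columns() == frozenset(['a', 'b'])
  sub = df.select_columns(['a'])  # a new DataFrame with just column 'a'
  del df['b']                     # equivalent to df.assign(b=None)
  return sub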
|
apache-2.0
|
simonsgit/bachelor_stuff
|
tests/first_test.py
|
1
|
3513
|
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 24 18:02:05 2015
@author: stamylew
"""
from cut_block import cut_block
from handle_h5 import read_h5
from handle_h5 import save_h5
from create_membrane import filter_membrane
import numpy as np
from binarize_labels import binarize_dense_labels
from random_labels import get_number_of_labels, limit_label
from quality import accuracy, precision, recall
import matplotlib.pyplot as plt
#create raw test block
#a = read_h5("/mnt/CLAWS1/stamilev/data/d.h5")
#b = cut_block(a, 25, 725, 0, 700, 0, 700)
#save_h5(b, "/home/stamylew/volumes/test_data.h5", "useful", None)
#c = read_h5("/home/stamylew/volumes/test_data.h5", "useful")
#d = cut_block(c, 300, 375, 465, 540, 0, 75)
#print d.shape
#save_h5(d, "/home/stamylew/volumes/test_cube1.h5", "75cube_raw", None)
#create gt and membrane test block
#e = read_h5("/mnt/CLAWS1/stamilev/data/ids_i_c_manualbigignore.h5")
#f = cut_block(e, 25, 725, 0, 700, 0, 700)
#save_h5(f, "/home/stamylew/volumes/test_gt.h5", "useful", None)
#g = read_h5("/home/stamylew/volumes/test_gt.h5", "useful")
#h = cut_block(g, 300, 375, 465, 540, 0, 75)
##save_h5(h, "/home/stamylew/volumes/test_cube1.h5", "75cube_gt", None)
#
#i = filter_membrane(h)
#save_h5(i, "/home/stamylew/volumes/test_cube1.h5", "75cube_memb", None)
#binarize trained data
#
#j = read_h5("/home/stamylew/src/autocontext/training/cache/0000_test_cube1_probs.h5")
#k = np.squeeze(j)
#l = k[:,:,:,0]
#m = binarize_dense_labels(l, 0.2, 0.8)
#save_h5(m, "/home/stamylew/volumes/test_cube1a.h5", "rand500_n4", None)
#label number
#labels = read_h5("/home/stamylew/75cube_Labels.h5", "exported_data")
#nol = get_number_of_labels(labels)
#print nol
#limited = limit_label(labels, 499)
#save_h5(limited, "/home/stamylew/75cube_Labels_rand_500.h5", "", None)
#compare gt to predict
##for different number of loops
n = read_h5("/home/stamylew/volumes/test_cube1.h5", "75cube_memb")
o = read_h5("/home/stamylew/volumes/test_cube1a.h5", "bin_n3")
p = read_h5("/home/stamylew/volumes/test_cube1.h5", "bin_n4")
q = read_h5("/home/stamylew/volumes/test_cube1.h5", "bin_n5")
r = read_h5("/home/stamylew/volumes/test_cube1a.h5", "bin_n6")
s = read_h5("/home/stamylew/volumes/test_cube1.h5", "bin_n7")
t = read_h5("/home/stamylew/volumes/test_cube1.h5", "bin_n8")
u = read_h5("/home/stamylew/volumes/test_cube1a.h5", "bin_n9")
v = read_h5("/home/stamylew/volumes/test_cube1a.h5", "bin_n2")
w = read_h5("/home/stamylew/volumes/test_cube1a.h5", "bin_n1")
y = read_h5("/home/stamylew/volumes/test_cube1a.h5", "bin_n10")
z = read_h5("/home/stamylew/volumes/test_cube1a.h5", "bin_n11")
#x = read_h5("/home/stamylew/src/autocontext/training/cache/0000_test_cube1_probs.h5")
#x = np.squeeze(x)
#x = x[:,:,:,0]
##for different label limits
#n = read_h5("/home/stamylew/volumes/test_cube1.h5", "75cube_memb")
#o = read_h5("/home/stamylew/volumes/test_cube1a.h5", "rand500_n4")
#plotting results
a = accuracy
b = precision
c = recall
plt.plot([1,2,3,4,5,6,7,8,9,10,11],[a(n,w),a(n,v),a(n,o),a(n,p),a(n,q),a(n,r),a(n,s),a(n,t),a(n,u),a(n,y),a(n,z)],'r-',
[1,2,3,4,5,6,7,8,9,10,11],[b(n,w),b(n,v),b(n,o),b(n,p),b(n,q),b(n,r),b(n,s),b(n,t),b(n,u),b(n,y),b(n,z)],'b-',
[1,2,3,4,5,6,7,8,9,10,11],[c(n,w),c(n,v),c(n,o),c(n,p),c(n,q),c(n,r),c(n,s),c(n,t),c(n,u),c(n,y),c(n,z)],'g-')
plt.axis([-1,12,0,1])
plt.show()
#plt.plot([0,1],[a(n,o),a(n,p)],'ro', [0,1],[b(n,o),b(n,p)],'bo', [0,1], [c(n,o),c(n,p)], 'go')
#plt.axis([-1,2, 0,1])
#plt.show()
print "done"
|
mit
|
kazemakase/scikit-learn
|
examples/cluster/plot_kmeans_digits.py
|
230
|
4524
|
"""
===========================================================
A demo of K-Means clustering on the handwritten digits data
===========================================================
In this example we compare the various initialization strategies for
K-means in terms of runtime and quality of the results.
As the ground truth is known here, we also apply different cluster
quality metrics to judge the goodness of fit of the cluster labels to the
ground truth.
Cluster quality metrics evaluated (see :ref:`clustering_evaluation` for
definitions and discussions of the metrics):
=========== ========================================================
Shorthand full name
=========== ========================================================
homo homogeneity score
compl completeness score
v-meas V measure
ARI adjusted Rand index
AMI adjusted mutual information
silhouette silhouette coefficient
=========== ========================================================
"""
print(__doc__)
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn import metrics
from sklearn.cluster import KMeans
from sklearn.datasets import load_digits
from sklearn.decomposition import PCA
from sklearn.preprocessing import scale
np.random.seed(42)
digits = load_digits()
data = scale(digits.data)
n_samples, n_features = data.shape
n_digits = len(np.unique(digits.target))
labels = digits.target
sample_size = 300
print("n_digits: %d, \t n_samples %d, \t n_features %d"
% (n_digits, n_samples, n_features))
print(79 * '_')
print('% 9s' % 'init'
' time inertia homo compl v-meas ARI AMI silhouette')
def bench_k_means(estimator, name, data):
t0 = time()
estimator.fit(data)
print('% 9s %.2fs %i %.3f %.3f %.3f %.3f %.3f %.3f'
% (name, (time() - t0), estimator.inertia_,
metrics.homogeneity_score(labels, estimator.labels_),
metrics.completeness_score(labels, estimator.labels_),
metrics.v_measure_score(labels, estimator.labels_),
metrics.adjusted_rand_score(labels, estimator.labels_),
metrics.adjusted_mutual_info_score(labels, estimator.labels_),
metrics.silhouette_score(data, estimator.labels_,
metric='euclidean',
sample_size=sample_size)))
bench_k_means(KMeans(init='k-means++', n_clusters=n_digits, n_init=10),
name="k-means++", data=data)
bench_k_means(KMeans(init='random', n_clusters=n_digits, n_init=10),
name="random", data=data)
# in this case the seeding of the centers is deterministic, hence we run the
# kmeans algorithm only once with n_init=1
pca = PCA(n_components=n_digits).fit(data)
bench_k_means(KMeans(init=pca.components_, n_clusters=n_digits, n_init=1),
name="PCA-based",
data=data)
print(79 * '_')
###############################################################################
# Visualize the results on PCA-reduced data
reduced_data = PCA(n_components=2).fit_transform(data)
kmeans = KMeans(init='k-means++', n_clusters=n_digits, n_init=10)
kmeans.fit(reduced_data)
# Step size of the mesh. Decrease to increase the quality of the VQ.
h = .02     # point in the mesh [x_min, x_max]x[y_min, y_max].
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh.
x_min, x_max = reduced_data[:, 0].min() - 1, reduced_data[:, 0].max() + 1
y_min, y_max = reduced_data[:, 1].min() - 1, reduced_data[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# Obtain labels for each point in mesh. Use last trained model.
Z = kmeans.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(1)
plt.clf()
plt.imshow(Z, interpolation='nearest',
extent=(xx.min(), xx.max(), yy.min(), yy.max()),
cmap=plt.cm.Paired,
aspect='auto', origin='lower')
plt.plot(reduced_data[:, 0], reduced_data[:, 1], 'k.', markersize=2)
# Plot the centroids as a white X
centroids = kmeans.cluster_centers_
plt.scatter(centroids[:, 0], centroids[:, 1],
marker='x', s=169, linewidths=3,
color='w', zorder=10)
plt.title('K-means clustering on the digits dataset (PCA-reduced data)\n'
'Centroids are marked with white cross')
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
plt.show()
|
bsd-3-clause
|
NeurotechBerkeley/bci-course
|
lab6/rpeakdetect.py
|
1
|
3699
|
"""
Copyright (c) 2013 Jami Pekkanen
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import sys
import numpy as np
import scipy.signal
import scipy.ndimage
def detect_beats(
ecg, # The raw ECG signal
rate, # Sampling rate in HZ
        # Window size in seconds to use for the robust threshold estimation
ransac_window_size=5.0,
# Low frequency of the band pass filter
lowfreq=5.0,
# High frequency of the band pass filter
highfreq=15.0,
):
"""
ECG heart beat detection based on
http://link.springer.com/article/10.1007/s13239-011-0065-3/fulltext.html
with some tweaks (mainly robust estimation of the rectified signal
cutoff threshold).
"""
ransac_window_size = int(ransac_window_size*rate)
lowpass = scipy.signal.butter(1, highfreq/(rate/2.0), 'low')
highpass = scipy.signal.butter(1, lowfreq/(rate/2.0), 'high')
# TODO: Could use an actual bandpass filter
ecg_low = scipy.signal.filtfilt(*lowpass, x=ecg)
ecg_band = scipy.signal.filtfilt(*highpass, x=ecg_low)
# Square (=signal power) of the first difference of the signal
decg = np.diff(ecg_band)
decg_power = decg**2
    # Robust threshold and normalizer estimation
thresholds = []
max_powers = []
for i in range(int(len(decg_power)/ransac_window_size)):
sample = slice(i*ransac_window_size, (i+1)*ransac_window_size)
d = decg_power[sample]
thresholds.append(0.5*np.std(d))
max_powers.append(np.max(d))
    threshold = np.median(thresholds)
max_power = np.median(max_powers)
decg_power[decg_power < threshold] = 0
decg_power /= max_power
decg_power[decg_power > 1.0] = 1.0
square_decg_power = decg_power**2
shannon_energy = -square_decg_power*np.log(square_decg_power)
shannon_energy[~np.isfinite(shannon_energy)] = 0.0
mean_window_len = int(rate*0.125+1)
lp_energy = np.convolve(shannon_energy, [1.0/mean_window_len]*mean_window_len, mode='same')
#lp_energy = scipy.signal.filtfilt(*lowpass2, x=shannon_energy)
lp_energy = scipy.ndimage.gaussian_filter1d(lp_energy, rate/8.0)
lp_energy_diff = np.diff(lp_energy)
zero_crossings = (lp_energy_diff[:-1] > 0) & (lp_energy_diff[1:] < 0)
zero_crossings = np.flatnonzero(zero_crossings)
zero_crossings -= 1
return zero_crossings
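# Minimal self-test with a synthetic signal (not part of the original
# module): a noisy train of unit pulses stands in for QRS complexes, so
# detect_beats() should recover roughly one peak per simulated beat.
def _selftest_detect_beats(rate=250.0, n_beats=8):
    samples = int(rate) * n_beats
    ecg = 0.05 * np.random.randn(samples)
    ecg[::int(rate)] += 1.0  # one sharp pulse per simulated second
    return detect_beats(ecg, rate)  # expect roughly n_beats detections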
def plot_peak_detection(ecg, rate):
import matplotlib.pyplot as plt
dt = 1.0/rate
t = np.linspace(0, len(ecg)*dt, len(ecg))
plt.plot(t, ecg)
peak_i = detect_beats(ecg, rate)
plt.scatter(t[peak_i], ecg[peak_i], color='red')
plt.show()
if __name__ == '__main__':
rate = float(sys.argv[1])
ecg = np.loadtxt(sys.stdin)
if len(sys.argv) > 2 and sys.argv[2] == 'plot':
plot_peak_detection(ecg, rate)
else:
peaks = detect_beats(ecg, rate)
sys.stdout.write("\n".join(map(str, peaks)))
sys.stdout.write("\n")
|
mit
|
CDNoyes/EDL-Py
|
EntryGuidance/HPC.py
|
1
|
15290
|
""" Differential algebra based replanning using the parametrized planning method.
aka hybrid predictor corrector
"""
from Simulation import Simulation, Cycle, EntrySim, TimedSim
from Triggers import SRPTrigger, AccelerationTrigger
from InitialState import InitialState
from Uncertainty import getUncertainty
from ParametrizedPlanner import profile,profile2
import Apollo
from pyaudi import gdual_double as gd
from pyaudi import abs, sqrt
import numpy as np
import matplotlib.pyplot as plt
from functools import partial
from scipy.interpolate import interp1d
import sys
from os import path
sys.path.append( path.dirname( path.dirname( path.abspath(__file__) ) ) )
from Utils.RK4 import RK4
from Utils import DA as da
fs = 14 # Global fontsize for plotting
class controllerState(object):
def __init__(self):
self.tReplan = 0
self.nReplan = 0
def reset(self):
self.tReplan = 0
self.nReplan = 0
def controller(time, current_state, switch, bank0, reference, lon_target, lat_target, **kwargs):
    if not hasattr(controller, 'bank'): # or time < tReplan (we've gone back in time so it's a new sim). Perform a number of initializations
print "Initializing HPC controller."
controller.bankvars = ['bank{}'.format(i) for i,b in enumerate(bank0)]
controller.bank = np.array([gd(val,var,2) for val,var in zip(bank0,controller.bankvars)])
controller.ref = reference # Also need to update the reference each time we replan
states = ['RangeControl']
conditions = [SRPTrigger(0.0,700,100)]
input = { 'states' : states,
'conditions' : conditions }
# controller.sim = Simulation(cycle=Cycle(1), output=True, use_da=True, **EntrySim()) # So that the simulation doesn't get rebuilt unnecessarily
controller.sim = Simulation(cycle=Cycle(1), output=False, use_da=True, **input) # So that the simulation doesn't get rebuilt unnecessarily
controller.nReplan = 0
controller.tReplan = 0
r,theta,phi,v,gamma,psi,s,m=current_state
# Determine if a replanning is needed
nReplanMax = 5
tReplanMin = 30 # Minimum time between replannings
# print "In HPC controller: Range Error {} km".format(np.abs(s-controller.ref(v))/1000)
if v < 5300 and np.abs(s-controller.ref(v)) > 4e3 and controller.nReplan < nReplanMax and (time-controller.tReplan) > tReplanMin:
# Don't check until velocity is monotonic, less than max replans, hasn't passed the last switch
print "Replanning triggered"
controller.nReplan += 1
controller.tReplan = time
# One option is just to compare estimated range to go with the reference value at the same (velocity/energy/time)
# Another option is to track using Apollo between plannings, and use the predicted range to go as the replanning tool
# However, we should also be taking into account crossrange objective, so maybe a single quick integration to check sometimes?
traj = predict(controller.sim, current_state, bankProfile = lambda **d: profile(d['time'], [sw-time for sw in switch], controller.bank), AR=kwargs['aero_ratios']) # Need to pass DA bank variables
nInactive = np.sum(np.asarray(switch)<time) # The number of switches already passed (and therefore removed from optimization), but never remove the last element
dbank = optimize(traj, lon_target, lat_target, controller.bankvars, nInactive)
controller.bank += dbank
# Need to evaluate traj at dbank, then use those states to update the reference
vel = da.evaluate(traj[:,7], controller.bankvars, [dbank]).flatten()
vel = np.flipud(vel) # Flipped to be increasing for interp1d limitation
        rng = da.evaluate(traj[:,10], controller.bankvars, [dbank]).flatten()
        rtg = np.flipud(rng[-1]*1e3 - rng*1e3) # Range to go
# plt.figure()
# plt.plot(vel,controller.ref(vel))
try:
controller.ref = interp1d(vel, rtg, fill_value=(rtg[0],rtg[-1]), assume_sorted=True, bounds_error=False, kind='linear')
except:
print "Updating controller reference failed, using previous reference"
# plt.plot(da.const(traj[:,7]),(traj[-1,10].constant_cf - da.const(traj[:,10],array=True))[::-1]*1e3)
# plt.plot(v,s,'r*')
# plt.plot(vel, controller.ref(vel),'k--')
# plt.show()
# If not, or if replanning is done:
# Simply evaluate profile
bank = profile(time, switch, da.const(controller.bank),order=1)
return bank
def predict(sim, x0, bankProfile, AR):
output = sim.run(x0,[bankProfile],StepsPerCycle=10,AeroRatios=AR)
return output
def optimize(DA_traj, longitude_target, latitude_target, bankvars, nInactive):
# NOTICE: targets must be given in degrees!!
xf = DA_traj[-1]
print "Predicted final state: {}".format(da.const(xf)[4:10])
# Test some basic optimization:
f = (xf[5]-longitude_target)**2 + (xf[6]-latitude_target)**2 # Lat/lon target - works well
# f = ((xf[3]-6.4)**2 + (1/10000.)*(xf[7]-800.0)**2) # Alt/vel target - combination doesn't work well
# f = (xf[3]-6.9)**2 # Alt target - works well
# f = (xf[7]-840.0)**2 # Vel target - works well
# f = -xf[3] # Maximizing altitude
# Relaxed Newton Method:
dopt = newton_step(f, bankvars, nInactive)
dopt *= 15*np.pi/180/np.max(np.abs(dopt)) # Restricts the largest step size
dopt = line_search(f, dopt, bankvars) # Estimates the best step size along dopt
print "delta Bank: {}".format(dopt*180/np.pi)
xf_opt = da.evaluate(xf,bankvars,[dopt])[0]
print "New final state: {}".format(xf_opt[4:10])
return np.asarray(dopt)
def newton_step(f, vars, nInactive):
""" Returns the step direction based on gradient and hessian info """
g = da.gradient(f, vars)
H = da.hessian(f, vars)
nu = len(g)
    # TODO: watch out for non-invertible Hessians
res = [0]*nInactive # Pad with zeros for the inactive segments
d = -np.dot(np.linalg.inv(H[nInactive:nu,nInactive:nu]),g[nInactive:nu])
res.extend(d)
return np.array(res)
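# Plain-numpy mirror of the step above (for intuition only, not used by the
# code): on a quadratic f(x) = 0.5*x'Hx + g'x the full Newton step -H^{-1}g
# reaches the minimizer in one step; newton_step computes the same thing from
# the DA-derived gradient/Hessian with the first nInactive entries held at
# zero.
def _newton_step_reference(g, H, nInactive=0):
    nu = len(g)
    res = [0] * nInactive
    res.extend(-np.dot(np.linalg.inv(H[nInactive:nu, nInactive:nu]),
                       g[nInactive:nu]))
    return np.array(res)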
def line_search(f, dir, vars):
""" Performs a "brute force" line search along a given direction by simply evaluating
the function at a large number of points and taking the lowest value found.
No further refinement is done.
"""
dirs = [a*dir for a in np.linspace(0,1,2000)]
fnew = da.evaluate([f], vars, dirs).flatten()
i = np.argmin(fnew)
return dir*np.linspace(0,1,2000)[i]
def grid_search(f,vars):
from scipy.optimize import differential_evolution as DE
# dopt = DE(__grid_search_fun, args=(f,vars), popsize=500, bounds=((0,np.pi/4),(-np.pi/4,0),(-np.pi/4,np.pi/18)),disp=True,tol=1e-5) # True reasonable bounds
dopt = DE(__grid_search_fun, args=(f,vars), popsize=100, bounds=((-np.pi/18,np.pi/4.5),(-np.pi/4.5,np.pi/18),(-np.pi/4,np.pi/18)),disp=False,tol=1e-2) # Bigger bounds
return dopt.x
def __grid_search_fun(x, f, vars):
return da.evaluate([f], vars, [x])[0,0]
# def optimize_profile():
# #################################################
# ################ Test Functions #################
# #################################################
def test_profile():
""" Tests the bank profile for various numbers of switches using standard python variables. """
lw = 2
t = np.linspace(0,220,5000)
label = ['Discontinuous','Continuous','Once Differentiable']
for order in range(3):
bank = profile(t, [70,115,150],[-np.pi/2, np.pi/2,-np.pi/9,np.pi/9],order=order)
        plt.plot(t,np.array(bank)*180/np.pi,label=label[order],linewidth=lw)
plt.xlabel('Time (s)',fontsize=fs)
plt.ylabel('Bank angle (deg)',fontsize=fs)
plt.legend()
plt.axis([0,220,-95,95])
plt.show()
def test_da_profile2():
""" Performs the same tests but utilizing DA variables with profile2 """
t = np.linspace(0,200,500)
order = 1
bank_inp = [gd(val,'bank{}'.format(i),order) for i,val in enumerate([-np.pi/2, np.pi/2,-np.pi/9,np.pi/9])]
switch = [-10,70,115,] + [gd(val,'s{}'.format(i),order) for i,val in enumerate([150])] + [250]
bank = np.array([profile2(ti, switch, bank_inp) for ti in t ])
plt.plot(t, da.const(bank, array=True)*180/np.pi,'k--')
dbank = (-1+2*np.random.random([5,len(bank_inp)]))*np.pi/9
dswitch = (-1+2*np.random.random([5,len(bank_inp)-3]))*10.
eval_pt = np.concatenate((dbank,dswitch),axis=1)
vars = ['bank{}'.format(i) for i in range(4)] + ['s{}'.format(i) for i in range(1)]
bank_new = da.evaluate(bank,vars,eval_pt)
for bn in bank_new:
plt.plot(t,bn*180/np.pi)
plt.show()
def test_da_profile():
""" Performs the same tests but utilizing DA variables """
t = np.linspace(0,200,500)
bank_inp = [gd(val,'bank{}'.format(i),2) for i,val in enumerate([-np.pi/2, np.pi/2,-np.pi/9,np.pi/9])]
bank = profile(t, [70,115,150], bank_inp)
plt.plot(t, da.const(bank, array=True)*180/np.pi,'k--')
dbank = (-1+2*np.random.random([5,len(bank_inp)]))*np.pi/9
bank_new = da.evaluate(bank,['bank{}'.format(i) for i,b in enumerate(bank_inp)],dbank)
for bn in bank_new:
plt.plot(t,bn*180/np.pi)
plt.show()
def test_expansion():
''' Integrates a trajectory with nominal bank angle
Then expands around different bank angles
Then integrates true trajectories using those bank angles
And compares
'''
import time
tf = 220
reference_sim = Simulation(cycle=Cycle(1),output=False,**TimedSim(tf))
da_sim = Simulation(cycle=Cycle(1), output=True, use_da=True, **TimedSim(tf))
banks = [-np.pi/2, np.pi/2,-np.pi/9]
bankvars = ['bank{}'.format(i) for i,b in enumerate(banks)]
bank_inp = [gd(val,'bank{}'.format(i),1) for i,val in enumerate(banks)]
bankProfile = lambda **d: profile(d['time'],[89.3607, 136.276], bank_inp)
x0 = InitialState()
t0 = time.time()
output = da_sim.run(x0,[bankProfile],StepsPerCycle=10)
tda = time.time()
print "DA integration time {}".format(tda-t0)
xf = output[-1]
# Test some basic optimization:
# f = (xf[5]+71.5)**2 + (xf[6]+41.4)**2 # Lat/lon target - works well
# f = ((xf[3]-6.4)**2 + (1/10000.)*(xf[7]-800.0)**2) # Alt/vel target - combination doesn't work well
# f = (xf[3]-6.9)**2 # Alt target - works well
# f = (xf[7]-840.0)**2 # Vel target - works well
f = -xf[3] # Maximizing altitude
# Relaxed Newton Method:
# dopt = newton_step(f, bankvars)
# dopt *= 15*np.pi/180/np.max(np.abs(dopt)) # Restricts the largest step size
# dopt = line_search(f, dopt, bankvars) # Estimates the best step size along dopt
# print "delta Bank from single newton step: {}".format(dopt*180/np.pi)
dopt = np.zeros_like(banks)
# dopt = grid_search(f, bankvars) # Brute force, could work well since evaluating is so fast
# print "delta Bank from DE: {}".format(dopt*180/np.pi)
xf_opt = da.evaluate(xf,bankvars,[dopt])[0]
dbank = (-1+2*np.random.random([500,len(bank_inp)]))*np.pi/9
xf_new = da.evaluate(xf,bankvars,dbank)
teval = time.time()
print "DA evaluation time {}".format(teval-tda)
plt.figure(1)
plt.plot(xf[7].constant_cf,xf[3].constant_cf,'kx')
# plt.plot(xf_opt[7],xf_opt[3],'k^')
for xfn in xf_new:
plt.plot(xfn[7],xfn[3],'o',fillstyle='none')
plt.figure(2)
plt.plot(xf[5].constant_cf,xf[6].constant_cf,'kx')
# plt.plot(xf_opt[5],xf_opt[6],'k^')
for xfn in xf_new:
plt.plot(xfn[5],xfn[6],'o',fillstyle='none')
plt.figure(3)
plt.plot(xf[8].constant_cf,xf[9].constant_cf,'kx')
# plt.plot(xf_opt[8],xf_opt[9],'k^')
for xfn in xf_new:
plt.plot(xfn[8],xfn[9],'o',fillstyle='none')
if True:
xf_new_true = []
t0 = time.time()
for delta in dbank:
bankProfile_double = lambda **d: profile(d['time'],[89.3607, 136.276], [a+b for a,b in zip(delta,banks)])
output = reference_sim.run(x0,[bankProfile_double])
xf_new_true.append(output[-1])
plt.figure(1)
plt.plot(output[-1,7], output[-1,3],'x')
plt.figure(2)
plt.plot(output[-1,5], output[-1,6],'x')
plt.figure(3)
plt.plot(output[-1,8], output[-1,9],'x')
tint = time.time()
print "Integration times for truth comparison {} (includes plotting)".format(tint-t0)
xf_new_true = np.array(xf_new_true)
err = np.abs(xf_new-xf_new_true)
dbanknorm = np.linalg.norm(dbank,axis=1)
label =['Altitude error (m)','Longitude error (deg)','Latitude error (deg)','Velocity error (m/s)', 'Flight path error (deg)', 'Heading error (deg)']
# conversion = [1,np.pi/180,np.pi/180,1,np.pi/180,np.pi/180]
for i in range(6):
plt.figure(4+i)
plt.plot(dbanknorm*180/np.pi,err[:,i+4],'ko')
plt.xlabel('Norm of Bank Deviations (deg)',fontsize=fs)
plt.ylabel(label[i],fontsize=fs)
# Add labels to each plot
plt.figure(1)
plt.ylabel('Altitude (km)')
plt.xlabel('Velocity (m/s)')
plt.figure(2)
plt.ylabel('Longitude (deg)')
plt.xlabel('Latitude (deg)')
plt.figure(3)
plt.ylabel('Flight path angle (deg)')
plt.xlabel('Heading angle (deg)')
plt.show()
def test_controller():
import MPC as mpc
# Plan the nominal profile:
reference_sim = Simulation(cycle=Cycle(1),output=False,**EntrySim())
switch = [129, 190]
bank = [-np.radians(80), np.radians(80), -np.radians(30)]
bankProfile = lambda **d: profile(d['time'],switch=switch, bank=bank,order=2)
x0 = InitialState()
output = reference_sim.run(x0,[bankProfile])
references = reference_sim.getRef()
drag_ref = references['drag']
# Create the simulation model:
states = ['PreEntry','RangeControl']
conditions = [AccelerationTrigger('drag',2), SRPTrigger(0,700,10)] # TODO: The final trigger should be an input
input = { 'states' : states,
'conditions' : conditions }
sim = Simulation(cycle=Cycle(1), output=True, **input)
# Create guidance laws
pre = partial(mpc.constant, value=bankProfile(time=0))
hpc = partial(controller, switch=switch, bank0=bank, reference=references['rangeToGo'], lon_target=output[-1,5], lat_target=output[-1,6])
controls = [pre, hpc]
# Run the off-nominal simulation
perturb = getUncertainty()['parametric']
sample = None
# sample = perturb.sample()
# print sample
# sample = [.1,-.1,-.05,0]
sample = [.133,-.133,.0368,.0014] # Worst case sample from Apollo runs
# sample = [-0.12,0.0, 0,0]
s0 = reference_sim.history[0,6]-reference_sim.history[-1,6] # This ensures the range to go is 0 at the target for the real simulation
x0_full = InitialState(1, range=s0, bank=np.radians(-80))
# Single trajectory
reference_sim.plot(plotEnergy=True, legend=False)
output = sim.run(x0_full, controls, sample, FullEDL=True)
sim.plot(compare=False)
sim.show()
if __name__ == "__main__":
# test_da_profile()
test_da_profile2()
# test_expansion()
# test_controller()
|
gpl-3.0
|
birdsarah/bokeh
|
examples/glyphs/trail.py
|
33
|
4656
|
# -*- coding: utf-8 -*-
from __future__ import print_function
from math import sin, cos, atan2, sqrt, radians
import numpy as np
import scipy.ndimage as im
from bokeh.document import Document
from bokeh.embed import file_html
from bokeh.resources import INLINE
from bokeh.browserlib import view
from bokeh.models.glyphs import Line, Patches
from bokeh.models.widgets import VBox
from bokeh.models import (
Plot, GMapPlot, GMapOptions,
DataRange1d, ColumnDataSource,
LinearAxis, Grid,
PanTool, WheelZoomTool, ResetTool)
from bokeh.sampledata.mtb import obiszow_mtb_xcm
def haversin(theta):
return sin(0.5*theta)**2
def distance(p1, p2):
"""Distance between (lat1, lon1) and (lat2, lon2). """
R = 6371
lat1, lon1 = p1
lat2, lon2 = p2
phi1 = radians(lat1)
phi2 = radians(lat2)
delta_lat = radians(lat2 - lat1)
delta_lon = radians(lon2 - lon1)
a = haversin(delta_lat) + cos(phi1)*cos(phi2)*haversin(delta_lon)
return 2*R*atan2(sqrt(a), sqrt(1-a))
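# Quick sanity check for the haversine distance above (values approximate,
# chosen for illustration only): one degree of latitude is ~111.2 km on
# Earth, i.e. 2*pi*R/360 with R = 6371 km.
def _check_haversine():
    d = distance((51.0, 17.0), (52.0, 17.0))  # one degree of latitude apart
    assert abs(d - 111.2) < 1.0
    return d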
def prep_data(dataset):
df = dataset.copy()
latlon = list(zip(df.lat, df.lon))
    dist = np.array([ distance(latlon[i+1], latlon[i]) for i in range(len(latlon) - 1) ])
df["dist"] = np.concatenate(([0], np.cumsum(dist)))
slope = np.abs(100*np.diff(df.alt)/(1000*dist))
slope[np.where( slope < 4) ] = 0 # "green"
slope[np.where((slope >= 4) & (slope < 6))] = 1 # "yellow"
slope[np.where((slope >= 6) & (slope < 10))] = 2 # "pink"
slope[np.where((slope >= 10) & (slope < 15))] = 3 # "orange"
slope[np.where( slope >= 15 )] = 4 # "red"
slope = im.median_filter(slope, 6)
colors = np.empty_like(slope, dtype=object)
colors[np.where(slope == 0)] = "green"
colors[np.where(slope == 1)] = "yellow"
colors[np.where(slope == 2)] = "pink"
colors[np.where(slope == 3)] = "orange"
colors[np.where(slope == 4)] = "red"
df["colors"] = list(colors) + [None] # NOTE: add [None] just make pandas happy
return df
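# The chained np.where calls above implement a five-bin classification of
# the slope; an equivalent sketch with np.digitize (illustration only, the
# bins are the same 4/6/10/15 %-grade thresholds):
def _bin_slopes(slope):
    return np.digitize(slope, bins=[4, 6, 10, 15])  # 0..4, same classes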
title = "Obiszów MTB XCM"
def trail_map(data):
lon = (min(data.lon) + max(data.lon))/2
lat = (min(data.lat) + max(data.lat))/2
map_options = GMapOptions(lng=lon, lat=lat, zoom=13)
plot = GMapPlot(title="%s - Trail Map" % title, map_options=map_options, plot_width=800, plot_height=800)
xaxis = LinearAxis()
plot.add_layout(xaxis, 'below')
yaxis = LinearAxis()
plot.add_layout(yaxis, 'left')
xgrid = Grid(plot=plot, dimension=0, ticker=xaxis.ticker, grid_line_dash="dashed", grid_line_color="gray")
ygrid = Grid(plot=plot, dimension=1, ticker=yaxis.ticker, grid_line_dash="dashed", grid_line_color="gray")
plot.renderers.extend([xgrid, ygrid])
plot.add_tools(PanTool(), WheelZoomTool(), ResetTool())
line_source = ColumnDataSource(dict(x=data.lon, y=data.lat, dist=data.dist))
line = Line(x="x", y="y", line_color="blue", line_width=2)
plot.add_glyph(line_source, line)
plot.x_range = DataRange1d()
plot.y_range = DataRange1d()
return plot
def altitude_profile(data):
plot = Plot(title="%s - Altitude Profile" % title, plot_width=800, plot_height=400)
xaxis = LinearAxis(axis_label="Distance (km)")
plot.add_layout(xaxis, 'below')
yaxis = LinearAxis(axis_label="Altitude (m)")
plot.add_layout(yaxis, 'left')
xgrid = Grid(plot=plot, dimension=0, ticker=xaxis.ticker)
ygrid = Grid(plot=plot, dimension=1, ticker=yaxis.ticker)
plot.renderers.extend([xgrid, ygrid])
plot.add_tools(PanTool(), WheelZoomTool(), ResetTool())
X, Y = data.dist, data.alt
y0 = min(Y)
patches_source = ColumnDataSource(dict(
xs = [ [X[i], X[i+1], X[i+1], X[i]] for i in range(len(X[:-1])) ],
ys = [ [y0, y0, Y[i+1], Y[i]] for i in range(len(Y[:-1])) ],
color = data.colors[:-1]
))
patches = Patches(xs="xs", ys="ys", fill_color="color", line_color="color")
plot.add_glyph(patches_source, patches)
line_source = ColumnDataSource(dict(
x = data.dist,
y = data.alt,
))
line = Line(x='x', y='y', line_color="black", line_width=1)
plot.add_glyph(line_source, line)
plot.x_range = DataRange1d()
plot.y_range = DataRange1d()
return plot
data = prep_data(obiszow_mtb_xcm)
trail = trail_map(data)
altitude = altitude_profile(data)
layout = VBox(children=[altitude, trail])
doc = Document()
doc.add(layout)
if __name__ == "__main__":
filename = "trail.html"
with open(filename, "w") as f:
f.write(file_html(doc, INLINE, "Trail map and altitude profile"))
print("Wrote %s" % filename)
view(filename)
|
bsd-3-clause
|
xiaoxiamii/scikit-learn
|
sklearn/manifold/tests/test_spectral_embedding.py
|
216
|
8091
|
from nose.tools import assert_true
from nose.tools import assert_equal
from scipy.sparse import csr_matrix
from scipy.sparse import csc_matrix
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_array_equal
from nose.tools import assert_raises
from nose.plugins.skip import SkipTest
from sklearn.manifold.spectral_embedding_ import SpectralEmbedding
from sklearn.manifold.spectral_embedding_ import _graph_is_connected
from sklearn.manifold import spectral_embedding
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.metrics import normalized_mutual_info_score
from sklearn.cluster import KMeans
from sklearn.datasets.samples_generator import make_blobs
# non centered, sparse centers to check the
centers = np.array([
[0.0, 5.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 4.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 5.0, 1.0],
])
n_samples = 1000
n_clusters, n_features = centers.shape
S, true_labels = make_blobs(n_samples=n_samples, centers=centers,
cluster_std=1., random_state=42)
def _check_with_col_sign_flipping(A, B, tol=0.0):
""" Check array A and B are equal with possible sign flipping on
each columns"""
sign = True
for column_idx in range(A.shape[1]):
sign = sign and ((((A[:, column_idx] -
B[:, column_idx]) ** 2).mean() <= tol ** 2) or
(((A[:, column_idx] +
B[:, column_idx]) ** 2).mean() <= tol ** 2))
if not sign:
return False
return True
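# Tiny illustration (not part of the original test suite): flipping the sign
# of one column leaves the comparison unchanged, mirroring the sign
# indeterminacy of eigenvector-based embeddings.
def _demo_sign_flipping():
    A = np.arange(6.).reshape(3, 2)
    B = A.copy()
    B[:, 1] *= -1
    assert _check_with_col_sign_flipping(A, B, tol=1e-12)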
def test_spectral_embedding_two_components(seed=36):
# Test spectral embedding with two components
random_state = np.random.RandomState(seed)
n_sample = 100
affinity = np.zeros(shape=[n_sample * 2,
n_sample * 2])
# first component
affinity[0:n_sample,
0:n_sample] = np.abs(random_state.randn(n_sample, n_sample)) + 2
# second component
affinity[n_sample::,
n_sample::] = np.abs(random_state.randn(n_sample, n_sample)) + 2
# connection
affinity[0, n_sample + 1] = 1
affinity[n_sample + 1, 0] = 1
affinity.flat[::2 * n_sample + 1] = 0
affinity = 0.5 * (affinity + affinity.T)
true_label = np.zeros(shape=2 * n_sample)
true_label[0:n_sample] = 1
se_precomp = SpectralEmbedding(n_components=1, affinity="precomputed",
random_state=np.random.RandomState(seed))
embedded_coordinate = se_precomp.fit_transform(affinity)
# Some numpy versions are touchy with types
embedded_coordinate = \
se_precomp.fit_transform(affinity.astype(np.float32))
# thresholding on the first components using 0.
label_ = np.array(embedded_coordinate.ravel() < 0, dtype="float")
assert_equal(normalized_mutual_info_score(true_label, label_), 1.0)
def test_spectral_embedding_precomputed_affinity(seed=36):
# Test spectral embedding with precomputed kernel
gamma = 1.0
se_precomp = SpectralEmbedding(n_components=2, affinity="precomputed",
random_state=np.random.RandomState(seed))
se_rbf = SpectralEmbedding(n_components=2, affinity="rbf",
gamma=gamma,
random_state=np.random.RandomState(seed))
embed_precomp = se_precomp.fit_transform(rbf_kernel(S, gamma=gamma))
embed_rbf = se_rbf.fit_transform(S)
assert_array_almost_equal(
se_precomp.affinity_matrix_, se_rbf.affinity_matrix_)
assert_true(_check_with_col_sign_flipping(embed_precomp, embed_rbf, 0.05))
def test_spectral_embedding_callable_affinity(seed=36):
# Test spectral embedding with callable affinity
gamma = 0.9
kern = rbf_kernel(S, gamma=gamma)
se_callable = SpectralEmbedding(n_components=2,
affinity=(
lambda x: rbf_kernel(x, gamma=gamma)),
gamma=gamma,
random_state=np.random.RandomState(seed))
se_rbf = SpectralEmbedding(n_components=2, affinity="rbf",
gamma=gamma,
random_state=np.random.RandomState(seed))
embed_rbf = se_rbf.fit_transform(S)
embed_callable = se_callable.fit_transform(S)
assert_array_almost_equal(
se_callable.affinity_matrix_, se_rbf.affinity_matrix_)
assert_array_almost_equal(kern, se_rbf.affinity_matrix_)
assert_true(
_check_with_col_sign_flipping(embed_rbf, embed_callable, 0.05))
def test_spectral_embedding_amg_solver(seed=36):
# Test spectral embedding with amg solver
try:
from pyamg import smoothed_aggregation_solver
except ImportError:
raise SkipTest("pyamg not available.")
se_amg = SpectralEmbedding(n_components=2, affinity="nearest_neighbors",
eigen_solver="amg", n_neighbors=5,
random_state=np.random.RandomState(seed))
se_arpack = SpectralEmbedding(n_components=2, affinity="nearest_neighbors",
eigen_solver="arpack", n_neighbors=5,
random_state=np.random.RandomState(seed))
embed_amg = se_amg.fit_transform(S)
embed_arpack = se_arpack.fit_transform(S)
assert_true(_check_with_col_sign_flipping(embed_amg, embed_arpack, 0.05))
def test_pipeline_spectral_clustering(seed=36):
# Test using pipeline to do spectral clustering
random_state = np.random.RandomState(seed)
se_rbf = SpectralEmbedding(n_components=n_clusters,
affinity="rbf",
random_state=random_state)
se_knn = SpectralEmbedding(n_components=n_clusters,
affinity="nearest_neighbors",
n_neighbors=5,
random_state=random_state)
for se in [se_rbf, se_knn]:
km = KMeans(n_clusters=n_clusters, random_state=random_state)
km.fit(se.fit_transform(S))
assert_array_almost_equal(
normalized_mutual_info_score(
km.labels_,
true_labels), 1.0, 2)
def test_spectral_embedding_unknown_eigensolver(seed=36):
# Test that SpectralClustering fails with an unknown eigensolver
se = SpectralEmbedding(n_components=1, affinity="precomputed",
random_state=np.random.RandomState(seed),
eigen_solver="<unknown>")
assert_raises(ValueError, se.fit, S)
def test_spectral_embedding_unknown_affinity(seed=36):
# Test that SpectralClustering fails with an unknown affinity type
se = SpectralEmbedding(n_components=1, affinity="<unknown>",
random_state=np.random.RandomState(seed))
assert_raises(ValueError, se.fit, S)
def test_connectivity(seed=36):
# Test that graph connectivity test works as expected
graph = np.array([[1, 0, 0, 0, 0],
[0, 1, 1, 0, 0],
[0, 1, 1, 1, 0],
[0, 0, 1, 1, 1],
[0, 0, 0, 1, 1]])
assert_equal(_graph_is_connected(graph), False)
assert_equal(_graph_is_connected(csr_matrix(graph)), False)
assert_equal(_graph_is_connected(csc_matrix(graph)), False)
graph = np.array([[1, 1, 0, 0, 0],
[1, 1, 1, 0, 0],
[0, 1, 1, 1, 0],
[0, 0, 1, 1, 1],
[0, 0, 0, 1, 1]])
assert_equal(_graph_is_connected(graph), True)
assert_equal(_graph_is_connected(csr_matrix(graph)), True)
assert_equal(_graph_is_connected(csc_matrix(graph)), True)
def test_spectral_embedding_deterministic():
# Test that Spectral Embedding is deterministic
random_state = np.random.RandomState(36)
data = random_state.randn(10, 30)
sims = rbf_kernel(data)
embedding_1 = spectral_embedding(sims)
embedding_2 = spectral_embedding(sims)
assert_array_almost_equal(embedding_1, embedding_2)
|
bsd-3-clause
|
moutai/scikit-learn
|
sklearn/tests/test_multioutput.py
|
39
|
6609
|
import numpy as np
import scipy.sparse as sp
from sklearn.utils import shuffle
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.exceptions import NotFittedError
from sklearn import datasets
from sklearn.base import clone
from sklearn.ensemble import GradientBoostingRegressor, RandomForestClassifier
from sklearn.linear_model import Lasso
from sklearn.svm import LinearSVC
from sklearn.multiclass import OneVsRestClassifier
from sklearn.multioutput import MultiOutputRegressor, MultiOutputClassifier
def test_multi_target_regression():
X, y = datasets.make_regression(n_targets=3)
X_train, y_train = X[:50], y[:50]
X_test, y_test = X[50:], y[50:]
references = np.zeros_like(y_test)
for n in range(3):
rgr = GradientBoostingRegressor(random_state=0)
rgr.fit(X_train, y_train[:, n])
references[:,n] = rgr.predict(X_test)
rgr = MultiOutputRegressor(GradientBoostingRegressor(random_state=0))
rgr.fit(X_train, y_train)
y_pred = rgr.predict(X_test)
assert_almost_equal(references, y_pred)
def test_multi_target_regression_one_target():
    # Test that multi target regression raises a ValueError for single-target y
X, y = datasets.make_regression(n_targets=1)
X_train, y_train = X[:50], y[:50]
X_test, y_test = X[50:], y[50:]
rgr = MultiOutputRegressor(GradientBoostingRegressor(random_state=0))
assert_raises(ValueError, rgr.fit, X_train, y_train)
def test_multi_target_sparse_regression():
X, y = datasets.make_regression(n_targets=3)
X_train, y_train = X[:50], y[:50]
X_test, y_test = X[50:], y[50:]
for sparse in [sp.csr_matrix, sp.csc_matrix, sp.coo_matrix, sp.dok_matrix,
sp.lil_matrix]:
rgr = MultiOutputRegressor(Lasso(random_state=0))
rgr_sparse = MultiOutputRegressor(Lasso(random_state=0))
rgr.fit(X_train, y_train)
rgr_sparse.fit(sparse(X_train), y_train)
assert_almost_equal(rgr.predict(X_test), rgr_sparse.predict(sparse(X_test)))
def test_multi_target_sample_weights_api():
X = [[1,2,3], [4,5,6]]
y = [[3.141, 2.718], [2.718, 3.141]]
w = [0.8, 0.6]
rgr = MultiOutputRegressor(Lasso())
assert_raises_regex(ValueError, "does not support sample weights",
rgr.fit, X, y, w)
# no exception should be raised if the base estimator supports weights
rgr = MultiOutputRegressor(GradientBoostingRegressor(random_state=0))
rgr.fit(X, y, w)
def test_multi_target_sample_weights():
# weighted regressor
Xw = [[1,2,3], [4,5,6]]
yw = [[3.141, 2.718], [2.718, 3.141]]
w = [2., 1.]
rgr_w = MultiOutputRegressor(GradientBoostingRegressor(random_state=0))
rgr_w.fit(Xw, yw, w)
# unweighted, but with repeated samples
X = [[1,2,3], [1,2,3], [4,5,6]]
y = [[3.141, 2.718], [3.141, 2.718], [2.718, 3.141]]
rgr = MultiOutputRegressor(GradientBoostingRegressor(random_state=0))
rgr.fit(X, y)
X_test = [[1.5,2.5,3.5], [3.5,4.5,5.5]]
assert_almost_equal(rgr.predict(X_test), rgr_w.predict(X_test))
# Import the data
iris = datasets.load_iris()
# create a multiple targets by randomized shuffling and concatenating y.
X = iris.data
y1 = iris.target
y2 = shuffle(y1, random_state=1)
y3 = shuffle(y1, random_state=2)
y = np.column_stack((y1, y2, y3))
n_samples, n_features = X.shape
n_outputs = y.shape[1]
n_classes = len(np.unique(y1))
def test_multi_output_classification():
# test if multi_target initializes correctly with base estimator and fit
    # assert predictions work as expected for predict, predict_proba and score
forest = RandomForestClassifier(n_estimators=10, random_state=1)
multi_target_forest = MultiOutputClassifier(forest)
# train the multi_target_forest and also get the predictions.
multi_target_forest.fit(X, y)
predictions = multi_target_forest.predict(X)
assert_equal((n_samples, n_outputs), predictions.shape)
predict_proba = multi_target_forest.predict_proba(X)
assert_equal((n_samples, n_classes, n_outputs), predict_proba.shape)
assert_array_equal(np.argmax(predict_proba, axis=1), predictions)
# train the forest with each column and assert that predictions are equal
for i in range(3):
forest_ = clone(forest) # create a clone with the same state
forest_.fit(X, y[:, i])
assert_equal(list(forest_.predict(X)), list(predictions[:, i]))
assert_array_equal(list(forest_.predict_proba(X)),
list(predict_proba[:, :, i]))
def test_multiclass_multioutput_estimator():
    # test a meta estimator (MultiOutputClassifier) wrapping another meta estimator
svc = LinearSVC(random_state=0)
multi_class_svc = OneVsRestClassifier(svc)
multi_target_svc = MultiOutputClassifier(multi_class_svc)
multi_target_svc.fit(X, y)
predictions = multi_target_svc.predict(X)
assert_equal((n_samples, n_outputs), predictions.shape)
    # train the svc with each column and assert that predictions are equal
for i in range(3):
multi_class_svc_ = clone(multi_class_svc) # create a clone
multi_class_svc_.fit(X, y[:, i])
assert_equal(list(multi_class_svc_.predict(X)),
list(predictions[:, i]))
def test_multi_output_classification_sample_weights():
# weighted classifier
Xw = [[1, 2, 3], [4, 5, 6]]
yw = [[3, 2], [2, 3]]
w = np.asarray([2., 1.])
forest = RandomForestClassifier(n_estimators=10, random_state=1)
clf_w = MultiOutputClassifier(forest)
clf_w.fit(Xw, yw, w)
# unweighted, but with repeated samples
X = [[1, 2, 3], [1, 2, 3], [4, 5, 6]]
y = [[3, 2], [3, 2], [2, 3]]
forest = RandomForestClassifier(n_estimators=10, random_state=1)
clf = MultiOutputClassifier(forest)
clf.fit(X, y)
X_test = [[1.5, 2.5, 3.5], [3.5, 4.5, 5.5]]
assert_almost_equal(clf.predict(X_test), clf_w.predict(X_test))
def test_multi_output_exceptions():
    # NotFittedError when fit is not done but score, predict and
    # predict_proba are called
moc = MultiOutputClassifier(LinearSVC(random_state=0))
assert_raises(NotFittedError, moc.predict, y)
assert_raises(NotFittedError, moc.predict_proba, y)
assert_raises(NotFittedError, moc.score, X, y)
# ValueError when number of outputs is different
# for fit and score
y_new = np.column_stack((y1, y2))
moc.fit(X, y)
assert_raises(ValueError, moc.score, X, y_new)
|
bsd-3-clause
|
mhue/scikit-learn
|
sklearn/cross_decomposition/cca_.py
|
209
|
3150
|
from .pls_ import _PLS
__all__ = ['CCA']
class CCA(_PLS):
"""CCA Canonical Correlation Analysis.
CCA inherits from PLS with mode="B" and deflation_mode="canonical".
Read more in the :ref:`User Guide <cross_decomposition>`.
Parameters
----------
n_components : int, (default 2).
number of components to keep.
scale : boolean, (default True)
        whether to scale the data.
max_iter : an integer, (default 500)
the maximum number of iterations of the NIPALS inner loop
tol : non-negative real, default 1e-06.
the tolerance used in the iterative algorithm
copy : boolean
        Whether the deflation should be done on a copy. Leave the default
        value (True) unless you don't care about side effects.
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_loadings_ : array, [p, n_components]
X block loadings vectors.
y_loadings_ : array, [q, n_components]
Y block loadings vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
x_rotations_ : array, [p, n_components]
X block to latents rotations.
y_rotations_ : array, [q, n_components]
Y block to latents rotations.
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component.
Notes
-----
For each component k, find the weights u, v that maximizes
max corr(Xk u, Yk v), such that ``|u| = |v| = 1``
Note that it maximizes only the correlations between the scores.
The residual matrix of X (Xk+1) block is obtained by the deflation on the
current X score: x_score.
The residual matrix of Y (Yk+1) block is obtained by deflation on the
current Y score.
Examples
--------
>>> from sklearn.cross_decomposition import CCA
>>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [3.,5.,4.]]
>>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
>>> cca = CCA(n_components=1)
>>> cca.fit(X, Y)
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
CCA(copy=True, max_iter=500, n_components=1, scale=True, tol=1e-06)
>>> X_c, Y_c = cca.transform(X, Y)
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
In french but still a reference:
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
See also
--------
PLSCanonical
PLSSVD
"""
def __init__(self, n_components=2, scale=True,
max_iter=500, tol=1e-06, copy=True):
_PLS.__init__(self, n_components=n_components, scale=scale,
deflation_mode="canonical", mode="B",
norm_y_weights=True, algorithm="nipals",
max_iter=max_iter, tol=tol, copy=copy)
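# Illustration of the Notes in the docstring above (a hypothetical helper,
# not part of scikit-learn): after fitting, the first pair of canonical
# scores should be (close to) maximally correlated.
def _first_component_correlation(X, Y):
    import numpy as np
    cca = CCA(n_components=1).fit(X, Y)
    X_c, Y_c = cca.transform(X, Y)
    return np.corrcoef(X_c[:, 0], Y_c[:, 0])[0, 1]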
|
bsd-3-clause
|
dkillick/cartopy
|
lib/cartopy/tests/mpl/test_examples.py
|
2
|
1636
|
# (C) British Crown Copyright 2011 - 2014, Met Office
#
# This file is part of cartopy.
#
# cartopy is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# cartopy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with cartopy. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
import numpy as np
import matplotlib.pyplot as plt
from cartopy.tests.mpl import ImageTesting
class ExampleImageTesting(ImageTesting):
"""Subclasses ImageTesting to nullify the plt.show commands."""
def __call__(self, test_func):
fn = ImageTesting.__call__(self, test_func)
def new_fn(*args, **kwargs):
try:
show = plt.show
plt.show = lambda *args, **kwargs: None
r = fn(*args, **kwargs)
finally:
plt.show = show
return r
new_fn.__name__ = fn.__name__
return new_fn
@ExampleImageTesting(['global_map'])
def test_global_map():
import cartopy.examples.global_map as c
c.main()
if __name__ == '__main__':
import nose
nose.runmodule(argv=['-s', '--with-doctest'], exit=False)
|
lgpl-3.0
|
henrynj/PMLMC
|
plot/euler_old/plot_fixed_ml.py
|
1
|
2278
|
#!/usr/bin/env python
import sys
import numpy as np
import matplotlib.pyplot as plt
def read_variance():
level = []
var_q = []
var_yl = []
filename = 'mlmc_convergence_test'
fh = open(filename, 'r')
for line in fh:
# Recognise convergence test lines from
# the fact that line[1] is an integer
if line[0] == ' ' and '0' <= line[1] <= '9':
splitline = [float(x) for x in line.split()]
level.append(splitline[0])
var_q.append(splitline[5])
var_yl.append(splitline[7])
fh.close()
var_q = np.array(var_q)
var_yl = np.array(var_yl)
return var_q, var_yl
def calculate_optimal_number(level, var, eps, gamma = 1.4):
""" calculate optimal number of samples on each level,
based on eq.(3.1), Multilevel Monte Carlo methods, Giles, 2015.
"""
### read var_l from mlmc_convergence_test
cost = np.array( [4**(l*gamma) for l in level] )
NM_opt = np.ceil( 2 * np.sqrt( var / cost ) * \
sum( np.sqrt( var * cost ) ) / ( eps**2) )
return NM_opt.astype(int)
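# Illustrative usage (a hedged sketch; the variances and tolerance below are
# made-up numbers, not output of any convergence test in this repository):
#
#     var_yl = np.array([1.0e-2, 2.5e-3, 6.0e-4])
#     nm = calculate_optimal_number(level=[0, 1, 2], var=var_yl, eps=1e-3)
#     # implements NM_l = ceil( 2/eps^2 * sqrt(V_l/C_l) * sum_l sqrt(V_l*C_l) )
#     # with per-sample cost C_l = 4**(gamma*l), cf. Giles (2015), eq. (3.1).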
def compute_mc_cost(v, eps, l, gamma):
nm = int( np.ceil(2*v/(eps**2)) )
cost = nm * 4**(l*gamma)
return cost
def compute_mlmc_cost(nm, gamma, l0):
cost_ml = 0
for l, n in enumerate(nm):
if l==0:
cost_ml += n * 4**( gamma*(l+l0) )
else:
cost_ml += n * 4**( gamma*(l+l0)) #* ( 1+4**(-gamma) )
return cost_ml
def estimate_c(L=3):
""" calculate the eps that level L mesh grid can reach"""
del3 = []
l = []
qoi_name = 'FlowAngleOut'
filename = '../TestCase/su2_ls89/mc_data_%s.dat' %qoi_name
fh = open(filename, 'r')
for line in fh:
if line[0] == ' ' and '0' <= line[1] <= '9':
splitline = [float(x) for x in line.split()]
l.append(splitline[0])
del3.append(splitline[3])
A = l[1:]
B = np.log2( np.abs(del3[1:]) )
pa = np.polyfit(A, B, 1)
alpha = -pa[0] / 2
c = 2**pa[1]
eps = c * 2**(-2*L*alpha)
    print(eps)
return eps
if __name__ == '__main__':
plot()
|
gpl-3.0
|
sgenoud/scikit-learn
|
sklearn/feature_extraction/tests/test_dict_vectorizer.py
|
3
|
2866
|
# Author: Lars Buitinck <[email protected]>
# License: BSD-style.
from random import Random
import numpy as np
import scipy.sparse as sp
from nose.tools import assert_equal
from nose.tools import assert_true
from nose.tools import assert_false
from numpy.testing import assert_array_equal
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_selection import SelectKBest, chi2
def test_dictvectorizer():
D = [{"foo": 1, "bar": 3},
{"bar": 4, "baz": 2},
{"bar": 1, "quux": 1, "quuux": 2}]
for sparse in (True, False):
for dtype in (int, np.float32, np.int16):
v = DictVectorizer(sparse=sparse, dtype=dtype)
X = v.fit_transform(D)
assert_equal(sp.issparse(X), sparse)
assert_equal(X.shape, (3, 5))
assert_equal(X.sum(), 14)
assert_equal(v.inverse_transform(X), D)
if sparse:
# COO matrices can't be compared for equality
assert_array_equal(X.A, v.transform(D).A)
else:
assert_array_equal(X, v.transform(D))
def test_feature_selection():
# make two feature dicts with two useful features and a bunch of useless
# ones, in terms of chi2
d1 = dict([("useless%d" % i, 10) for i in xrange(20)],
useful1=1, useful2=20)
d2 = dict([("useless%d" % i, 10) for i in xrange(20)],
useful1=20, useful2=1)
for indices in (True, False):
v = DictVectorizer().fit([d1, d2])
X = v.transform([d1, d2])
sel = SelectKBest(chi2, k=2).fit(X, [0, 1])
v.restrict(sel.get_support(indices=indices), indices=indices)
assert_equal(v.get_feature_names(), ["useful1", "useful2"])
def test_one_of_k():
D_in = [{"version": "1", "ham": 2},
{"version": "2", "spam": .3},
{"version=3": True, "spam": -1}]
v = DictVectorizer()
X = v.fit_transform(D_in)
assert_equal(X.shape, (3, 5))
D_out = v.inverse_transform(X)
assert_equal(D_out[0], {"version=1": 1, "ham": 2})
names = v.get_feature_names()
assert_true("version=2" in names)
assert_false("version" in names)
def test_unseen_features():
D = [{"camelot": 0, "spamalot": 1}]
v = DictVectorizer(sparse=False).fit(D)
X = v.transform({"push the pram a lot": 2})
assert_array_equal(X, np.zeros((1, 2)))
def test_deterministic_vocabulary():
# Generate equal dictionaries with different memory layouts
items = [("%03d" % i, i) for i in range(1000)]
rng = Random(42)
d_sorted = dict(items)
rng.shuffle(items)
d_shuffled = dict(items)
# check that the memory layout does not impact the resulting vocabulary
v_1 = DictVectorizer().fit([d_sorted])
v_2 = DictVectorizer().fit([d_shuffled])
assert_equal(v_1.vocabulary_, v_2.vocabulary_)
|
bsd-3-clause
|
kaichogami/scikit-learn
|
examples/feature_selection/plot_feature_selection.py
|
95
|
2847
|
"""
===============================
Univariate Feature Selection
===============================
An example showing univariate feature selection.
Noisy (non-informative) features are added to the iris data and
univariate feature selection is applied. For each feature, we plot the
p-values for the univariate feature selection and the corresponding
weights of an SVM. We can see that univariate feature selection
selects the informative features and that these have larger SVM weights.
In the total set of features, only the first 4 are significant. We
can see that they have the highest score with univariate feature
selection. The SVM assigns a large weight to one of these features, but
also selects many of the non-informative features.
Applying univariate feature selection before the SVM
increases the SVM weight attributed to the significant features, and
thus improves classification.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, svm
from sklearn.feature_selection import SelectPercentile, f_classif
###############################################################################
# import some data to play with
# The iris dataset
iris = datasets.load_iris()
# Some noisy data not correlated
E = np.random.uniform(0, 0.1, size=(len(iris.data), 20))
# Add the noisy data to the informative features
X = np.hstack((iris.data, E))
y = iris.target
###############################################################################
plt.figure(1)
plt.clf()
X_indices = np.arange(X.shape[-1])
###############################################################################
# Univariate feature selection with F-test for feature scoring
# We use the default selection function: the 10% most significant features
selector = SelectPercentile(f_classif, percentile=10)
selector.fit(X, y)
scores = -np.log10(selector.pvalues_)
scores /= scores.max()
plt.bar(X_indices - .45, scores, width=.2,
label=r'Univariate score ($-Log(p_{value})$)', color='darkorange')
###############################################################################
# Compare to the weights of an SVM
clf = svm.SVC(kernel='linear')
clf.fit(X, y)
svm_weights = (clf.coef_ ** 2).sum(axis=0)
svm_weights /= svm_weights.max()
plt.bar(X_indices - .25, svm_weights, width=.2, label='SVM weight',
color='navy')
clf_selected = svm.SVC(kernel='linear')
clf_selected.fit(selector.transform(X), y)
svm_weights_selected = (clf_selected.coef_ ** 2).sum(axis=0)
svm_weights_selected /= svm_weights_selected.max()
plt.bar(X_indices[selector.get_support()] - .05, svm_weights_selected,
width=.2, label='SVM weights after selection', color='c')
plt.title("Comparing feature selection")
plt.xlabel('Feature number')
plt.yticks(())
plt.axis('tight')
plt.legend(loc='upper right')
plt.show()
|
bsd-3-clause
|
svallaghe/libmesh
|
doc/statistics/libmesh_pagehits.py
|
1
|
9839
|
#!/usr/bin/env python
import matplotlib.pyplot as plt
import numpy as np
# Import stuff for working with dates
from datetime import datetime
from matplotlib.dates import date2num
# Hits/month, pages, and gigabytes served.
# To get the Google analytics data:
# .) Go to analytics.google.com.
# .) There should be (as of July 2017) a "Google Analytics Home" box at the top left of the dashboard.
# .) Click the "Audience Overview" link at the bottom right corner of this box.
# .) Adjust date range to previous month.
# .) Record the number of "Pageviews" in the "Hits" column below.
# The data below are from the libmesh.github.io site, which uses the
# number UA-24978333-1.
#
# Note: we do not have control over the analytics for the
# https://www.github.com/libMesh/libmesh page. If you look at the page
# source, analytics code UA-3769691-2 appears, but if I try to add
# this property in my analytics account, Google assigns me the number
# UA-24978333-{2,3,...} (where the last digit may change depending on
# how many times you tried to add/remove this property in the
# Analytics Dashboard) and there does not seem to be a straightforward
# way of inserting this code into the source. There have been some
# README.md based hacks for doing this in the past, but I don't think
# they are particularly reliable...
# Hits, pages, GB served
data = [
# 'Jan 2003', 616, 616, 0
# 'Feb 2003', 2078, 2078, 0,
# 'Mar 2003', 3157, 3157, 0,
# 'Apr 2003', 7800, 7800, 0,
# 'May 2003', 4627, 4627, 0,
# 'Jun 2003', 6156, 6156, 0,
# 'Jul 2003', 6389, 6389, 0,
# 'Aug 2003', 10136, 10136, 0,
# 'Sep 2003', 8871, 8871, 0,
# 'Oct 2003', 9703, 9703, 0,
# 'Nov 2003', 9802, 9802, 0,
# 'Dec 2003', 9123, 9123, 0,
# 'Jan 2004', 13599, 13599, 0,
# 'Feb 2004', 11018, 11018, 0,
# 'Mar 2004', 11713, 11713, 0,
# 'Apr 2004', 14995, 14995, 0,
# 'May 2004', 11285, 11285, 0,
# 'Jun 2004', 12974, 12974, 0,
# 'Jul 2004', 12939, 12939, 0,
# 'Aug 2004', 9708, 9708, 0,
# 'Sep 2004', 7994, 7994, 0,
# 'Oct 2004', 6920, 6920, 0,
# 'Nov 2004', 10261, 10261, 0,
# 'Dec 2004', 7483, 7483, 0,
# 'Jan 2005', 3184, 3184, 0,
# 'Feb 2005', 37733, 14077, .4373,
# 'Mar 2005', 43927, 16408, .5637,
# 'Apr 2005', 29792, 8518, .2890,
# 'May 2005', 51288, 17629, .5689,
# 'Jun 2005', 40617, 16599, .5379,
# 'Jul 2005', 29944, 10006, .3363,
# 'Aug 2005', 39592, 14556, .4577,
# 'Sep 2005', 57638, 14666, .4881,
# 'Oct 2005', 48336, 17976, .5749,
# 'Nov 2005', 49563, 15308, .5810,
# 'Dec 2005', 90863, 40736, .9415,
# 'Jan 2006', 46723, 13487, .5662,
# 'Feb 2006', 62285, 26567, .8229,
# 'Mar 2006', 47446, 14711, .6534,
# 'Apr 2006', 90314, 29635, .9762,
# 'May 2006', 68209, 20998, .7949,
# 'Jun 2006', 50495, 17128, .6881,
# 'Jul 2006', 42387, 10958, .6016,
# 'Aug 2006', 55658, 11793, .6174,
# 'Sep 2006', 54919, 20591, .9056,
# 'Oct 2006', 52916, 17944, .9015,
# 'Nov 2006', 55382, 19833, .9439,
# 'Dec 2006', 54265, 22688, .9162,
# 'Jan 2007', 53813, 19881, 1.0 ,
# 'Feb 2007', 52434, 17920, .9472,
# 'Mar 2007', 61530, 21172, 1.2,
# 'Apr 2007', 125578, 77539, 1.3,
# 'May 2007', 182764, 129596, 1.6,
# 'Jun 2007', 115730, 38571, 1.7,
# 'Jul 2007', 121054, 42757, 1.8,
# 'Aug 2007', 81192, 28187, 1.3,
# 'Sep 2007', 143553, 39734, 2.3,
# 'Oct 2007', 110449, 42111, 2.4,
# 'Nov 2007', 128307, 57851, 2.3,
# 'Dec 2007', 80584, 42631, 2.0,
# 'Jan 2008', 69623, 34155, 2.0,
# 'Feb 2008', 144881, 111751, 2.5,
# 'Mar 2008', 69801, 29211, 1.9,
# 'Apr 2008', 74023, 31149, 2.0,
# 'May 2008', 63123, 23277, 1.8,
# 'Jun 2008', 66055, 25418, 2.1,
# 'Jul 2008', 60046, 22082, 2.0,
# 'Aug 2008', 60206, 24543, 2.0,
# 'Sep 2008', 53057, 18635, 1.6,
# 'Oct 2008', 64828, 27042, 2.1,
# 'Nov 2008', 72406, 29767, 2.3,
# 'Dec 2008', 76248, 31690, 2.3,
# 'Jan 2009', 73002, 29744, 2.0,
# 'Feb 2009', 70801, 29156, 2.1,
# 'Mar 2009', 78200, 31139, 2.1,
# 'Apr 2009', 70888, 26182, 1.7,
# 'May 2009', 67263, 26210, 1.8,
# 'Jun 2009', 73146, 31328, 2.6,
# 'Jul 2009', 77828, 33711, 2.4,
# 'Aug 2009', 64378, 28542, 1.9,
# 'Sep 2009', 76167, 33484, 2.2,
# 'Oct 2009', 95727, 41062, 2.8,
# 'Nov 2009', 88042, 38869, 2.5,
# 'Dec 2009', 76148, 37609, 2.3,
# 'Jan 2010', 268856, 45983, 3.2,
# 'Feb 2010', 208210, 42680, 3.0,
# 'Mar 2010', 116263, 42660, 2.6,
# 'Apr 2010', 102493, 32942, 2.4,
# 'May 2010', 117023, 37107, 2.5,
# 'Jun 2010', 128589, 38019, 2.5,
# 'Jul 2010', 87183, 34026, 2.2,
# 'Aug 2010', 99161, 33199, 2.5,
# 'Sep 2010', 81657, 32305, 2.5,
# 'Oct 2010', 98236, 42091, 3.4,
# 'Nov 2010', 115603, 48695, 3.4,
# 'Dec 2010', 105030, 45570, 3.4,
# 'Jan 2011', 133476, 43549, 3.1,
# 'Feb 2011', 34483, 15002, 1.1,
# 'Mar 2011', 0, 0, 0.0,
# 'Apr 2011', 0, 0, 0.0,
# 'May 2011', 0, 0, 0.0,
# 'Jun 2011', 0, 0, 0.0,
# 'Jul 2011', 0, 0, 0.0,
'Aug 2011', 10185, 0, 0.0, # New "Pageviews" data from google analytics, does not seem comparable to sf.net pagehits data
'Sep 2011', 10305, 0, 0.0,
'Oct 2011', 14081, 0, 0.0,
'Nov 2011', 13397, 0, 0.0,
'Dec 2011', 13729, 0, 0.0,
'Jan 2012', 11050, 0, 0.0,
'Feb 2012', 12779, 0, 0.0,
'Mar 2012', 12970, 0, 0.0,
'Apr 2012', 13051, 0, 0.0,
'May 2012', 11857, 0, 0.0,
'Jun 2012', 12584, 0, 0.0,
'Jul 2012', 12995, 0, 0.0,
'Aug 2012', 13204, 0, 0.0,
'Sep 2012', 13170, 0, 0.0,
'Oct 2012', 13335, 0, 0.0,
'Nov 2012', 11337, 0, 0.0,
'Dec 2012', 10108, 0, 0.0, # libmesh switched to github on December 10, 2012
'Jan 2013', 13029, 0, 0.0,
'Feb 2013', 10420, 0, 0.0,
'Mar 2013', 13400, 0, 0.0,
'Apr 2013', 14416, 0, 0.0,
'May 2013', 13875, 0, 0.0,
'Jun 2013', 13747, 0, 0.0,
'Jul 2013', 14019, 0, 0.0,
'Aug 2013', 10828, 0, 0.0,
'Sep 2013', 9969, 0, 0.0,
'Oct 2013', 13083, 0, 0.0,
'Nov 2013', 12938, 0, 0.0,
'Dec 2013', 9079, 0, 0.0,
'Jan 2014', 9736, 0, 0.0,
'Feb 2014', 11824, 0, 0.0,
'Mar 2014', 10861, 0, 0.0,
'Apr 2014', 12711, 0, 0.0,
'May 2014', 11177, 0, 0.0,
'Jun 2014', 10738, 0, 0.0,
'Jul 2014', 10349, 0, 0.0,
'Aug 2014', 8877, 0, 0.0,
'Sep 2014', 9226, 0, 0.0,
'Oct 2014', 8052, 0, 0.0, # Google analytics number moved over to libmesh.github.io in Oct 2014
'Nov 2014', 9243, 0, 0.0,
'Dec 2014', 10714, 0, 0.0,
'Jan 2015', 11508, 0, 0.0,
'Feb 2015', 11278, 0, 0.0,
'Mar 2015', 13305, 0, 0.0,
'Apr 2015', 12347, 0, 0.0,
'May 2015', 11368, 0, 0.0,
'Jun 2015', 11203, 0, 0.0,
'Jul 2015', 10419, 0, 0.0,
'Aug 2015', 11282, 0, 0.0,
'Sep 2015', 13535, 0, 0.0,
'Oct 2015', 12912, 0, 0.0,
'Nov 2015', 13894, 0, 0.0,
'Dec 2015', 11694, 0, 0.0,
'Jan 2016', 11837, 0, 0.0,
'Feb 2016', 14102, 0, 0.0,
'Mar 2016', 13212, 0, 0.0,
'Apr 2016', 13355, 0, 0.0,
'May 2016', 12486, 0, 0.0,
'Jun 2016', 13973, 0, 0.0,
'Jul 2016', 10688, 0, 0.0,
'Aug 2016', 10048, 0, 0.0,
'Sep 2016', 10847, 0, 0.0,
'Oct 2016', 10984, 0, 0.0,
'Nov 2016', 12233, 0, 0.0,
'Dec 2016', 11430, 0, 0.0,
'Jan 2017', 10327, 0, 0.0,
'Feb 2017', 11039, 0, 0.0,
'Mar 2017', 12986, 0, 0.0,
'Apr 2017', 9773, 0, 0.0,
'May 2017', 10880, 0, 0.0,
'Jun 2017', 9179, 0, 0.0,
'Jul 2017', 8344, 0, 0.0,
'Aug 2017', 8617, 0, 0.0,
'Sep 2017', 8576, 0, 0.0,
]
# Extract number of hits/month
n_hits_month = data[1::4]
# Divide by 1000 for plotting...
n_hits_month = np.divide(n_hits_month, 1000.)
# Extract list of date strings
date_strings = data[0::4]
# Convert date strings into numbers
date_nums = []
for d in date_strings:
date_nums.append(date2num(datetime.strptime(d, '%b %Y')))
# Get a reference to the figure
fig = plt.figure()
# 111 is equivalent to Matlab's subplot(1,1,1) command
ax = fig.add_subplot(111)
# Make the bar chart. We have one number/month, there are about 30
# days in each month, this defines the bar width...
# The color used comes from sns.color_palette("muted").as_hex() They
# are the "same basic order of hues as the default matplotlib color
# cycle but more attractive colors."
ax.bar(date_nums, n_hits_month, width=30, color=u'#4878cf')
# Create title
fig.suptitle('LibMesh Page Hits/Month (in Thousands)')
# Set up x-tick locations -- August of each year
ticks_names = ['2012', '2013', '2014', '2015', '2016', '2017']
# Get numerical values for the names
tick_nums = []
for x in ticks_names:
tick_nums.append(date2num(datetime.strptime('Jan ' + x, '%b %Y')))
# Set tick labels and positions
ax.set_xticks(tick_nums)
ax.set_xticklabels(ticks_names)
# Set x limits for the plot
plt.xlim(date_nums[0], date_nums[-1]+30);
# Make x-axis ticks point outward
ax.get_xaxis().set_tick_params(direction='out')
# Save as PDF
plt.savefig('libmesh_pagehits.pdf')
# Local Variables:
# python-indent: 2
# End:
|
lgpl-2.1
|
clham/portfolio
|
portfolio.py
|
1
|
3609
|
import pandas as pd
import numpy as np
import scipy as sp
class Portfolio(object):
def __init__(self):
        '''Establishes a portfolio class.
        '''
        self.Assets = pd.DataFrame(columns=['mu', 'vol', 'weight'], dtype='float')
        self.Assets.index.name = 'Asset'
        self.Rho = pd.DataFrame(columns=[], dtype='float')
def add_asset(self, asset, mu, vol, weight):
'''Adds an asset class to the portfolio
'''
self.Assets.loc[asset]=[mu, vol, weight]
self.Rho.ix[asset,asset]=1
def add_rho(self, asset1, asset2, rho):
        '''Adds correlation coefficients to the table, also ensuring it remains symmetric.
'''
if set([asset1, asset2]).issubset(self.Assets.index):
self.Rho.ix[asset1, asset2]=rho
self.Rho.ix[asset2, asset1]=rho
else:
print("One of the assets is not defined. Please add the asset before assigning correlation")
def monte_carlo(self, time):
        '''Performs an N-period Monte Carlo simulation of the portfolio, without rebalancing.
        Returns: end-value factor
'''
#convert everything germane into np arrays
mu=self.Assets[['mu']].values
vol=self.Assets[['vol']].values
var=self.var_covar()
        #generate an Nx1 vector of standard normal draws
Z=np.random.randn(len(self.Assets.index),1)
        #generate correlated normal draws as Chol(VAR) * Z
        Z_interaction=np.dot(np.linalg.cholesky(var),Z)
        drift=np.dot((mu-np.power(vol,2)/2), time)
        noise=Z_interaction*time**.5
        #combine into the GBM end-value factor
value=np.exp(np.multiply(np.add(drift,noise), self.Assets[['weight']].values).sum())
return(value)
def var_covar(self):
'''Returns VAR_COVAR matrix of the portfolio
'''
vold=np.diag(self.Assets['vol'])
return(np.dot( np.dot(vold, self.Rho.values), vold))
def set_default_portfolio(self):
'''sets up default portfolio'''
equity=.60
domestic=.7
international=.3
debt=.35
reit=.1
#setup portfolio
self.add_asset('large domestic', .0849, .1475, equity*domestic*.65)
self.add_asset('mid domestic', .0919, .1775, equity*domestic*.27)
self.add_asset('small domestic', .0924, .1975, equity*domestic*.08)
self.add_asset('international equity', .087, .145, equity*international)
self.add_asset('agg debt', .0435, .045, debt)
self.add_asset('US REIT', .0855, .2, reit)
self.add_rho('large domestic', 'mid domestic', .96)
self.add_rho('large domestic', 'small domestic', .92)
self.add_rho('large domestic', 'international equity', .88)
self.add_rho('large domestic', 'agg debt', .04)
self.add_rho('large domestic', 'US REIT', .77)
self.add_rho('mid domestic', 'small domestic', .94)
self.add_rho('mid domestic', 'international equity', .88)
self.add_rho('mid domestic', 'agg debt', .03)
self.add_rho('mid domestic', 'US REIT', .79)
self.add_rho('small domestic', 'international equity', .82)
self.add_rho('small domestic', 'agg debt', -.04)
self.add_rho('small domestic', 'US REIT', .79)
self.add_rho('international equity', 'agg debt', -.02)
self.add_rho('international equity', 'US REIT', .66)
self.add_rho('agg debt', 'US REIT', .22)
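# Illustrative usage (a hedged sketch; the 10-year horizon and draw count are
# arbitrary choices, not values prescribed by this module):
#
#     p = Portfolio()
#     p.set_default_portfolio()
#     draws = [p.monte_carlo(time=10) for _ in range(10000)]
#     print(np.mean(draws))   # mean end-value factor over the horizon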
|
mit
|
anguoyang/SMQTK
|
MASIR/python/masir/search/EventContentDescriptor/cross_validation/perf_estimation.py
|
1
|
13789
|
# coding=utf-8
"""
LICENCE
-------
Copyright 2013 by Kitware, Inc. All Rights Reserved. Please refer to
KITWARE_LICENSE.TXT for licensing information, or contact General Counsel,
Kitware, Inc., 28 Corporate Drive, Clifton Park, NY 12065.
"""
import numpy as np
import itertools
def compute_errorRate(labels, scores, target_class=1):
"""
    Compute probability of false alarm (PFA) and probability of missed detection (PMD) at every sample point.
    This is done by sorting the scores, and counting the misses/false alarms based on labels.
    @param labels: list of ground truth labels
    @type labels: iterable
    @param scores: scores generated by classifiers
    @type scores: numpy.iterable (includes most iterables)
    @param target_class: the label of the target (positive) class (default = 1)
    @type target_class: int
    @return: (sorted) PFAs, PMDs, and the indices that sort the original inputs (by decreasing score)
    @rtype: tuple of (numpy.array of floats, numpy.array of floats, numpy.array of int)
"""
num_scores = len(scores)
    if len(labels) != num_scores:
        print('The number of labels and scores should be the same.')
        return 0
sortedIndex = np.argsort(scores)[::-1]
sortedLabels = [labels[idx] for idx in sortedIndex]
# sortedScores = scores[sortedIndex]
num_pos = 0
for i in range(num_scores):
if labels[i]==target_class:
num_pos += 1
num_neg = num_scores - num_pos
pfas = np.zeros(num_scores)
pmds = np.zeros(num_scores)
fp, fn = 0, num_pos
for i in range(num_scores):
if sortedLabels[i]==target_class:
fn -= 1
else:
fp += 1
if not num_neg==0:
pfas[i] = fp/float(num_neg)
if not num_pos==0:
pmds[i] = fn/float(num_pos)
return pfas, pmds, sortedIndex
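# Illustrative usage (a hedged sketch with toy labels and scores, not data
# from any real evaluation):
#
#     labels = [1, 0, 1, 0, 1]
#     scores = [0.9, 0.8, 0.7, 0.3, 0.1]
#     pfas, pmds, order = compute_errorRate(labels, scores, target_class=1)
#     # pfas rises and pmds falls as the threshold sweeps down the sorted
#     # scores, tracing a DET-style error trade-off curve.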
def plot_yearly_goals(ax, goal_years):
intercepts = [0.25, 0.50, 0.65, 0.75, 0.82]
for idx, year in enumerate(goal_years):
xs = np.arange(0, 1, 0.0001)
ys = 12.5 * xs + intercepts[year-1]
plot_label = 'Year %d Goal' % (year)
ax.plot(xs*100, ys,'k--', label=plot_label)
def average_precision_R0(inputs, plot_parameters = None):
"""
Compute Average Precision, R0, and maximum possible R0.
For definition of metrics, look at ALADDIN program documentation on metrics for Year 3 and on.
Also, plots curves if filename is specified.
@param inputs: list of dictionary, each dictionary contains information regarding different event as follows
input['scores']: series of scores in the same order as labels, with type iterable
input['labels']: series of ground truth labels as labels
input['gamma']: optional, default = 0.001, ratio of positives
input['target_class']: target class label, integer
input['threshold']: optional float, if exists, computes R0 (in addition to R0*), and also mark on curve
input['topN']: optional, if exists, only use topN scores for results computing
input['plot_parameters']: optional dictionary, if exists each input write its own plot file
input['plot_parameters']['filename_plot']: string, write a plot file
input['plot_parameters']['title']: optional, title of image
input['plot_parameters']['name']: name of this curve to be included in legend
input['plot_parameters']['color']: optional, color for this curve,
input['plot_parameters']['linewidth']: optional, linewidth for this curve
input['plot_parameters']['resolution']: optional, size of output image, default = (600.0, 600.0)
input['plot_parameters']['use_log_xaxis']: optional boolean, default = False, if True use [10, 100, 1000,..] as uniformly spaced x tickmarks
input['plot_parameters']['show_threshold']: optional boolean, default = True, if True and thrshold exists, mark the threshold on the curve
input['plot_parameters']['show_R0_star']: optional boolean, default = True, if True, show R0* in legend
@param plot_parameters: optional, dictionary, plotting parameters to draw multiple inputs together.
input['plot_parameters']['filename_plot']: string, path for a plot file.
input['plot_parameters']['title']: optional, title of image.
input['plot_parameters']['resolution']: optional, size of output image, default = (600.0, 600.0).
input['plot_parameters']['use_log_xaxis']: optional boolean, default = False, if True use [10, 100, 1000,..] as uniformly spaced x tickmarks.
input['plot_parameters']['show_threshold']: optional boolean, default = True, if True, mark the thresholds (for those it exists) on the curves
plot_parameters['show_R0_star']: optional boolean, default = True, if True, show R0* in legend
For legends/colors/linewidths, use name/color/linewidth of each input plot parameters
@return: list of dictionary, each with the following items
outputs: list of output dictionary for each input
output['ap_prime']: normalized ap
output['ap']: traditional average precision
output['R0_star']: maximum possible R0
output['R0']: optional, R0 at threshold
"""
    #matplotlib.use('Agg') # this step makes it work on machines without a GUI backend.
import matplotlib.pyplot as plt
colors = itertools.cycle([[1,0,0], "b", "g", "c", "m", "y", "k"])
outputs = [dict() for k in range(len(inputs))]
# list of plot handles
p = [0]*len(inputs)
# if all the curves are to be plotted on same figure, initialize figure
if plot_parameters is not None:
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
# iterate over list of inputs
for idx, ip in enumerate(inputs):
scores = ip['scores']
labels = ip['labels']
num_scores = len(scores)
        if len(labels) != num_scores:
            print('The number of labels and scores should be the same.')
            return 0
sortedIndex = np.argsort(scores)[::-1]
sortedLabels = [labels[index] for index in sortedIndex]
# set defaults
target_class = 1
gamma = 0.001
dpi_value = 600
line_width = 2.0
show_threshold = True
show_R0_star = True
# input parameter options
if 'target_class' in ip:
target_class = ip['target_class']
if 'topN' in ip:
sortedLabels = sortedLabels[:ip['topN']]
num_scores = ip['topN']
if 'gamma' in ip:
gamma = ip['gamma']
# compute metrics
num_pos = sortedLabels.count(target_class)
percent_rank = np.zeros(num_scores)
precision = np.zeros(num_scores)
recall = np.zeros(num_scores)
true_detection = 0
for i in range(num_scores):
percent_rank[i] = (i+1) / float(num_scores)
if sortedLabels[i] == target_class:
true_detection += 1
precision[i] = true_detection / float(i + 1)
recall[i] = true_detection / float(num_pos)
indices = [i for i, x in enumerate(sortedLabels) if x == target_class]
ap_prime = np.sum (gamma * recall[indices] / percent_rank[indices]) / num_pos
ap = np.mean(precision[indices])
R0_star = np.max(recall - 12.5 * percent_rank)
outputs[idx]['ap_prime'] = ap_prime
outputs[idx]['ap'] = ap
outputs[idx]['R0_star'] = R0_star
# plotting parameters
if 'threshold' in ip:
index = scores.index(ip['threshold'])
R0 = recall[index] - 12.5*percent_rank[index]
outputs[idx]['R0'] = R0
        if plot_parameters is not None:
if 'show_threshold' in plot_parameters:
show_threshold = plot_parameters['show_threshold']
if 'plot_parameters' in ip:
if 'show_threshold' in ip['plot_parameters']:
show_threshold = ip['plot_parameters']['show_threshold']
else:
show_threshold = False
if plot_parameters is not None:
if 'show_R0_star' in plot_parameters:
show_R0_star = plot_parameters['show_R0_star']
if 'plot_parameters' in ip:
if 'show_R0_star' in ip['plot_parameters']:
show_R0_star = ip['plot_parameters']['show_R0_star']
if show_R0_star:
plot_label = 'AP\' = %0.4f, AP = %0.4f, R0* = %0.4f' % (ap_prime, ap, R0_star)
else:
plot_label = 'AP\' = %0.4f, AP = %0.4f' % (ap_prime, ap)
if 'plot_parameters' in ip:
if 'color' in ip['plot_parameters']:
color_name = ip['plot_parameters']['color']
else:
color_name = next(colors)
if 'linewidth' in ip['plot_parameters']:
line_width = ip['plot_parameters']['linewidth']
if 'name' in ip['plot_parameters']:
plot_label = '%s, %s' % (ip['plot_parameters']['name'], plot_label)
else:
color_name = next(colors)
# plot percent_rank vs. recall curve
if plot_parameters is None:
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
p[idx] = ax.plot(percent_rank*100, recall, linewidth=line_width, color = color_name, label = plot_label)
if show_threshold:
index = scores.index(ip['threshold'])
            plt.plot(percent_rank[index]*100, recall[index], '^', markersize = 15, color = color_name)
# save each plot
if 'plot_parameters' in ip:
if 'title' in ip['plot_parameters']:
plt.title(ip['plot_parameters']['title'])
if 'resolution' in ip['plot_parameters']:
dpi_value = ip['plot_parameters']['resolution']
if 'filename_plot' in ip['plot_parameters']:
handles, labels = ax.get_legend_handles_labels()
ax.legend(handles[::-1], labels[::-1],loc = 'best', prop = {'size': 10})
plt.xlabel("Percent Rank")
plt.ylabel("Recall")
plt.ylim(0,1)
plt.xlim(0,100)
plt.grid(b=True, axis='both', linestyle=':', linewidth=0.3)
plt.savefig(ip['plot_parameters']['filename_plot'], dpi=dpi_value)
# save final plot with all the curves
if plot_parameters is not None:
if 'title' in plot_parameters:
plt.title(plot_parameters['title'])
if 'resolution' in plot_parameters:
dpi_value = plot_parameters['resolution']
if 'filename_plot' in plot_parameters:
handles, labels = ax.get_legend_handles_labels()
ax.legend(handles[::-1], labels[::-1],loc = 'best', prop = {'size': 10})
plt.xlabel("Percent Rank")
plt.ylabel("Recall")
plt.ylim(0,1)
plt.xlim(0,100)
plt.grid(b=True, axis='both', linestyle=':', linewidth=0.3)
plt.savefig(plot_parameters['filename_plot'], dpi=dpi_value)
return outputs
def average_precision(scores, labels, target_class=1, thresholds = None, topN = None):
"""
Compute Average Precision for one class, and return AP.
@param scores: series of scores in the same order as labels
@param labels: ground truth labels
@param target_class: the positive class (among possible labels)
@param topN: (optional) only compute AP among top N scoring examples
@type labels: iterable
@type scores: numpy.array_like
@type target_class: integer
@type topN: integer, optional, default = None (all samples will be used)
@return: average precision
@rtype: float
"""
if isinstance(scores[0], list):
inputs = [dict() for k in range(len(scores))]
for i in range(len(scores)):
inputs[i]['scores'] = scores[i]
inputs[i]['labels'] = labels[i]
inputs[i]['target_class'] = target_class
inputs[i]['plot_parameters'] = dict()
if thresholds is not None:
inputs[i]['threshold'] = thresholds[i]
if topN is not None:
inputs[i]['topN'] = topN
inputs[0]['plot_parameters']['name'] = 'E006'
inputs[1]['plot_parameters']['name'] = 'E007'
# inputs[0]['plot_parameters']['filename_plot'] = 'E006.jpg'
# inputs[1]['plot_parameters']['filename_plot'] = 'E007.jpg'
# inputs[0]['plot_parameters']['color'] = [1, 0, 0]
# inputs[1]['plot_parameters']['color'] = 'm'
else:
        inputs = [dict()]
        inputs[0]['scores'] = scores
        inputs[0]['labels'] = labels
        inputs[0]['target_class'] = target_class
        if thresholds is not None:
            inputs[0]['threshold'] = thresholds
        if topN is not None:
            inputs[0]['topN'] = topN
plot_parameters = dict()
plot_parameters['resolution'] = 600
plot_parameters['title'] = 'Performance Metrics'
plot_parameters['filename_plot'] = 'performance_metrics.jpg'
plot_parameters['show_R0_star'] = False
outputs = average_precision_R0(inputs, plot_parameters)
#outputs = average_precision_R0(inputs)
return outputs
if __name__ == "__main__" :
scores6 = (np.loadtxt('scores6.txt')).tolist()
labels6 = np.loadtxt('labels6.txt').tolist()
scores7 = (np.loadtxt('scores7.txt')).tolist()
labels7 = np.loadtxt('labels7.txt').tolist()
scores = [scores6, scores7]
labels = [labels6, labels7]
# scores = ([5, 4, 3, 2, 1], [1, 2, 3, 4, 5])
# labels = ([1, 1, 0, 1, 0], [1, 0, 0, 1, 1])
# target_class = [1, 1]
# thresholds = [3, 3]
#
metrics = average_precision(scores, labels, 1)
|
bsd-3-clause
|
kenshay/ImageScripter
|
ProgramData/SystemFiles/Python/Lib/site-packages/matplotlib/dviread.py
|
10
|
33393
|
"""
An experimental module for reading dvi files output by TeX. Several
limitations make this not (currently) useful as a general-purpose dvi
preprocessor, but it is currently used by the pdf backend for
processing usetex text.
Interface::
dvi = Dvi(filename, 72)
# iterate over pages (but only one page is supported for now):
for page in dvi:
w, h, d = page.width, page.height, page.descent
for x,y,font,glyph,width in page.text:
fontname = font.texname
pointsize = font.size
...
for x,y,height,width in page.boxes:
...
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import xrange
import errno
import matplotlib
import matplotlib.cbook as mpl_cbook
from matplotlib.compat import subprocess
from matplotlib import rcParams
import numpy as np
import struct
import sys
import os
if six.PY3:
def ord(x):
return x
_dvistate = mpl_cbook.Bunch(pre=0, outer=1, inpage=2, post_post=3, finale=4)
class Dvi(object):
"""
A dvi ("device-independent") file, as produced by TeX.
The current implementation only reads the first page and does not
even attempt to verify the postamble.
"""
def __init__(self, filename, dpi):
"""
Initialize the object. This takes the filename as input and
opens the file; actually reading the file happens when
iterating through the pages of the file.
"""
matplotlib.verbose.report('Dvi: ' + filename, 'debug')
self.file = open(filename, 'rb')
self.dpi = dpi
self.fonts = {}
self.state = _dvistate.pre
self.baseline = self._get_baseline(filename)
def _get_baseline(self, filename):
if rcParams['text.latex.preview']:
base, ext = os.path.splitext(filename)
baseline_filename = base + ".baseline"
if os.path.exists(baseline_filename):
with open(baseline_filename, 'rb') as fd:
l = fd.read().split()
height, depth, width = l
return float(depth)
return None
def __iter__(self):
"""
Iterate through the pages of the file.
Returns (text, boxes) pairs, where:
text is a list of (x, y, fontnum, glyphnum, width) tuples
boxes is a list of (x, y, height, width) tuples
The coordinates are transformed into a standard Cartesian
coordinate system at the dpi value given when initializing.
The coordinates are floating point numbers, but otherwise
precision is not lost and coordinate values are not clipped to
integers.
"""
while True:
have_page = self._read()
if have_page:
yield self._output()
else:
break
def close(self):
"""
Close the underlying file if it is open.
"""
if not self.file.closed:
self.file.close()
def _output(self):
"""
Output the text and boxes belonging to the most recent page.
page = dvi._output()
"""
minx, miny, maxx, maxy = np.inf, np.inf, -np.inf, -np.inf
maxy_pure = -np.inf
for elt in self.text + self.boxes:
if len(elt) == 4: # box
x,y,h,w = elt
e = 0 # zero depth
else: # glyph
x,y,font,g,w = elt
h,e = font._height_depth_of(g)
minx = min(minx, x)
miny = min(miny, y - h)
maxx = max(maxx, x + w)
maxy = max(maxy, y + e)
maxy_pure = max(maxy_pure, y)
        if self.dpi is None:
            # special case for ease of debugging: output raw dvi coordinates,
            # reporting the descent in raw dvi units as well
            return mpl_cbook.Bunch(text=self.text, boxes=self.boxes,
                                   width=maxx-minx, height=maxy_pure-miny,
                                   descent=maxy-maxy_pure)
d = self.dpi / (72.27 * 2**16) # from TeX's "scaled points" to dpi units
if self.baseline is None:
descent = (maxy - maxy_pure) * d
else:
descent = self.baseline
text = [ ((x-minx)*d, (maxy-y)*d - descent, f, g, w*d)
for (x,y,f,g,w) in self.text ]
boxes = [ ((x-minx)*d, (maxy-y)*d - descent, h*d, w*d) for (x,y,h,w) in self.boxes ]
return mpl_cbook.Bunch(text=text, boxes=boxes,
width=(maxx-minx)*d,
height=(maxy_pure-miny)*d,
descent=descent)
def _read(self):
"""
Read one page from the file. Return True if successful,
False if there were no more pages.
"""
while True:
byte = ord(self.file.read(1)[0])
self._dispatch(byte)
if byte == 140: # end of page
return True
if self.state == _dvistate.post_post: # end of file
self.close()
return False
def _arg(self, nbytes, signed=False):
"""
Read and return an integer argument *nbytes* long.
Signedness is determined by the *signed* keyword.
"""
        buf = self.file.read(nbytes)
        value = ord(buf[0])
        if signed and value >= 0x80:
            value = value - 0x100
        for i in range(1, nbytes):
            value = 0x100*value + ord(buf[i])
        return value
def _dispatch(self, byte):
"""
Based on the opcode *byte*, read the correct kinds of
arguments from the dvi file and call the method implementing
that opcode with those arguments.
"""
if 0 <= byte <= 127: self._set_char(byte)
elif byte == 128: self._set_char(self._arg(1))
elif byte == 129: self._set_char(self._arg(2))
elif byte == 130: self._set_char(self._arg(3))
elif byte == 131: self._set_char(self._arg(4, True))
elif byte == 132: self._set_rule(self._arg(4, True), self._arg(4, True))
elif byte == 133: self._put_char(self._arg(1))
elif byte == 134: self._put_char(self._arg(2))
elif byte == 135: self._put_char(self._arg(3))
elif byte == 136: self._put_char(self._arg(4, True))
elif byte == 137: self._put_rule(self._arg(4, True), self._arg(4, True))
elif byte == 138: self._nop()
elif byte == 139: self._bop(*[self._arg(4, True) for i in range(11)])
elif byte == 140: self._eop()
elif byte == 141: self._push()
elif byte == 142: self._pop()
elif byte == 143: self._right(self._arg(1, True))
elif byte == 144: self._right(self._arg(2, True))
elif byte == 145: self._right(self._arg(3, True))
elif byte == 146: self._right(self._arg(4, True))
elif byte == 147: self._right_w(None)
elif byte == 148: self._right_w(self._arg(1, True))
elif byte == 149: self._right_w(self._arg(2, True))
elif byte == 150: self._right_w(self._arg(3, True))
elif byte == 151: self._right_w(self._arg(4, True))
elif byte == 152: self._right_x(None)
elif byte == 153: self._right_x(self._arg(1, True))
elif byte == 154: self._right_x(self._arg(2, True))
elif byte == 155: self._right_x(self._arg(3, True))
elif byte == 156: self._right_x(self._arg(4, True))
elif byte == 157: self._down(self._arg(1, True))
elif byte == 158: self._down(self._arg(2, True))
elif byte == 159: self._down(self._arg(3, True))
elif byte == 160: self._down(self._arg(4, True))
elif byte == 161: self._down_y(None)
elif byte == 162: self._down_y(self._arg(1, True))
elif byte == 163: self._down_y(self._arg(2, True))
elif byte == 164: self._down_y(self._arg(3, True))
elif byte == 165: self._down_y(self._arg(4, True))
elif byte == 166: self._down_z(None)
elif byte == 167: self._down_z(self._arg(1, True))
elif byte == 168: self._down_z(self._arg(2, True))
elif byte == 169: self._down_z(self._arg(3, True))
elif byte == 170: self._down_z(self._arg(4, True))
elif 171 <= byte <= 234: self._fnt_num(byte-171)
elif byte == 235: self._fnt_num(self._arg(1))
elif byte == 236: self._fnt_num(self._arg(2))
elif byte == 237: self._fnt_num(self._arg(3))
elif byte == 238: self._fnt_num(self._arg(4, True))
elif 239 <= byte <= 242:
len = self._arg(byte-238)
special = self.file.read(len)
self._xxx(special)
elif 243 <= byte <= 246:
k = self._arg(byte-242, byte==246)
c, s, d, a, l = [ self._arg(x) for x in (4, 4, 4, 1, 1) ]
n = self.file.read(a+l)
self._fnt_def(k, c, s, d, a, l, n)
elif byte == 247:
i, num, den, mag, k = [ self._arg(x) for x in (1, 4, 4, 4, 1) ]
x = self.file.read(k)
self._pre(i, num, den, mag, x)
elif byte == 248: self._post()
elif byte == 249: self._post_post()
else:
raise ValueError("unknown command: byte %d"%byte)
def _pre(self, i, num, den, mag, comment):
if self.state != _dvistate.pre:
raise ValueError("pre command in middle of dvi file")
if i != 2:
raise ValueError("Unknown dvi format %d"%i)
if num != 25400000 or den != 7227 * 2**16:
raise ValueError("nonstandard units in dvi file")
# meaning: TeX always uses those exact values, so it
# should be enough for us to support those
# (There are 72.27 pt to an inch so 7227 pt =
# 7227 * 2**16 sp to 100 in. The numerator is multiplied
# by 10^5 to get units of 10**-7 meters.)
if mag != 1000:
raise ValueError("nonstandard magnification in dvi file")
# meaning: LaTeX seems to frown on setting \mag, so
# I think we can assume this is constant
self.state = _dvistate.outer
def _set_char(self, char):
if self.state != _dvistate.inpage:
raise ValueError("misplaced set_char in dvi file")
self._put_char(char)
self.h += self.fonts[self.f]._width_of(char)
def _set_rule(self, a, b):
if self.state != _dvistate.inpage:
raise ValueError("misplaced set_rule in dvi file")
self._put_rule(a, b)
self.h += b
def _put_char(self, char):
if self.state != _dvistate.inpage:
raise ValueError("misplaced put_char in dvi file")
font = self.fonts[self.f]
if font._vf is None:
self.text.append((self.h, self.v, font, char,
font._width_of(char)))
else:
scale = font._scale
for x, y, f, g, w in font._vf[char].text:
newf = DviFont(scale=_mul2012(scale, f._scale),
tfm=f._tfm, texname=f.texname, vf=f._vf)
self.text.append((self.h + _mul2012(x, scale),
self.v + _mul2012(y, scale),
newf, g, newf._width_of(g)))
self.boxes.extend([(self.h + _mul2012(x, scale),
self.v + _mul2012(y, scale),
_mul2012(a, scale), _mul2012(b, scale))
for x, y, a, b in font._vf[char].boxes])
def _put_rule(self, a, b):
if self.state != _dvistate.inpage:
raise ValueError("misplaced put_rule in dvi file")
if a > 0 and b > 0:
self.boxes.append((self.h, self.v, a, b))
def _nop(self):
pass
def _bop(self, c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, p):
if self.state != _dvistate.outer:
raise ValueError("misplaced bop in dvi file (state %d)" % self.state)
self.state = _dvistate.inpage
self.h, self.v, self.w, self.x, self.y, self.z = 0, 0, 0, 0, 0, 0
self.stack = []
self.text = [] # list of (x,y,fontnum,glyphnum)
self.boxes = [] # list of (x,y,width,height)
def _eop(self):
if self.state != _dvistate.inpage:
raise ValueError("misplaced eop in dvi file")
self.state = _dvistate.outer
del self.h, self.v, self.w, self.x, self.y, self.z, self.stack
def _push(self):
if self.state != _dvistate.inpage:
raise ValueError("misplaced push in dvi file")
self.stack.append((self.h, self.v, self.w, self.x, self.y, self.z))
def _pop(self):
if self.state != _dvistate.inpage:
raise ValueError("misplaced pop in dvi file")
self.h, self.v, self.w, self.x, self.y, self.z = self.stack.pop()
def _right(self, b):
if self.state != _dvistate.inpage:
raise ValueError("misplaced right in dvi file")
self.h += b
def _right_w(self, new_w):
if self.state != _dvistate.inpage:
raise ValueError("misplaced w in dvi file")
if new_w is not None:
self.w = new_w
self.h += self.w
def _right_x(self, new_x):
if self.state != _dvistate.inpage:
raise ValueError("misplaced x in dvi file")
if new_x is not None:
self.x = new_x
self.h += self.x
def _down(self, a):
if self.state != _dvistate.inpage:
raise ValueError("misplaced down in dvi file")
self.v += a
def _down_y(self, new_y):
if self.state != _dvistate.inpage:
raise ValueError("misplaced y in dvi file")
if new_y is not None:
self.y = new_y
self.v += self.y
def _down_z(self, new_z):
if self.state != _dvistate.inpage:
raise ValueError("misplaced z in dvi file")
if new_z is not None:
self.z = new_z
self.v += self.z
def _fnt_num(self, k):
if self.state != _dvistate.inpage:
raise ValueError("misplaced fnt_num in dvi file")
self.f = k
def _xxx(self, special):
if six.PY3:
matplotlib.verbose.report(
'Dvi._xxx: encountered special: %s'
% ''.join([(32 <= ord(ch) < 127) and chr(ch)
or '<%02x>' % ord(ch)
for ch in special]),
'debug')
else:
matplotlib.verbose.report(
'Dvi._xxx: encountered special: %s'
% ''.join([(32 <= ord(ch) < 127) and ch
or '<%02x>' % ord(ch)
for ch in special]),
'debug')
def _fnt_def(self, k, c, s, d, a, l, n):
fontname = n[-l:].decode('ascii')
tfm = _tfmfile(fontname)
if tfm is None:
if six.PY2:
error_class = OSError
else:
error_class = FileNotFoundError
raise error_class("missing font metrics file: %s" % fontname)
if c != 0 and tfm.checksum != 0 and c != tfm.checksum:
raise ValueError('tfm checksum mismatch: %s'%n)
vf = _vffile(fontname)
self.fonts[k] = DviFont(scale=s, tfm=tfm, texname=n, vf=vf)
def _post(self):
if self.state != _dvistate.outer:
raise ValueError("misplaced post in dvi file")
self.state = _dvistate.post_post
# TODO: actually read the postamble and finale?
# currently post_post just triggers closing the file
def _post_post(self):
raise NotImplementedError
class DviFont(object):
"""
Object that holds a font's texname and size, supports comparison,
and knows the widths of glyphs in the same units as the AFM file.
There are also internal attributes (for use by dviread.py) that
are *not* used for comparison.
The size is in Adobe points (converted from TeX points).
.. attribute:: texname
Name of the font as used internally by TeX and friends. This
is usually very different from any external font names, and
:class:`dviread.PsfontsMap` can be used to find the external
name of the font.
.. attribute:: size
Size of the font in Adobe points, converted from the slightly
smaller TeX points.
.. attribute:: widths
Widths of glyphs in glyph-space units, typically 1/1000ths of
the point size.
"""
__slots__ = ('texname', 'size', 'widths', '_scale', '_vf', '_tfm')
def __init__(self, scale, tfm, texname, vf):
if six.PY3 and isinstance(texname, bytes):
texname = texname.decode('ascii')
self._scale, self._tfm, self.texname, self._vf = \
scale, tfm, texname, vf
self.size = scale * (72.0 / (72.27 * 2**16))
try:
nchars = max(six.iterkeys(tfm.width)) + 1
except ValueError:
nchars = 0
self.widths = [ (1000*tfm.width.get(char, 0)) >> 20
for char in xrange(nchars) ]
def __eq__(self, other):
return self.__class__ == other.__class__ and \
self.texname == other.texname and self.size == other.size
def __ne__(self, other):
return not self.__eq__(other)
def _width_of(self, char):
"""
Width of char in dvi units. For internal use by dviread.py.
"""
width = self._tfm.width.get(char, None)
if width is not None:
return _mul2012(width, self._scale)
matplotlib.verbose.report(
'No width for char %d in font %s' % (char, self.texname),
'debug')
return 0
def _height_depth_of(self, char):
"""
Height and depth of char in dvi units. For internal use by dviread.py.
"""
result = []
for metric,name in ((self._tfm.height, "height"),
(self._tfm.depth, "depth")):
value = metric.get(char, None)
if value is None:
matplotlib.verbose.report(
'No %s for char %d in font %s' % (name, char, self.texname),
'debug')
result.append(0)
else:
result.append(_mul2012(value, self._scale))
return result
class Vf(Dvi):
"""
A virtual font (\*.vf file) containing subroutines for dvi files.
Usage::
vf = Vf(filename)
glyph = vf[code]
glyph.text, glyph.boxes, glyph.width
"""
def __init__(self, filename):
Dvi.__init__(self, filename, 0)
try:
self._first_font = None
self._chars = {}
self._packet_ends = None
self._read()
finally:
self.close()
def __getitem__(self, code):
return self._chars[code]
def _dispatch(self, byte):
# If we are in a packet, execute the dvi instructions
if self.state == _dvistate.inpage:
byte_at = self.file.tell()-1
if byte_at == self._packet_ends:
self._finalize_packet()
# fall through
elif byte_at > self._packet_ends:
raise ValueError("Packet length mismatch in vf file")
else:
if byte in (139, 140) or byte >= 243:
raise ValueError("Inappropriate opcode %d in vf file" % byte)
Dvi._dispatch(self, byte)
return
# We are outside a packet
if byte < 242: # a short packet (length given by byte)
cc, tfm = self._arg(1), self._arg(3)
self._init_packet(byte, cc, tfm)
elif byte == 242: # a long packet
pl, cc, tfm = [ self._arg(x) for x in (4, 4, 4) ]
self._init_packet(pl, cc, tfm)
elif 243 <= byte <= 246:
Dvi._dispatch(self, byte)
elif byte == 247: # preamble
i, k = self._arg(1), self._arg(1)
x = self.file.read(k)
cs, ds = self._arg(4), self._arg(4)
self._pre(i, x, cs, ds)
elif byte == 248: # postamble (just some number of 248s)
self.state = _dvistate.post_post
else:
raise ValueError("unknown vf opcode %d" % byte)
def _init_packet(self, pl, cc, tfm):
if self.state != _dvistate.outer:
raise ValueError("Misplaced packet in vf file")
self.state = _dvistate.inpage
self._packet_ends = self.file.tell() + pl
self._packet_char = cc
self._packet_width = tfm
self.h, self.v, self.w, self.x, self.y, self.z = 0, 0, 0, 0, 0, 0
self.stack, self.text, self.boxes = [], [], []
self.f = self._first_font
def _finalize_packet(self):
self._chars[self._packet_char] = mpl_cbook.Bunch(
text=self.text, boxes=self.boxes, width = self._packet_width)
self.state = _dvistate.outer
def _pre(self, i, x, cs, ds):
if self.state != _dvistate.pre:
raise ValueError("pre command in middle of vf file")
if i != 202:
raise ValueError("Unknown vf format %d" % i)
if len(x):
matplotlib.verbose.report('vf file comment: ' + x, 'debug')
self.state = _dvistate.outer
# cs = checksum, ds = design size
def _fnt_def(self, k, *args):
Dvi._fnt_def(self, k, *args)
if self._first_font is None:
self._first_font = k
def _fix2comp(num):
"""
Convert from two's complement to negative.
"""
assert 0 <= num < 2**32
if num & 2**31:
return num - 2**32
else:
return num
def _mul2012(num1, num2):
"""
Multiply two numbers in 20.12 fixed point format.
"""
# Separated into a function because >> has surprising precedence
return (num1*num2) >> 20
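# A worked example of the fixed-point helpers above (illustrative only):
#
#     one = 1 << 20                          # 1.0 in the dvi fixed-point scale
#     half = 1 << 19                         # 0.5
#     assert _mul2012(one, half) == half     # 1.0 * 0.5 == 0.5
#     assert _fix2comp(2**32 - 1) == -1      # two's-complement wrap-around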
class Tfm(object):
"""
A TeX Font Metric file. This implementation covers only the bare
minimum needed by the Dvi class.
.. attribute:: checksum
Used for verifying against the dvi file.
.. attribute:: design_size
Design size of the font (in what units?)
.. attribute:: width
Width of each character, needs to be scaled by the factor
specified in the dvi file. This is a dict because indexing may
not start from 0.
.. attribute:: height
Height of each character.
.. attribute:: depth
Depth of each character.
"""
__slots__ = ('checksum', 'design_size', 'width', 'height', 'depth')
def __init__(self, filename):
matplotlib.verbose.report('opening tfm file ' + filename, 'debug')
with open(filename, 'rb') as file:
header1 = file.read(24)
lh, bc, ec, nw, nh, nd = \
struct.unpack(str('!6H'), header1[2:14])
matplotlib.verbose.report(
'lh=%d, bc=%d, ec=%d, nw=%d, nh=%d, nd=%d' % (
lh, bc, ec, nw, nh, nd), 'debug')
header2 = file.read(4*lh)
self.checksum, self.design_size = \
struct.unpack(str('!2I'), header2[:8])
# there is also encoding information etc.
char_info = file.read(4*(ec-bc+1))
widths = file.read(4*nw)
heights = file.read(4*nh)
depths = file.read(4*nd)
self.width, self.height, self.depth = {}, {}, {}
widths, heights, depths = \
[ struct.unpack(str('!%dI') % (len(x)/4), x)
for x in (widths, heights, depths) ]
for idx, char in enumerate(xrange(bc, ec+1)):
self.width[char] = _fix2comp(widths[ord(char_info[4*idx])])
self.height[char] = _fix2comp(heights[ord(char_info[4*idx+1]) >> 4])
self.depth[char] = _fix2comp(depths[ord(char_info[4*idx+1]) & 0xf])
class PsfontsMap(object):
"""
A psfonts.map formatted file, mapping TeX fonts to PS fonts.
Usage::
>>> map = PsfontsMap(find_tex_file('pdftex.map'))
>>> entry = map['ptmbo8r']
>>> entry.texname
'ptmbo8r'
>>> entry.psname
'Times-Bold'
>>> entry.encoding
'/usr/local/texlive/2008/texmf-dist/fonts/enc/dvips/base/8r.enc'
>>> entry.effects
{'slant': 0.16700000000000001}
>>> entry.filename
For historical reasons, TeX knows many Type-1 fonts by different
names than the outside world. (For one thing, the names have to
fit in eight characters.) Also, TeX's native fonts are not Type-1
but Metafont, which is nontrivial to convert to PostScript except
as a bitmap. While high-quality conversions to Type-1 format exist
and are shipped with modern TeX distributions, we need to know
which Type-1 fonts are the counterparts of which native fonts. For
these reasons a mapping is needed from internal font names to font
file names.
A texmf tree typically includes mapping files called e.g.
psfonts.map, pdftex.map, dvipdfm.map. psfonts.map is used by
dvips, pdftex.map by pdfTeX, and dvipdfm.map by dvipdfm.
psfonts.map might avoid embedding the 35 PostScript fonts (i.e.,
have no filename for them, as in the Times-Bold example above),
while the pdf-related files perhaps only avoid the "Base 14" pdf
fonts. But the user may have configured these files differently.
"""
__slots__ = ('_font',)
def __init__(self, filename):
self._font = {}
with open(filename, 'rt') as file:
self._parse(file)
def __getitem__(self, texname):
try:
result = self._font[texname]
except KeyError:
result = self._font[texname.decode('ascii')]
fn, enc = result.filename, result.encoding
if fn is not None and not fn.startswith('/'):
result.filename = find_tex_file(fn)
if enc is not None and not enc.startswith('/'):
result.encoding = find_tex_file(result.encoding)
return result
def _parse(self, file):
"""Parse each line into words."""
for line in file:
line = line.strip()
if line == '' or line.startswith('%'):
continue
words, pos = [], 0
while pos < len(line):
if line[pos] == '"': # double quoted word
pos += 1
end = line.index('"', pos)
words.append(line[pos:end])
pos = end + 1
else: # ordinary word
end = line.find(' ', pos+1)
if end == -1: end = len(line)
words.append(line[pos:end])
pos = end
while pos < len(line) and line[pos] == ' ':
pos += 1
self._register(words)
def _register(self, words):
"""Register a font described by "words".
The format is, AFAIK: texname fontname [effects and filenames]
Effects are PostScript snippets like ".177 SlantFont",
filenames begin with one or two less-than signs. A filename
ending in enc is an encoding file, other filenames are font
files. This can be overridden with a left bracket: <[foobar
indicates an encoding file named foobar.
There is some difference between <foo.pfb and <<bar.pfb in
subsetting, but I have no example of << in my TeX installation.
"""
# If the map file specifies multiple encodings for a font, we
# follow pdfTeX in choosing the last one specified. Such
# entries are probably mistakes but they have occurred.
# http://tex.stackexchange.com/questions/10826/
# http://article.gmane.org/gmane.comp.tex.pdftex/4914
texname, psname = words[:2]
effects, encoding, filename = '', None, None
for word in words[2:]:
if not word.startswith('<'):
effects = word
else:
word = word.lstrip('<')
if word.startswith('[') or word.endswith('.enc'):
if encoding is not None:
matplotlib.verbose.report(
'Multiple encodings for %s = %s'
% (texname, psname), 'debug')
if word.startswith('['):
encoding = word[1:]
else:
encoding = word
else:
assert filename is None
filename = word
eff = effects.split()
effects = {}
try:
effects['slant'] = float(eff[eff.index('SlantFont')-1])
except ValueError:
pass
try:
effects['extend'] = float(eff[eff.index('ExtendFont')-1])
except ValueError:
pass
self._font[texname] = mpl_cbook.Bunch(
texname=texname, psname=psname, effects=effects,
encoding=encoding, filename=filename)
class Encoding(object):
"""
Parses a \*.enc file referenced from a psfonts.map style file.
The format this class understands is a very limited subset of
PostScript.
Usage (subject to change)::
for name in Encoding(filename):
whatever(name)
"""
__slots__ = ('encoding',)
def __init__(self, filename):
with open(filename, 'rt') as file:
matplotlib.verbose.report('Parsing TeX encoding ' + filename, 'debug-annoying')
self.encoding = self._parse(file)
matplotlib.verbose.report('Result: ' + repr(self.encoding), 'debug-annoying')
def __iter__(self):
for name in self.encoding:
yield name
def _parse(self, file):
result = []
state = 0
for line in file:
comment_start = line.find('%')
if comment_start > -1:
line = line[:comment_start]
line = line.strip()
if state == 0:
# Expecting something like /FooEncoding [
if '[' in line:
state = 1
line = line[line.index('[')+1:].strip()
if state == 1:
if ']' in line: # ] def
line = line[:line.index(']')]
state = 2
words = line.split()
for w in words:
if w.startswith('/'):
# Allow for /abc/def/ghi
subwords = w.split('/')
result.extend(subwords[1:])
else:
raise ValueError("Broken name in encoding file: " + w)
return result
def find_tex_file(filename, format=None):
"""
Call :program:`kpsewhich` to find a file in the texmf tree. If
*format* is not None, it is used as the value for the
`--format` option.
Apparently most existing TeX distributions on Unix-like systems
use kpathsea. I hear MikTeX (a popular distribution on Windows)
doesn't use kpathsea, so what do we do? (TODO)
.. seealso::
`Kpathsea documentation <http://www.tug.org/kpathsea/>`_
The library that :program:`kpsewhich` is part of.
"""
cmd = [str('kpsewhich')]
if format is not None:
cmd += ['--format=' + format]
cmd += [filename]
matplotlib.verbose.report('find_tex_file(%s): %s' \
% (filename,cmd), 'debug')
# stderr is unused, but reading it avoids a subprocess optimization
# that breaks EINTR handling in some Python versions:
# http://bugs.python.org/issue12493
# https://github.com/matplotlib/matplotlib/issues/633
pipe = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
result = pipe.communicate()[0].rstrip()
matplotlib.verbose.report('find_tex_file result: %s' % result,
'debug')
return result.decode('ascii')
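# Illustrative usage (a hedged sketch; assumes a TeX installation with
# kpsewhich on the PATH):
#
#     path = find_tex_file('cmr10.tfm')
#     # returns the absolute path reported by kpsewhich, or '' if not found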
# With multiple text objects per figure (e.g., tick labels) we may end
# up reading the same tfm and vf files many times, so we implement a
# simple cache. TODO: is this worth making persistent?
_tfmcache = {}
_vfcache = {}
def _fontfile(texname, class_, suffix, cache):
try:
return cache[texname]
except KeyError:
pass
filename = find_tex_file(texname + suffix)
if filename:
result = class_(filename)
else:
result = None
cache[texname] = result
return result
def _tfmfile(texname):
return _fontfile(texname, Tfm, '.tfm', _tfmcache)
def _vffile(texname):
return _fontfile(texname, Vf, '.vf', _vfcache)
if __name__ == '__main__':
import sys
matplotlib.verbose.set_level('debug-annoying')
fname = sys.argv[1]
try: dpi = float(sys.argv[2])
except IndexError: dpi = None
dvi = Dvi(fname, dpi)
fontmap = PsfontsMap(find_tex_file('pdftex.map'))
for page in dvi:
print('=== new page ===')
fPrev = None
for x,y,f,c,w in page.text:
if f != fPrev:
print('font', f.texname, 'scaled', f._scale/pow(2.0,20))
fPrev = f
print(x,y,c, 32 <= c < 128 and chr(c) or '.', w)
for x,y,w,h in page.boxes:
print(x,y,'BOX',w,h)
|
gpl-3.0
|
endolith/scikit-image
|
doc/examples/transform/plot_register_translation.py
|
14
|
2717
|
"""
=====================================
Cross-Correlation (Phase Correlation)
=====================================
In this example, we use phase correlation to identify the relative shift
between two similar-sized images.
The ``register_translation`` function uses cross-correlation in Fourier space,
optionally employing an upsampled matrix-multiplication DFT to achieve
arbitrary subpixel precision. [1]_
.. [1] Manuel Guizar-Sicairos, Samuel T. Thurman, and James R. Fienup,
"Efficient subpixel image registration algorithms," Optics Letters 33,
156-158 (2008).
"""
import numpy as np
import matplotlib.pyplot as plt
from skimage import data
from skimage.feature import register_translation
from skimage.feature.register_translation import _upsampled_dft
from scipy.ndimage import fourier_shift
image = data.camera()
shift = (-2.4, 1.32)
# (-2.4, 1.32) pixel offset relative to reference coin
offset_image = fourier_shift(np.fft.fftn(image), shift)
offset_image = np.fft.ifftn(offset_image)
print("Known offset (y, x):")
print(shift)
# pixel precision first
shift, error, diffphase = register_translation(image, offset_image)
fig = plt.figure(figsize=(8, 3))
ax1 = plt.subplot(1, 3, 1, adjustable='box-forced')
ax2 = plt.subplot(1, 3, 2, sharex=ax1, sharey=ax1, adjustable='box-forced')
ax3 = plt.subplot(1, 3, 3)
ax1.imshow(image)
ax1.set_axis_off()
ax1.set_title('Reference image')
ax2.imshow(offset_image.real)
ax2.set_axis_off()
ax2.set_title('Offset image')
# View the output of a cross-correlation to show what the algorithm is
# doing behind the scenes
image_product = np.fft.fft2(image) * np.fft.fft2(offset_image).conj()
cc_image = np.fft.fftshift(np.fft.ifft2(image_product))
ax3.imshow(cc_image.real)
ax3.set_axis_off()
ax3.set_title("Cross-correlation")
plt.show()
print("Detected pixel offset (y, x):")
print(shift)
# subpixel precision
shift, error, diffphase = register_translation(image, offset_image, 100)
fig = plt.figure(figsize=(8, 3))
ax1 = plt.subplot(1, 3, 1, adjustable='box-forced')
ax2 = plt.subplot(1, 3, 2, sharex=ax1, sharey=ax1, adjustable='box-forced')
ax3 = plt.subplot(1, 3, 3)
ax1.imshow(image)
ax1.set_axis_off()
ax1.set_title('Reference image')
ax2.imshow(offset_image.real)
ax2.set_axis_off()
ax2.set_title('Offset image')
# Calculate the upsampled DFT, again to show what the algorithm is doing
# behind the scenes. Constants correspond to calculated values in routine.
# See source code for details.
cc_image = _upsampled_dft(image_product, 150, 100, (shift*100)+75).conj()
ax3.imshow(cc_image.real)
ax3.set_axis_off()
ax3.set_title("Supersampled XC sub-area")
plt.show()
print("Detected subpixel offset (y, x):")
print(shift)
|
bsd-3-clause
|
waditu/tushare
|
tushare/stock/classifying.py
|
2
|
10615
|
# -*- coding:utf-8 -*-
"""
Interfaces for fetching stock classification data
Created on 2015/02/01
@author: Jimmy Liu
@group : waditu
@contact: [email protected]
"""
import pandas as pd
from tushare.stock import cons as ct
from tushare.stock import ref_vars as rv
import json
import re
from pandas.util.testing import _network_error_classes
import time
import tushare.stock.fundamental as fd
from tushare.util.netbase import Client
try:
from urllib.request import urlopen, Request
except ImportError:
from urllib2 import urlopen, Request
def get_industry_classified(standard='sina'):
"""
    Get industry classification data
    Parameters
    ----------
    standard
        sina: Sina industry classification; sw: Shenwan industry classification
    Returns
    -------
    DataFrame
        code : stock code
        name : stock name
        c_name : industry name
"""
if standard == 'sw':
# df = _get_type_data(ct.SINA_INDUSTRY_INDEX_URL%(ct.P_TYPE['http'],
# ct.DOMAINS['vsf'], ct.PAGES['ids_sw']))
df = pd.read_csv(ct.TSDATA_CLASS%(ct.P_TYPE['http'], ct.DOMAINS['oss'], 'industry_sw'),
dtype={'code':object})
else:
# df = _get_type_data(ct.SINA_INDUSTRY_INDEX_URL%(ct.P_TYPE['http'],
# ct.DOMAINS['vsf'], ct.PAGES['ids']))
df = pd.read_csv(ct.TSDATA_CLASS%(ct.P_TYPE['http'], ct.DOMAINS['oss'], 'industry'),
dtype={'code':object})
# data = []
# ct._write_head()
# for row in df.values:
# rowDf = _get_detail(row[0], retry_count=10, pause=0.01)
# rowDf['c_name'] = row[1]
# data.append(rowDf)
# data = pd.concat(data, ignore_index=True)
return df
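# Example (illustrative; requires network access):
#   df = get_industry_classified('sw')   # Shenwan industry classification
#   print(df.head())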
def get_concept_classified():
"""
    Get concept classification data
    Return
    --------
    DataFrame
        code : stock code
        name : stock name
        c_name : concept name
"""
df = pd.read_csv(ct.TSDATA_CLASS%(ct.P_TYPE['http'], ct.DOMAINS['oss'], 'concept'),
dtype={'code':object})
return df
def concepts():
ct._write_head()
df = _get_type_data(ct.SINA_CONCEPTS_INDEX_URL%(ct.P_TYPE['http'],
ct.DOMAINS['sf'], ct.PAGES['cpt']))
data = []
for row in df.values:
rowDf = _get_detail(row[0])
if rowDf is not None:
rowDf['c_name'] = row[1]
data.append(rowDf)
if len(data) > 0:
data = pd.concat(data, ignore_index=True)
data.to_csv('d:\\cpt.csv', index=False)
def get_concepts(src='dfcf'):
"""
    Get concept sector quotes
    Return
    --------
    DataFrame
        concept : concept sector name
        change : percent change
        up : number of advancing stocks
        down : number of declining stocks
        top_code : leading stock code
        top_name : leading stock name
"""
clt = Client(ct.ET_CONCEPTS_INDEX_URL%(ct.P_TYPE['http'],
ct.DOMAINS['dfcf'], _random(15)), ref='')
content = clt.gvalue()
content = content.decode('utf-8') if ct.PY3 else content
js = json.loads(content)
data = []
for row in js:
cols = row.split(',')
cs = cols[6].split('|')
arr = [cols[2], cols[3], cs[0], cs[2], cols[7], cols[9]]
data.append(arr)
df = pd.DataFrame(data, columns=['concept', 'change', 'up', 'down', 'top_code', 'top_name'])
return df
def get_area_classified():
"""
    Get region classification data
    Return
    --------
    DataFrame
        code : stock code
        name : stock name
        area : region name
"""
df = fd.get_stock_basics()
df = df[['name', 'area']]
df.reset_index(inplace=True)
df = df.sort_values('area').reset_index(drop=True)
return df
def get_gem_classified():
"""
    Get ChiNext (growth enterprise market) stocks
    Return
    --------
    DataFrame
        code : stock code
        name : stock name
"""
df = fd.get_stock_basics()
df.reset_index(inplace=True)
df = df[ct.FOR_CLASSIFY_COLS]
    df = df.loc[df.code.str[0] == '3']
df = df.sort_values('code').reset_index(drop=True)
return df
def get_sme_classified():
"""
    Get SME (small and medium enterprise) board stocks
    Return
    --------
    DataFrame
        code : stock code
        name : stock name
"""
df = fd.get_stock_basics()
df.reset_index(inplace=True)
df = df[ct.FOR_CLASSIFY_COLS]
    df = df.loc[df.code.str[0:3] == '002']
df = df.sort_values('code').reset_index(drop=True)
return df
def get_st_classified():
"""
    Get risk-warning (ST) stocks
    Return
    --------
    DataFrame
        code : stock code
        name : stock name
"""
df = fd.get_stock_basics()
df.reset_index(inplace=True)
df = df[ct.FOR_CLASSIFY_COLS]
    df = df.loc[df.name.str.contains('ST')]
df = df.sort_values('code').reset_index(drop=True)
return df
def _get_detail(tag, retry_count=3, pause=0.001):
dfc = pd.DataFrame()
p = 0
num_limit = 100
while(True):
p = p+1
for _ in range(retry_count):
time.sleep(pause)
try:
ct._write_console()
request = Request(ct.SINA_DATA_DETAIL_URL%(ct.P_TYPE['http'],
ct.DOMAINS['vsf'], ct.PAGES['jv'],
p,tag))
text = urlopen(request, timeout=10).read()
text = text.decode('gbk')
except _network_error_classes:
pass
else:
break
reg = re.compile(r'\,(.*?)\:')
text = reg.sub(r',"\1":', text)
text = text.replace('"{symbol', '{"symbol')
text = text.replace('{symbol', '{"symbol"')
jstr = json.dumps(text)
js = json.loads(jstr)
df = pd.DataFrame(pd.read_json(js, dtype={'code':object}), columns=ct.THE_FIELDS)
# df = df[ct.FOR_CLASSIFY_B_COLS]
df = df[['code', 'name']]
dfc = pd.concat([dfc, df])
if df.shape[0] < num_limit:
return dfc
#raise IOError(ct.NETWORK_URL_ERROR_MSG)
def _get_type_data(url):
try:
request = Request(url)
data_str = urlopen(request, timeout=10).read()
data_str = data_str.decode('GBK')
data_str = data_str.split('=')[1]
data_json = json.loads(data_str)
df = pd.DataFrame([[row.split(',')[0], row.split(',')[1]] for row in data_json.values()],
columns=['tag', 'name'])
return df
except Exception as er:
print(str(er))
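# Illustrative response format handled by _get_type_data (hypothetical values):
#   var S_Class = {"0": "tag1,name1", "1": "tag2,name2"}
# The part after '=' is parsed as JSON, and the first two comma-separated fields
# of each value become the 'tag' and 'name' columns.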
def get_hs300s():
"""
    Get current CSI 300 (HS300) constituents and their weights
    Return
    --------
    DataFrame
        code : stock code
        name : stock name
        date : date
        weight : weight
"""
try:
wt = pd.read_excel(ct.HS300_CLASSIFY_URL_FTP%(ct.P_TYPE['http'], ct.DOMAINS['idx'],
ct.PAGES['hs300w']), usecols=[0, 4, 5, 8])
wt.columns = ct.FOR_CLASSIFY_W_COLS
wt['code'] = wt['code'].map(lambda x :str(x).zfill(6))
return wt
except Exception as er:
print(str(er))
def get_sz50s():
"""
    Get SSE 50 constituents
    Return
    --------
    DataFrame
        date : date
        code : stock code
        name : stock name
"""
try:
df = pd.read_excel(ct.SZ_CLASSIFY_URL_FTP%(ct.P_TYPE['http'], ct.DOMAINS['idx'],
                                               ct.PAGES['sz50b']), usecols=[0, 4, 5])
df.columns = ct.FOR_CLASSIFY_B_COLS
df['code'] = df['code'].map(lambda x :str(x).zfill(6))
return df
except Exception as er:
print(str(er))
def get_zz500s():
"""
    Get CSI 500 constituents
    Return
    --------
    DataFrame
        date : date
        code : stock code
        name : stock name
        weight : weight
"""
try:
wt = pd.read_excel(ct.HS300_CLASSIFY_URL_FTP%(ct.P_TYPE['http'], ct.DOMAINS['idx'],
ct.PAGES['zz500wt']), usecols=[0, 4, 5, 8])
wt.columns = ct.FOR_CLASSIFY_W_COLS
wt['code'] = wt['code'].map(lambda x :str(x).zfill(6))
return wt
except Exception as er:
print(str(er))
def get_terminated():
"""
    Get the list of delisted stocks
    Return
    --------
    DataFrame
        code : stock code
        name : stock name
        oDate : listing date
        tDate : delisting date
"""
try:
ref = ct.SSEQ_CQ_REF_URL%(ct.P_TYPE['http'], ct.DOMAINS['sse'])
clt = Client(rv.TERMINATED_URL%(ct.P_TYPE['http'], ct.DOMAINS['sseq'],
ct.PAGES['ssecq'], _random(5),
_random()), ref=ref, cookie=rv.MAR_SH_COOKIESTR)
lines = clt.gvalue()
lines = lines.decode('utf-8') if ct.PY3 else lines
lines = lines[19:-1]
lines = json.loads(lines)
df = pd.DataFrame(lines['result'], columns=rv.TERMINATED_T_COLS)
df.columns = rv.TERMINATED_COLS
return df
except Exception as er:
print(str(er))
def get_suspended():
"""
    Get the list of suspended stocks
    Return
    --------
    DataFrame
        code : stock code
        name : stock name
        oDate : listing date
        tDate : delisting date
"""
try:
ref = ct.SSEQ_CQ_REF_URL%(ct.P_TYPE['http'], ct.DOMAINS['sse'])
clt = Client(rv.SUSPENDED_URL%(ct.P_TYPE['http'], ct.DOMAINS['sseq'],
ct.PAGES['ssecq'], _random(5),
_random()), ref=ref, cookie=rv.MAR_SH_COOKIESTR)
lines = clt.gvalue()
lines = lines.decode('utf-8') if ct.PY3 else lines
lines = lines[19:-1]
lines = json.loads(lines)
df = pd.DataFrame(lines['result'], columns=rv.TERMINATED_T_COLS)
df.columns = rv.TERMINATED_COLS
return df
except Exception as er:
print(str(er))
def _random(n=13):
from random import randint
start = 10**(n-1)
end = (10**n)-1
return str(randint(start, end))
|
bsd-3-clause
|
makeyourowntextminingtoolkit/makeyourowntextminingtoolkit
|
text_mining_toolkit/visualisation.py
|
1
|
5350
|
# module with visualisation functions
import os
import wordcloud
import matplotlib.pyplot as plt
import numpy
import IPython.core.display
import networkx
import networkx.readwrite.json_graph
import random
# word cloud from pandas frame of word and freq
def plot_wordcloud(word_count):
# wordcloud object
wc = wordcloud.WordCloud(max_words=100, width=1200, height=800, background_color="white", margin=10,
prefer_horizontal=1.0)
# words and plot sizes (word count, relevance, etc)
wc.generate_from_frequencies(word_count.to_dict()[word_count.columns[0]])
# plot wordcloud
plt.figure(dpi=600, figsize=(6,4))
plt.imshow(wc)
plt.axis("off")
pass
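# Example (illustrative): word_count is a pandas DataFrame indexed by word with a
# single numeric column (a count or relevance score); plot_wordcloud(word_count)
# then renders up to the 100 highest-weighted words.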
# force-directed graph
def plot_force_directed_graph(node1_node1_weight):
# column names for node source and target, and edge attributes
node_source_name = node1_node1_weight.columns.values[0]
node_target_name = node1_node1_weight.columns.values[1]
link_edge_name = node1_node1_weight.columns.values[2]
# convert node1_node1_weight to graph
graph = networkx.from_pandas_dataframe(node1_node1_weight, source=node_source_name, target=node_target_name, edge_attr=link_edge_name)
    # convert graph nodes and links to json, ready for d3
graph_json = networkx.readwrite.json_graph.node_link_data(graph)
graph_json_nodes = graph_json['nodes']
graph_json_links = graph_json['links']
#print(str(graph_json_nodes))
#print(str(graph_json_links))
# read html template
html_template_file = os.path.join(os.path.dirname(__file__), 'html_templates/d3_force_directed_graph.html')
with open(html_template_file, mode='r') as f:
html = f.read()
pass
# read javascript template
js_template_file = os.path.join(os.path.dirname(__file__), 'html_templates/d3_force_directed_graph.js')
with open(js_template_file, mode='r') as f:
js = f.read()
pass
# generate random identifier for SVG element, to avoid name clashes if used multiple times in a notebook
random_id_string = str(random.randrange(1000000,9999999))
# replace placeholder in both html and js templates
html = html.replace('%%unique-id%%', random_id_string)
js = js.replace('%%unique-id%%', random_id_string)
# substitute links and data
js = js.replace('%%links%%', str(graph_json_links))
js = js.replace('%%nodes%%', str(graph_json_nodes))
js = js.replace('%%edge_attribute%%', link_edge_name)
#print(html)
#print(js)
# display html in notebook cell
IPython.core.display.display_html(IPython.core.display.HTML(html))
# display (run) javascript in notebook cell
IPython.core.display.display_javascript(IPython.core.display.Javascript(data=js))
pass
# force-directed graph2 - identical to plot_force_directed_graph above, so it
# simply delegates to it instead of duplicating the template logic
def plot_force_directed_graph2(node1_node1_weight):
    plot_force_directed_graph(node1_node1_weight)
    pass
# bar chart
def plot_bar_chart(data_series):
plt.bar(numpy.arange(len(data_series)), data_series.values)
pass
# scatter 2-d plot
def plot_scatter_chart(xy_data):
# plot as scatter plot
p = plt.subplot(111)
    p.axis('scaled')
    #p.axis([-2, 2, -2, 2])
    p.axhline(y=0, color='lightgrey')
    p.axvline(x=0, color='lightgrey')
    p.set_yticklabels([])
    p.set_xticklabels([])
p.set_title("S_reduced_VT")
p.plot(xy_data.iloc[0], xy_data.iloc[1], 'ro')
plt.show()
pass
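# Example (illustrative): xy_data is a 2-row frame, e.g. the first two rows of a
# reduced V^T matrix from an SVD (hence the "S_reduced_VT" title); row 0 supplies
# the x coordinates and row 1 the y coordinates.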
|
gpl-2.0
|
rakshit-agrawal/crowdsource-platform
|
fixtures/createJson.py
|
16
|
2463
|
__author__ = 'Megha'
# Script to transfer csv containing data about various models to json
# Input csv file constituting of the model data
# Output json file representing the csv data as json object
# Assumes model name to be first line
# Field names of the model on the second line
# Data separated by __DELIM__
# Example:
# L01 ModelName: registrationmodel
# L02 FieldNames: user,activation_key,created_timestamp,last_updated
# L03 Data: 1,qwer,2015-05-01T00:17:40.085Z,2015-05-01T00:17:40.085Z
# L04 Data: 2,assd,2015-05-01T00:17:40.085Z,2015-05-01T00:17:40.085Z
import numpy as np
import pandas as pd
import json as json
__MODULE_NAME__ = 7  # number of lines per model block: name, field names, five data rows
__INPUT_FILE__ = 'meghaWorkerData.csv'
__OUTPUT_FILE__ = 'meghaWorkerData.json'
__NEWLINE__ = '\n'
__KEY1__ = 0
__KEY2__ = 0
__DELIM__ = ','
__APPEND__ = 'crowdsourcing.'
__KEY_MODEL__ = 'model'
__KEY_FIELDS__ = 'fields'
__KEY_PK__ = 'pk'
def create_dict(input_dict, module, data_collection):
for key, value in input_dict.items():
data_dict = {}
data_dict[__KEY_FIELDS__] = value
data_dict[__KEY_PK__] = key
data_dict[__KEY_MODEL__] = __APPEND__ + module
data_collection.append(data_dict)
return data_collection
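# Illustrative example (hypothetical values):
#   create_dict({1: {'user': '1', 'activation_key': 'qwer'}}, 'registrationmodel', [])
#   -> [{'fields': {'user': '1', 'activation_key': 'qwer'}, 'pk': 1,
#        'model': 'crowdsourcing.registrationmodel'}]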
def create_data_json(input_file):
    in_fp = open(input_file, 'rb')
file_lines = in_fp.readlines()
in_fp.close()
data_collection = []
for line_no in range(0, len(file_lines)):
if line_no % __MODULE_NAME__ == 0:
columns = file_lines[line_no + 1].strip(__NEWLINE__).split(__DELIM__)
instance1 = file_lines[line_no + 2].strip(__NEWLINE__).split(__DELIM__)
instance2 = file_lines[line_no + 3].strip(__NEWLINE__).split(__DELIM__)
instance3 = file_lines[line_no + 4].strip(__NEWLINE__).split(__DELIM__)
instance4 = file_lines[line_no + 5].strip(__NEWLINE__).split(__DELIM__)
instance5 = file_lines[line_no + 6].strip(__NEWLINE__).split(__DELIM__)
data = np.array([instance1,instance2,instance3,instance4,instance5])
df = pd.DataFrame(data, columns = columns)
create_dict(df.transpose().to_dict(), file_lines[line_no].strip(__NEWLINE__), data_collection)
del(df)
print(data_collection)
out_fp = open(__OUTPUT_FILE__, 'wb')
out_fp.write(json.dumps(data_collection, indent = 2))
out_fp.close()
if __name__ == '__main__':
    create_data_json(__INPUT_FILE__)
|
mit
|
mdesco/dipy
|
doc/examples/quick_start.py
|
13
|
6225
|
"""
=========================
Getting started with Dipy
=========================
In diffusion MRI (dMRI) we usually work with three types of files: a Nifti file
with the diffusion-weighted data, and two text files, one with the b-values and
one with the b-vectors.
In Dipy we provide tools to load and process these files, and we also provide
access to publicly available datasets for those who have not yet acquired their
own data.
With the following commands we can download a dMRI dataset
"""
from dipy.data import fetch_sherbrooke_3shell
fetch_sherbrooke_3shell()
"""
By default these datasets will go in the .dipy folder inside your home directory.
Here is how you can access them.
"""
from os.path import expanduser, join
home = expanduser('~')
"""
``dname`` holds the directory name where the 3 files are located.
"""
dname = join(home, '.dipy', 'sherbrooke_3shell')
"""
Here, we show the complete filenames of the 3 files
"""
fdwi = join(dname, 'HARDI193.nii.gz')
print(fdwi)
fbval = join(dname, 'HARDI193.bval')
print(fbval)
fbvec = join(dname, 'HARDI193.bvec')
print(fbvec)
"""
``/home/username/.dipy/sherbrooke_3shell/HARDI193.nii.gz``
``/home/username/.dipy/sherbrooke_3shell/HARDI193.bval``
``/home/username/.dipy/sherbrooke_3shell/HARDI193.bvec``
Now that we have the filenames, we can start checking what these files look like.
Let's start first by loading the dMRI datasets. For this purpose, we
use a python library called nibabel_ which enables us to read and write
neuroimaging-specific file formats.
"""
import nibabel as nib
img = nib.load(fdwi)
data = img.get_data()
"""
``data`` is a 4D array where the first 3 dimensions are the i, j, k voxel
coordinates and the last dimension is the number of non-weighted (S0s) and
diffusion-weighted volumes.
We can very easily check the size of ``data`` in the following way:
"""
print(data.shape)
"""
``(128, 128, 60, 194)``
We can also check the dimensions of each voxel in the following way:
"""
print(img.get_header().get_zooms()[:3])
"""
``(2.0, 2.0, 2.0)``
We can quickly visualize the results using matplotlib_. For example,
let's show here the middle axial slices of volume 0 and volume 10.
"""
import matplotlib.pyplot as plt
axial_middle = data.shape[2] // 2  # integer index of the middle axial slice
plt.figure('Showing the datasets')
plt.subplot(1, 2, 1).set_axis_off()
plt.imshow(data[:, :, axial_middle, 0].T, cmap='gray', origin='lower')
plt.subplot(1, 2, 2).set_axis_off()
plt.imshow(data[:, :, axial_middle, 10].T, cmap='gray', origin='lower')
plt.savefig('data.png', bbox_inches='tight')   # save before show() so the figure is not discarded
plt.show()
"""
.. figure:: data.png
:align: center
**Showing the middle axial slice without (left) and with (right) diffusion weighting**.
The next step is to load the b-values and b-vectors from the disk using
the function ``read_bvals_bvecs``.
"""
from dipy.io import read_bvals_bvecs
bvals, bvecs = read_bvals_bvecs(fbval, fbvec)
"""
In Dipy, we use an object called ``GradientTable`` which holds all the
acquisition-specific parameters, e.g. b-values, b-vectors, timings and others.
To create this object you can use the function ``gradient_table``.
"""
from dipy.core.gradients import gradient_table
gtab = gradient_table(bvals, bvecs)
"""
Finally, you can use ``gtab`` (the GradientTable object) to show some information about the
acquisition parameters
"""
print(gtab.info)
"""
B-values shape (193,)
min 0.000000
max 3500.000000
B-vectors shape (193, 3)
min -0.964050
max 0.999992
You can also see the b-values using:
"""
print(gtab.bvals)
"""
::
[ 0. 1000. 1000. 1000. 1000. 1000. 1000. 1000. 1000. 1000.
1000. 1000. 1000. 1000. 1000. 1000. 1000. 1000. 1000. 1000.
1000. 1000. 1000. 1000. 1000. 1000. 1000. 1000. 1000. 1000.
1000. 1000. 1000. 1000. 1000. 1000. 1000. 1000. 1000. 1000.
1000. 1000. 1000. 1000. 1000. 1000. 1000. 1000. 1000. 1000.
1000. 1000. 1000. 1000. 1000. 1000. 1000. 1000. 1000. 1000.
1000. 1000. 1000. 1000. 1000. 2000. 2000. 2000. 2000. 2000.
2000. 2000. 2000. 2000. 2000. 2000. 2000. 2000. 2000. 2000.
2000. 2000. 2000. 2000. 2000. 2000. 2000. 2000. 2000. 2000.
2000. 2000. 2000. 2000. 2000. 2000. 2000. 2000. 2000. 2000.
2000. 2000. 2000. 2000. 2000. 2000. 2000. 2000. 2000. 2000.
2000. 2000. 2000. 2000. 2000. 2000. 2000. 2000. 2000. 2000.
2000. 2000. 2000. 2000. 2000. 2000. 2000. 2000. 2000. 3500.
3500. 3500. 3500. 3500. 3500. 3500. 3500. 3500. 3500. 3500.
3500. 3500. 3500. 3500. 3500. 3500. 3500. 3500. 3500. 3500.
3500. 3500. 3500. 3500. 3500. 3500. 3500. 3500. 3500. 3500.
3500. 3500. 3500. 3500. 3500. 3500. 3500. 3500. 3500. 3500.
3500. 3500. 3500. 3500. 3500. 3500. 3500. 3500. 3500. 3500.
3500. 3500. 3500. 3500. 3500. 3500. 3500. 3500. 3500. 3500.
3500. 3500. 3500.]
Or, for example, the first 10 b-vectors using:
"""
print(gtab.bvecs[:10, :])
"""
::
array([[ 0. , 0. , 0. ],
[ 0.999979 , -0.00504001, -0.00402795],
[ 0. , 0.999992 , -0.00398794],
[-0.0257055 , 0.653861 , -0.756178 ],
[ 0.589518 , -0.769236 , -0.246462 ],
[-0.235785 , -0.529095 , -0.815147 ],
[-0.893578 , -0.263559 , -0.363394 ],
[ 0.79784 , 0.133726 , -0.587851 ],
[ 0.232937 , 0.931884 , -0.278087 ],
[ 0.93672 , 0.144139 , -0.31903 ]])
``gtab`` can be used to tell which part of the data contains the S0 volumes
(volumes that correspond to a b-value of 0).
"""
S0s = data[:, :, :, gtab.b0s_mask]
"""
Here we had only 1 S0, as we can verify by looking at the dimensions of ``S0s``:
"""
print(S0s.shape)
"""
``(128, 128, 60, 1)``
Just for fun, let's save this to a new Nifti file.
"""
nib.save(nib.Nifti1Image(S0s, img.get_affine()), 'HARDI193_S0.nii.gz')
"""
Now that we have learned how to load dMRI datasets, we can start the analysis.
See example :ref:`example_reconst_dti` to learn how to create FA maps.
.. include:: ../links_names.inc
"""
|
bsd-3-clause
|
Srisai85/scikit-learn
|
examples/covariance/plot_robust_vs_empirical_covariance.py
|
248
|
6359
|
r"""
=======================================
Robust vs Empirical covariance estimate
=======================================
The usual covariance maximum likelihood estimate is very sensitive to the
presence of outliers in the data set. In such a case, it would be better to
use a robust estimator of covariance to guarantee that the estimation is
resistant to "erroneous" observations in the data set.
Minimum Covariance Determinant Estimator
----------------------------------------
The Minimum Covariance Determinant estimator is a robust, high-breakdown point
(i.e. it can be used to estimate the covariance matrix of highly contaminated
datasets, up to
:math:`\frac{n_\text{samples} - n_\text{features}-1}{2}` outliers) estimator of
covariance. The idea is to find
:math:`\frac{n_\text{samples} + n_\text{features}+1}{2}`
observations whose empirical covariance has the smallest determinant, yielding
a "pure" subset of observations from which to compute standards estimates of
location and covariance. After a correction step aiming at compensating the
fact that the estimates were learned from only a portion of the initial data,
we end up with robust estimates of the data set location and covariance.
The Minimum Covariance Determinant estimator (MCD) was introduced by
P. J. Rousseeuw in [1]_.
Evaluation
----------
In this example, we compare the estimation errors that are made when using
various types of location and covariance estimates on contaminated Gaussian
distributed data sets:
- The mean and the empirical covariance of the full dataset, which break
down as soon as there are outliers in the data set
- The robust MCD, which has a low error provided
:math:`n_\text{samples} > 5n_\text{features}`
- The mean and the empirical covariance of the observations that are known
to be good ones. This can be considered as a "perfect" MCD estimation,
so one can trust our implementation by comparing to this case.
References
----------
.. [1] P. J. Rousseeuw. Least median of squares regression. J. Am
Stat Ass, 79:871, 1984.
.. [2] Johanna Hardin, David M Rocke. Journal of Computational and
Graphical Statistics. December 1, 2005, 14(4): 928-946.
.. [3] Zoubir A., Koivunen V., Chakhchoukh Y. and Muma M. (2012). Robust
estimation in signal processing: A tutorial-style treatment of
fundamental concepts. IEEE Signal Processing Magazine 29(4), 61-80.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn.covariance import EmpiricalCovariance, MinCovDet
# example settings
n_samples = 80
n_features = 5
repeat = 10
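# With these settings the MCD searches for the (80 + 5 + 1) / 2 = 43 observations
# whose empirical covariance has the smallest determinant, and stays valid for up
# to (80 - 5 - 1) / 2 = 37 outliers (see the formulas in the docstring above).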
# cast to int: the outlier counts are used as slice sizes below
range_n_outliers = np.concatenate(
    (np.linspace(0, n_samples / 8, 5),
     np.linspace(n_samples / 8, n_samples / 2, 5)[1:-1])).astype(int)
# definition of arrays to store results
err_loc_mcd = np.zeros((range_n_outliers.size, repeat))
err_cov_mcd = np.zeros((range_n_outliers.size, repeat))
err_loc_emp_full = np.zeros((range_n_outliers.size, repeat))
err_cov_emp_full = np.zeros((range_n_outliers.size, repeat))
err_loc_emp_pure = np.zeros((range_n_outliers.size, repeat))
err_cov_emp_pure = np.zeros((range_n_outliers.size, repeat))
# computation
for i, n_outliers in enumerate(range_n_outliers):
for j in range(repeat):
rng = np.random.RandomState(i * j)
# generate data
X = rng.randn(n_samples, n_features)
# add some outliers
outliers_index = rng.permutation(n_samples)[:n_outliers]
        # use the seeded rng (not the global np.random) so runs are reproducible
        outliers_offset = 10. * \
            (rng.randint(2, size=(n_outliers, n_features)) - 0.5)
X[outliers_index] += outliers_offset
inliers_mask = np.ones(n_samples).astype(bool)
inliers_mask[outliers_index] = False
# fit a Minimum Covariance Determinant (MCD) robust estimator to data
mcd = MinCovDet().fit(X)
# compare raw robust estimates with the true location and covariance
err_loc_mcd[i, j] = np.sum(mcd.location_ ** 2)
err_cov_mcd[i, j] = mcd.error_norm(np.eye(n_features))
# compare estimators learned from the full data set with true
# parameters
err_loc_emp_full[i, j] = np.sum(X.mean(0) ** 2)
err_cov_emp_full[i, j] = EmpiricalCovariance().fit(X).error_norm(
np.eye(n_features))
# compare with an empirical covariance learned from a pure data set
# (i.e. "perfect" mcd)
pure_X = X[inliers_mask]
pure_location = pure_X.mean(0)
pure_emp_cov = EmpiricalCovariance().fit(pure_X)
err_loc_emp_pure[i, j] = np.sum(pure_location ** 2)
err_cov_emp_pure[i, j] = pure_emp_cov.error_norm(np.eye(n_features))
# Display results
font_prop = matplotlib.font_manager.FontProperties(size=11)
plt.subplot(2, 1, 1)
plt.errorbar(range_n_outliers, err_loc_mcd.mean(1),
yerr=err_loc_mcd.std(1) / np.sqrt(repeat),
label="Robust location", color='m')
plt.errorbar(range_n_outliers, err_loc_emp_full.mean(1),
yerr=err_loc_emp_full.std(1) / np.sqrt(repeat),
label="Full data set mean", color='green')
plt.errorbar(range_n_outliers, err_loc_emp_pure.mean(1),
yerr=err_loc_emp_pure.std(1) / np.sqrt(repeat),
label="Pure data set mean", color='black')
plt.title("Influence of outliers on the location estimation")
plt.ylabel(r"Error ($||\mu - \hat{\mu}||_2^2$)")
plt.legend(loc="upper left", prop=font_prop)
plt.subplot(2, 1, 2)
x_size = range_n_outliers.size
plt.errorbar(range_n_outliers, err_cov_mcd.mean(1),
yerr=err_cov_mcd.std(1),
label="Robust covariance (mcd)", color='m')
# integer division keeps the slice indices valid under Python 3
plt.errorbar(range_n_outliers[:(x_size // 5 + 1)],
             err_cov_emp_full.mean(1)[:(x_size // 5 + 1)],
             yerr=err_cov_emp_full.std(1)[:(x_size // 5 + 1)],
             label="Full data set empirical covariance", color='green')
plt.plot(range_n_outliers[(x_size // 5):(x_size // 2 - 1)],
         err_cov_emp_full.mean(1)[(x_size // 5):(x_size // 2 - 1)], color='green',
         ls='--')
plt.errorbar(range_n_outliers, err_cov_emp_pure.mean(1),
yerr=err_cov_emp_pure.std(1),
label="Pure data set empirical covariance", color='black')
plt.title("Influence of outliers on the covariance estimation")
plt.xlabel("Amount of contamination (%)")
plt.ylabel("RMSE")
plt.legend(loc="upper center", prop=font_prop)
plt.show()
|
bsd-3-clause
|
JackKelly/neuralnilm_prototype
|
scripts/e144.py
|
2
|
5135
|
from __future__ import print_function, division
import matplotlib
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import Net, RealApplianceSource, BLSTMLayer, DimshuffleLayer
from lasagne.nonlinearities import sigmoid, rectify
from lasagne.objectives import crossentropy, mse
from lasagne.init import Uniform, Normal
from lasagne.layers import LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer
from lasagne.updates import adagrad, nesterov_momentum
from functools import partial
import os
from neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff
from neuralnilm.experiment import run_experiment
from neuralnilm.net import TrainingError
import __main__
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
SAVE_PLOT_INTERVAL = 250
GRADIENT_STEPS = 100
"""
e103
Discovered that bottom layer is hardly changing. So will try
just a single lstm layer
e104
standard init
lower learning rate
e106
lower learning rate to 0.001
e108
is e107 but with batch size of 5
e109
Normal(1) for LSTM
e110
* Back to Uniform(5) for LSTM
* Using nntools eb17bd923ef9ff2cacde2e92d7323b4e51bb5f1f
RESULTS: Seems to run fine again!
e111
* Try with nntools head
* peepholes=False
RESULTS: appears to be working well. Haven't seen a NaN,
even with training rate of 0.1
e112
* n_seq_per_batch = 50
e114
* Trying looking at layer by layer training again.
* Start with single LSTM layer
e115
* Learning rate = 1
e116
* Standard inits
e117
* Uniform(1) init
e119
* Learning rate 10
# Result: didn't work well!
e120
* init: Normal(1)
* not as good as Uniform(5)
e121
* Uniform(25)
e122
* Just 10 cells
* Uniform(5)
e125
* Pre-train lower layers
e128
* Add back all 5 appliances
* Seq length 1500
* skip_prob = 0.7
e129
* max_input_power = None
* 2nd layer has Uniform(5)
* pre-train bottom layer for 2000 epochs
* add third layer at 4000 epochs
e131
e138
* Trying to replicate e82 and then break it ;)
e140
diff
e141
conv1D layer has Uniform(1), as does 2nd LSTM layer
e142
diff AND power
e144
diff and power and max power is 5900
"""
def set_save_plot_interval(net, epoch):
net.save_plot_interval = SAVE_PLOT_INTERVAL
def exp_a(name):
source = RealApplianceSource(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
'hair straighteners',
'television'
# 'dish washer',
# ['washer dryer', 'washing machine']
],
max_appliance_powers=[300, 500, 200, 2500, 2400],
on_power_thresholds=[5, 5, 5, 5, 5],
max_input_power=5900,
min_on_durations=[60, 60, 60, 1800, 1800],
min_off_durations=[12, 12, 12, 1800, 600],
window=("2013-06-01", "2014-07-01"),
seq_length=1000,
output_one_appliance=False,
boolean_targets=False,
train_buildings=[1],
validation_buildings=[1],
skip_probability=0,
n_seq_per_batch=50,
subsample_target=5,
include_diff=True
)
net = Net(
experiment_name=name,
source=source,
save_plot_interval=250,
loss_function=crossentropy,
updates=partial(nesterov_momentum, learning_rate=1.0),
layers_config=[
{
'type': LSTMLayer,
'num_units': 30,
'W_in_to_cell': Uniform(5),
'gradient_steps': GRADIENT_STEPS,
'peepholes': False
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': Conv1DLayer,
'num_filters': 60,
'filter_length': 5,
'stride': 5,
'nonlinearity': sigmoid,
'W': Uniform(1)
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': LSTMLayer,
'num_units': 60,
'W_in_to_cell': Uniform(1),
'gradient_steps': GRADIENT_STEPS,
'peepholes': False
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': sigmoid
}
]
)
return net
def init_experiment(experiment):
full_exp_name = NAME + experiment
func_call = 'exp_{:s}(full_exp_name)'.format(experiment)
print("***********************************")
print("Preparing", full_exp_name, "...")
net = eval(func_call)
return net
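# Example: init_experiment('a') evaluates "exp_a(full_exp_name)" and returns the
# configured Net; only exp_a is defined in this file, so the other letters that
# main() iterates over assume matching exp_* functions exist.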
def main():
for experiment in list('abcd'):
full_exp_name = NAME + experiment
path = os.path.join(PATH, full_exp_name)
try:
net = init_experiment(experiment)
run_experiment(net, path, epochs=None)
except KeyboardInterrupt:
break
except TrainingError as e:
print("EXCEPTION:", e)
if __name__ == "__main__":
main()
|
mit
|
aewhatley/scikit-learn
|
sklearn/cluster/tests/test_hierarchical.py
|
230
|
19795
|
"""
Several basic tests for hierarchical clustering procedures
"""
# Authors: Vincent Michel, 2010, Gael Varoquaux 2012,
# Matteo Visconti di Oleggio Castello 2014
# License: BSD 3 clause
from tempfile import mkdtemp
import shutil
from functools import partial
import numpy as np
from scipy import sparse
from scipy.cluster import hierarchy
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn.cluster import ward_tree
from sklearn.cluster import AgglomerativeClustering, FeatureAgglomeration
from sklearn.cluster.hierarchical import (_hc_cut, _TREE_BUILDERS,
linkage_tree)
from sklearn.feature_extraction.image import grid_to_graph
from sklearn.metrics.pairwise import PAIRED_DISTANCES, cosine_distances,\
manhattan_distances, pairwise_distances
from sklearn.metrics.cluster import normalized_mutual_info_score
from sklearn.neighbors.graph import kneighbors_graph
from sklearn.cluster._hierarchical import average_merge, max_merge
from sklearn.utils.fast_dict import IntFloatDict
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns
def test_linkage_misc():
# Misc tests on linkage
rng = np.random.RandomState(42)
X = rng.normal(size=(5, 5))
assert_raises(ValueError, AgglomerativeClustering(linkage='foo').fit, X)
assert_raises(ValueError, linkage_tree, X, linkage='foo')
assert_raises(ValueError, linkage_tree, X, connectivity=np.ones((4, 4)))
# Smoke test FeatureAgglomeration
FeatureAgglomeration().fit(X)
    # test hierarchical clustering on a precomputed distances matrix
dis = cosine_distances(X)
res = linkage_tree(dis, affinity="precomputed")
assert_array_equal(res[0], linkage_tree(X, affinity="cosine")[0])
    # test hierarchical clustering with a callable affinity
res = linkage_tree(X, affinity=manhattan_distances)
assert_array_equal(res[0], linkage_tree(X, affinity="manhattan")[0])
def test_structured_linkage_tree():
# Check that we obtain the correct solution for structured linkage trees.
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
# Avoiding a mask with only 'True' entries
mask[4:7, 4:7] = 0
X = rng.randn(50, 100)
connectivity = grid_to_graph(*mask.shape)
for tree_builder in _TREE_BUILDERS.values():
children, n_components, n_leaves, parent = \
tree_builder(X.T, connectivity)
n_nodes = 2 * X.shape[1] - 1
assert_true(len(children) + n_leaves == n_nodes)
# Check that ward_tree raises a ValueError with a connectivity matrix
# of the wrong shape
assert_raises(ValueError,
tree_builder, X.T, np.ones((4, 4)))
# Check that fitting with no samples raises an error
assert_raises(ValueError,
tree_builder, X.T[:0], connectivity)
def test_unstructured_linkage_tree():
# Check that we obtain the correct solution for unstructured linkage trees.
rng = np.random.RandomState(0)
X = rng.randn(50, 100)
for this_X in (X, X[0]):
        # With a number of clusters specified, just for the sake of
        # raising a warning and testing the warning code
with ignore_warnings():
children, n_nodes, n_leaves, parent = assert_warns(
UserWarning, ward_tree, this_X.T, n_clusters=10)
n_nodes = 2 * X.shape[1] - 1
assert_equal(len(children) + n_leaves, n_nodes)
for tree_builder in _TREE_BUILDERS.values():
for this_X in (X, X[0]):
with ignore_warnings():
children, n_nodes, n_leaves, parent = assert_warns(
UserWarning, tree_builder, this_X.T, n_clusters=10)
n_nodes = 2 * X.shape[1] - 1
assert_equal(len(children) + n_leaves, n_nodes)
def test_height_linkage_tree():
# Check that the height of the results of linkage tree is sorted.
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
X = rng.randn(50, 100)
connectivity = grid_to_graph(*mask.shape)
for linkage_func in _TREE_BUILDERS.values():
children, n_nodes, n_leaves, parent = linkage_func(X.T, connectivity)
n_nodes = 2 * X.shape[1] - 1
assert_true(len(children) + n_leaves == n_nodes)
def test_agglomerative_clustering():
# Check that we obtain the correct number of clusters with
# agglomerative clustering.
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
n_samples = 100
X = rng.randn(n_samples, 50)
connectivity = grid_to_graph(*mask.shape)
for linkage in ("ward", "complete", "average"):
clustering = AgglomerativeClustering(n_clusters=10,
connectivity=connectivity,
linkage=linkage)
clustering.fit(X)
# test caching
try:
tempdir = mkdtemp()
clustering = AgglomerativeClustering(
n_clusters=10, connectivity=connectivity,
memory=tempdir,
linkage=linkage)
clustering.fit(X)
labels = clustering.labels_
assert_true(np.size(np.unique(labels)) == 10)
finally:
shutil.rmtree(tempdir)
# Turn caching off now
clustering = AgglomerativeClustering(
n_clusters=10, connectivity=connectivity, linkage=linkage)
# Check that we obtain the same solution with early-stopping of the
# tree building
clustering.compute_full_tree = False
clustering.fit(X)
assert_almost_equal(normalized_mutual_info_score(clustering.labels_,
labels), 1)
clustering.connectivity = None
clustering.fit(X)
assert_true(np.size(np.unique(clustering.labels_)) == 10)
        # Check that a connectivity matrix with the wrong shape raises a ValueError
clustering = AgglomerativeClustering(
n_clusters=10,
connectivity=sparse.lil_matrix(
connectivity.toarray()[:10, :10]),
linkage=linkage)
assert_raises(ValueError, clustering.fit, X)
        # Test that using ward with a metric other than euclidean raises an
        # exception
clustering = AgglomerativeClustering(
n_clusters=10,
connectivity=connectivity.toarray(),
affinity="manhattan",
linkage="ward")
assert_raises(ValueError, clustering.fit, X)
    # Test that metrics other than euclidean work with complete linkage
for affinity in PAIRED_DISTANCES.keys():
# Compare our (structured) implementation to scipy
clustering = AgglomerativeClustering(
n_clusters=10,
connectivity=np.ones((n_samples, n_samples)),
affinity=affinity,
linkage="complete")
clustering.fit(X)
clustering2 = AgglomerativeClustering(
n_clusters=10,
connectivity=None,
affinity=affinity,
linkage="complete")
clustering2.fit(X)
assert_almost_equal(normalized_mutual_info_score(clustering2.labels_,
clustering.labels_),
1)
# Test that using a distance matrix (affinity = 'precomputed') has same
# results (with connectivity constraints)
clustering = AgglomerativeClustering(n_clusters=10,
connectivity=connectivity,
linkage="complete")
clustering.fit(X)
X_dist = pairwise_distances(X)
clustering2 = AgglomerativeClustering(n_clusters=10,
connectivity=connectivity,
affinity='precomputed',
linkage="complete")
clustering2.fit(X_dist)
assert_array_equal(clustering.labels_, clustering2.labels_)
def test_ward_agglomeration():
# Check that we obtain the correct solution in a simplistic case
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
X = rng.randn(50, 100)
connectivity = grid_to_graph(*mask.shape)
agglo = FeatureAgglomeration(n_clusters=5, connectivity=connectivity)
agglo.fit(X)
assert_true(np.size(np.unique(agglo.labels_)) == 5)
X_red = agglo.transform(X)
assert_true(X_red.shape[1] == 5)
X_full = agglo.inverse_transform(X_red)
assert_true(np.unique(X_full[0]).size == 5)
assert_array_almost_equal(agglo.transform(X_full), X_red)
# Check that fitting with no samples raises a ValueError
assert_raises(ValueError, agglo.fit, X[:0])
def assess_same_labelling(cut1, cut2):
"""Util for comparison with scipy"""
co_clust = []
for cut in [cut1, cut2]:
n = len(cut)
k = cut.max() + 1
ecut = np.zeros((n, k))
ecut[np.arange(n), cut] = 1
co_clust.append(np.dot(ecut, ecut.T))
assert_true((co_clust[0] == co_clust[1]).all())
def test_scikit_vs_scipy():
# Test scikit linkage with full connectivity (i.e. unstructured) vs scipy
n, p, k = 10, 5, 3
rng = np.random.RandomState(0)
# Not using a lil_matrix here, just to check that non sparse
# matrices are well handled
connectivity = np.ones((n, n))
for linkage in _TREE_BUILDERS.keys():
for i in range(5):
X = .1 * rng.normal(size=(n, p))
X -= 4. * np.arange(n)[:, np.newaxis]
X -= X.mean(axis=1)[:, np.newaxis]
out = hierarchy.linkage(X, method=linkage)
children_ = out[:, :2].astype(np.int)
children, _, n_leaves, _ = _TREE_BUILDERS[linkage](X, connectivity)
cut = _hc_cut(k, children, n_leaves)
cut_ = _hc_cut(k, children_, n_leaves)
assess_same_labelling(cut, cut_)
# Test error management in _hc_cut
assert_raises(ValueError, _hc_cut, n_leaves + 1, children, n_leaves)
def test_connectivity_propagation():
# Check that connectivity in the ward tree is propagated correctly during
# merging.
X = np.array([(.014, .120), (.014, .099), (.014, .097),
(.017, .153), (.017, .153), (.018, .153),
(.018, .153), (.018, .153), (.018, .153),
(.018, .153), (.018, .153), (.018, .153),
(.018, .152), (.018, .149), (.018, .144)])
connectivity = kneighbors_graph(X, 10, include_self=False)
ward = AgglomerativeClustering(
n_clusters=4, connectivity=connectivity, linkage='ward')
# If changes are not propagated correctly, fit crashes with an
# IndexError
ward.fit(X)
def test_ward_tree_children_order():
# Check that children are ordered in the same way for both structured and
# unstructured versions of ward_tree.
# test on five random datasets
n, p = 10, 5
rng = np.random.RandomState(0)
connectivity = np.ones((n, n))
for i in range(5):
X = .1 * rng.normal(size=(n, p))
X -= 4. * np.arange(n)[:, np.newaxis]
X -= X.mean(axis=1)[:, np.newaxis]
out_unstructured = ward_tree(X)
out_structured = ward_tree(X, connectivity=connectivity)
assert_array_equal(out_unstructured[0], out_structured[0])
def test_ward_linkage_tree_return_distance():
# Test return_distance option on linkage and ward trees
# test that return_distance when set true, gives same
# output on both structured and unstructured clustering.
n, p = 10, 5
rng = np.random.RandomState(0)
connectivity = np.ones((n, n))
for i in range(5):
X = .1 * rng.normal(size=(n, p))
X -= 4. * np.arange(n)[:, np.newaxis]
X -= X.mean(axis=1)[:, np.newaxis]
out_unstructured = ward_tree(X, return_distance=True)
out_structured = ward_tree(X, connectivity=connectivity,
return_distance=True)
# get children
children_unstructured = out_unstructured[0]
children_structured = out_structured[0]
# check if we got the same clusters
assert_array_equal(children_unstructured, children_structured)
# check if the distances are the same
dist_unstructured = out_unstructured[-1]
dist_structured = out_structured[-1]
assert_array_almost_equal(dist_unstructured, dist_structured)
for linkage in ['average', 'complete']:
structured_items = linkage_tree(
X, connectivity=connectivity, linkage=linkage,
return_distance=True)[-1]
unstructured_items = linkage_tree(
X, linkage=linkage, return_distance=True)[-1]
structured_dist = structured_items[-1]
unstructured_dist = unstructured_items[-1]
structured_children = structured_items[0]
unstructured_children = unstructured_items[0]
assert_array_almost_equal(structured_dist, unstructured_dist)
assert_array_almost_equal(
structured_children, unstructured_children)
# test on the following dataset where we know the truth
# taken from scipy/cluster/tests/hierarchy_test_data.py
X = np.array([[1.43054825, -7.5693489],
[6.95887839, 6.82293382],
[2.87137846, -9.68248579],
[7.87974764, -6.05485803],
[8.24018364, -6.09495602],
[7.39020262, 8.54004355]])
# truth
linkage_X_ward = np.array([[3., 4., 0.36265956, 2.],
[1., 5., 1.77045373, 2.],
[0., 2., 2.55760419, 2.],
[6., 8., 9.10208346, 4.],
[7., 9., 24.7784379, 6.]])
linkage_X_complete = np.array(
[[3., 4., 0.36265956, 2.],
[1., 5., 1.77045373, 2.],
[0., 2., 2.55760419, 2.],
[6., 8., 6.96742194, 4.],
[7., 9., 18.77445997, 6.]])
linkage_X_average = np.array(
[[3., 4., 0.36265956, 2.],
[1., 5., 1.77045373, 2.],
[0., 2., 2.55760419, 2.],
[6., 8., 6.55832839, 4.],
[7., 9., 15.44089605, 6.]])
n_samples, n_features = np.shape(X)
connectivity_X = np.ones((n_samples, n_samples))
out_X_unstructured = ward_tree(X, return_distance=True)
out_X_structured = ward_tree(X, connectivity=connectivity_X,
return_distance=True)
# check that the labels are the same
assert_array_equal(linkage_X_ward[:, :2], out_X_unstructured[0])
assert_array_equal(linkage_X_ward[:, :2], out_X_structured[0])
# check that the distances are correct
assert_array_almost_equal(linkage_X_ward[:, 2], out_X_unstructured[4])
assert_array_almost_equal(linkage_X_ward[:, 2], out_X_structured[4])
linkage_options = ['complete', 'average']
X_linkage_truth = [linkage_X_complete, linkage_X_average]
for (linkage, X_truth) in zip(linkage_options, X_linkage_truth):
out_X_unstructured = linkage_tree(
X, return_distance=True, linkage=linkage)
out_X_structured = linkage_tree(
X, connectivity=connectivity_X, linkage=linkage,
return_distance=True)
# check that the labels are the same
assert_array_equal(X_truth[:, :2], out_X_unstructured[0])
assert_array_equal(X_truth[:, :2], out_X_structured[0])
# check that the distances are correct
assert_array_almost_equal(X_truth[:, 2], out_X_unstructured[4])
assert_array_almost_equal(X_truth[:, 2], out_X_structured[4])
def test_connectivity_fixing_non_lil():
# Check non regression of a bug if a non item assignable connectivity is
# provided with more than one component.
# create dummy data
x = np.array([[0, 0], [1, 1]])
# create a mask with several components to force connectivity fixing
m = np.array([[True, False], [False, True]])
c = grid_to_graph(n_x=2, n_y=2, mask=m)
w = AgglomerativeClustering(connectivity=c, linkage='ward')
assert_warns(UserWarning, w.fit, x)
def test_int_float_dict():
rng = np.random.RandomState(0)
keys = np.unique(rng.randint(100, size=10).astype(np.intp))
values = rng.rand(len(keys))
d = IntFloatDict(keys, values)
for key, value in zip(keys, values):
assert d[key] == value
other_keys = np.arange(50).astype(np.intp)[::2]
other_values = 0.5 * np.ones(50)[::2]
other = IntFloatDict(other_keys, other_values)
# Complete smoke test
max_merge(d, other, mask=np.ones(100, dtype=np.intp), n_a=1, n_b=1)
average_merge(d, other, mask=np.ones(100, dtype=np.intp), n_a=1, n_b=1)
def test_connectivity_callable():
rng = np.random.RandomState(0)
X = rng.rand(20, 5)
connectivity = kneighbors_graph(X, 3, include_self=False)
aglc1 = AgglomerativeClustering(connectivity=connectivity)
aglc2 = AgglomerativeClustering(
connectivity=partial(kneighbors_graph, n_neighbors=3, include_self=False))
aglc1.fit(X)
aglc2.fit(X)
assert_array_equal(aglc1.labels_, aglc2.labels_)
def test_connectivity_ignores_diagonal():
rng = np.random.RandomState(0)
X = rng.rand(20, 5)
connectivity = kneighbors_graph(X, 3, include_self=False)
connectivity_include_self = kneighbors_graph(X, 3, include_self=True)
aglc1 = AgglomerativeClustering(connectivity=connectivity)
aglc2 = AgglomerativeClustering(connectivity=connectivity_include_self)
aglc1.fit(X)
aglc2.fit(X)
assert_array_equal(aglc1.labels_, aglc2.labels_)
def test_compute_full_tree():
# Test that the full tree is computed if n_clusters is small
rng = np.random.RandomState(0)
X = rng.randn(10, 2)
connectivity = kneighbors_graph(X, 5, include_self=False)
    # When n_clusters is small, the full tree should be built,
    # i.e. the number of merges should be n_samples - 1
agc = AgglomerativeClustering(n_clusters=2, connectivity=connectivity)
agc.fit(X)
n_samples = X.shape[0]
n_nodes = agc.children_.shape[0]
assert_equal(n_nodes, n_samples - 1)
    # When n_clusters is large (greater than the max of 100 and 0.02 * n_samples),
    # tree building should stop early, once n_clusters clusters remain.
n_clusters = 101
X = rng.randn(200, 2)
connectivity = kneighbors_graph(X, 10, include_self=False)
agc = AgglomerativeClustering(n_clusters=n_clusters,
connectivity=connectivity)
agc.fit(X)
n_samples = X.shape[0]
n_nodes = agc.children_.shape[0]
assert_equal(n_nodes, n_samples - n_clusters)
def test_n_components():
# Test n_components returned by linkage, average and ward tree
rng = np.random.RandomState(0)
X = rng.rand(5, 5)
# Connectivity matrix having five components.
connectivity = np.eye(5)
for linkage_func in _TREE_BUILDERS.values():
assert_equal(ignore_warnings(linkage_func)(X, connectivity)[1], 5)
def test_agg_n_clusters():
# Test that an error is raised when n_clusters <= 0
rng = np.random.RandomState(0)
X = rng.rand(20, 10)
for n_clus in [-1, 0]:
agc = AgglomerativeClustering(n_clusters=n_clus)
msg = ("n_clusters should be an integer greater than 0."
" %s was provided." % str(agc.n_clusters))
assert_raise_message(ValueError, msg, agc.fit, X)
|
bsd-3-clause
|
valexandersaulys/airbnb_kaggle_contest
|
venv/lib/python3.4/site-packages/sklearn/tests/test_cross_validation.py
|
6
|
46787
|
"""Test the cross_validation module"""
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import coo_matrix
from scipy.sparse import csr_matrix
from scipy import stats
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
from sklearn import cross_validation as cval
from sklearn.datasets import make_regression
from sklearn.datasets import load_boston
from sklearn.datasets import load_digits
from sklearn.datasets import load_iris
from sklearn.datasets import make_multilabel_classification
from sklearn.metrics import explained_variance_score
from sklearn.metrics import make_scorer
from sklearn.metrics import precision_score
from sklearn.externals import six
from sklearn.externals.six.moves import zip
from sklearn.linear_model import Ridge
from sklearn.multiclass import OneVsRestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.cluster import KMeans
from sklearn.preprocessing import Imputer
from sklearn.pipeline import Pipeline
class MockClassifier(object):
"""Dummy classifier to test the cross-validation"""
def __init__(self, a=0, allow_nd=False):
self.a = a
self.allow_nd = allow_nd
def fit(self, X, Y=None, sample_weight=None, class_prior=None,
sparse_sample_weight=None, sparse_param=None, dummy_int=None,
dummy_str=None, dummy_obj=None, callback=None):
"""The dummy arguments are to test that this fit function can
accept non-array arguments through cross-validation, such as:
- int
- str (this is actually array-like)
- object
- function
"""
self.dummy_int = dummy_int
self.dummy_str = dummy_str
self.dummy_obj = dummy_obj
if callback is not None:
callback(self)
if self.allow_nd:
X = X.reshape(len(X), -1)
if X.ndim >= 3 and not self.allow_nd:
raise ValueError('X cannot be d')
if sample_weight is not None:
assert_true(sample_weight.shape[0] == X.shape[0],
'MockClassifier extra fit_param sample_weight.shape[0]'
' is {0}, should be {1}'.format(sample_weight.shape[0],
X.shape[0]))
if class_prior is not None:
assert_true(class_prior.shape[0] == len(np.unique(y)),
'MockClassifier extra fit_param class_prior.shape[0]'
' is {0}, should be {1}'.format(class_prior.shape[0],
len(np.unique(y))))
if sparse_sample_weight is not None:
fmt = ('MockClassifier extra fit_param sparse_sample_weight'
'.shape[0] is {0}, should be {1}')
assert_true(sparse_sample_weight.shape[0] == X.shape[0],
fmt.format(sparse_sample_weight.shape[0], X.shape[0]))
if sparse_param is not None:
fmt = ('MockClassifier extra fit_param sparse_param.shape '
'is ({0}, {1}), should be ({2}, {3})')
assert_true(sparse_param.shape == P_sparse.shape,
fmt.format(sparse_param.shape[0],
sparse_param.shape[1],
P_sparse.shape[0], P_sparse.shape[1]))
return self
def predict(self, T):
if self.allow_nd:
T = T.reshape(len(T), -1)
return T[:, 0]
def score(self, X=None, Y=None):
return 1. / (1 + np.abs(self.a))
def get_params(self, deep=False):
return {'a': self.a, 'allow_nd': self.allow_nd}
X = np.ones((10, 2))
X_sparse = coo_matrix(X)
W_sparse = coo_matrix((np.array([1]), (np.array([1]), np.array([0]))),
shape=(10, 1))
P_sparse = coo_matrix(np.eye(5))
# avoid StratifiedKFold's Warning about least populated class in y
y = np.arange(10) % 3
##############################################################################
# Tests
def check_valid_split(train, test, n_samples=None):
# Use python sets to get more informative assertion failure messages
train, test = set(train), set(test)
# Train and test split should not overlap
assert_equal(train.intersection(test), set())
if n_samples is not None:
        # Check that the union of the train and test splits covers all the indices
assert_equal(train.union(test), set(range(n_samples)))
def check_cv_coverage(cv, expected_n_iter=None, n_samples=None):
    # Check that all the samples appear at least once in a test fold
if expected_n_iter is not None:
assert_equal(len(cv), expected_n_iter)
else:
expected_n_iter = len(cv)
collected_test_samples = set()
iterations = 0
for train, test in cv:
check_valid_split(train, test, n_samples=n_samples)
iterations += 1
collected_test_samples.update(test)
# Check that the accumulated test samples cover the whole dataset
assert_equal(iterations, expected_n_iter)
if n_samples is not None:
assert_equal(collected_test_samples, set(range(n_samples)))
def test_kfold_valueerrors():
    # Check that errors are raised if there are not enough samples
assert_raises(ValueError, cval.KFold, 3, 4)
# Check that a warning is raised if the least populated class has too few
# members.
y = [3, 3, -1, -1, 2]
cv = assert_warns_message(Warning, "The least populated class",
cval.StratifiedKFold, y, 3)
    # Check that despite the warning the folds are still computed, even
    # though all the classes are not necessarily represented on each
    # side of the split at each iteration
check_cv_coverage(cv, expected_n_iter=3, n_samples=len(y))
# Error when number of folds is <= 1
assert_raises(ValueError, cval.KFold, 2, 0)
assert_raises(ValueError, cval.KFold, 2, 1)
assert_raises(ValueError, cval.StratifiedKFold, y, 0)
assert_raises(ValueError, cval.StratifiedKFold, y, 1)
# When n is not integer:
assert_raises(ValueError, cval.KFold, 2.5, 2)
# When n_folds is not integer:
assert_raises(ValueError, cval.KFold, 5, 1.5)
assert_raises(ValueError, cval.StratifiedKFold, y, 1.5)
def test_kfold_indices():
# Check all indices are returned in the test folds
kf = cval.KFold(300, 3)
check_cv_coverage(kf, expected_n_iter=3, n_samples=300)
# Check all indices are returned in the test folds even when equal-sized
# folds are not possible
kf = cval.KFold(17, 3)
check_cv_coverage(kf, expected_n_iter=3, n_samples=17)
def test_kfold_no_shuffle():
# Manually check that KFold preserves the data ordering on toy datasets
splits = iter(cval.KFold(4, 2))
train, test = next(splits)
assert_array_equal(test, [0, 1])
assert_array_equal(train, [2, 3])
train, test = next(splits)
assert_array_equal(test, [2, 3])
assert_array_equal(train, [0, 1])
splits = iter(cval.KFold(5, 2))
train, test = next(splits)
assert_array_equal(test, [0, 1, 2])
assert_array_equal(train, [3, 4])
train, test = next(splits)
assert_array_equal(test, [3, 4])
assert_array_equal(train, [0, 1, 2])
def test_stratified_kfold_no_shuffle():
# Manually check that StratifiedKFold preserves the data ordering as much
# as possible on toy datasets in order to avoid hiding sample dependencies
# when possible
splits = iter(cval.StratifiedKFold([1, 1, 0, 0], 2))
train, test = next(splits)
assert_array_equal(test, [0, 2])
assert_array_equal(train, [1, 3])
train, test = next(splits)
assert_array_equal(test, [1, 3])
assert_array_equal(train, [0, 2])
splits = iter(cval.StratifiedKFold([1, 1, 1, 0, 0, 0, 0], 2))
train, test = next(splits)
assert_array_equal(test, [0, 1, 3, 4])
assert_array_equal(train, [2, 5, 6])
train, test = next(splits)
assert_array_equal(test, [2, 5, 6])
assert_array_equal(train, [0, 1, 3, 4])
def test_stratified_kfold_ratios():
# Check that stratified kfold preserves label ratios in individual splits
# Repeat with shuffling turned off and on
n_samples = 1000
labels = np.array([4] * int(0.10 * n_samples) +
[0] * int(0.89 * n_samples) +
[1] * int(0.01 * n_samples))
for shuffle in [False, True]:
for train, test in cval.StratifiedKFold(labels, 5, shuffle=shuffle):
assert_almost_equal(np.sum(labels[train] == 4) / len(train), 0.10,
2)
assert_almost_equal(np.sum(labels[train] == 0) / len(train), 0.89,
2)
assert_almost_equal(np.sum(labels[train] == 1) / len(train), 0.01,
2)
assert_almost_equal(np.sum(labels[test] == 4) / len(test), 0.10, 2)
assert_almost_equal(np.sum(labels[test] == 0) / len(test), 0.89, 2)
assert_almost_equal(np.sum(labels[test] == 1) / len(test), 0.01, 2)
def test_kfold_balance():
# Check that KFold returns folds with balanced sizes
for kf in [cval.KFold(i, 5) for i in range(11, 17)]:
sizes = []
for _, test in kf:
sizes.append(len(test))
assert_true((np.max(sizes) - np.min(sizes)) <= 1)
assert_equal(np.sum(sizes), kf.n)
def test_stratifiedkfold_balance():
# Check that StratifiedKFold returns folds with balanced sizes (only when
# stratification is possible)
# Repeat with shuffling turned off and on
labels = [0] * 3 + [1] * 14
for shuffle in [False, True]:
for skf in [cval.StratifiedKFold(labels[:i], 3, shuffle=shuffle)
for i in range(11, 17)]:
sizes = []
for _, test in skf:
sizes.append(len(test))
assert_true((np.max(sizes) - np.min(sizes)) <= 1)
assert_equal(np.sum(sizes), skf.n)
def test_shuffle_kfold():
# Check the indices are shuffled properly, and that all indices are
# returned in the different test folds
kf = cval.KFold(300, 3, shuffle=True, random_state=0)
ind = np.arange(300)
all_folds = None
for train, test in kf:
assert_true(np.any(np.arange(100) != ind[test]))
assert_true(np.any(np.arange(100, 200) != ind[test]))
assert_true(np.any(np.arange(200, 300) != ind[test]))
if all_folds is None:
all_folds = ind[test].copy()
else:
all_folds = np.concatenate((all_folds, ind[test]))
all_folds.sort()
assert_array_equal(all_folds, ind)
def test_shuffle_stratifiedkfold():
# Check that shuffling is happening when requested, and for proper
# sample coverage
labels = [0] * 20 + [1] * 20
kf0 = list(cval.StratifiedKFold(labels, 5, shuffle=True, random_state=0))
kf1 = list(cval.StratifiedKFold(labels, 5, shuffle=True, random_state=1))
for (_, test0), (_, test1) in zip(kf0, kf1):
assert_true(set(test0) != set(test1))
check_cv_coverage(kf0, expected_n_iter=5, n_samples=40)
def test_kfold_can_detect_dependent_samples_on_digits(): # see #2372
# The digits samples are dependent: they are apparently grouped by authors
# although we don't have any information on the groups segment locations
# for this data. We can highlight this fact by computing k-fold cross-
# validation with and without shuffling: we observe that the shuffling case
# wrongly makes the IID assumption and is therefore too optimistic: it
# estimates a much higher accuracy (around 0.96) than the non-shuffling
# variant (around 0.86).
digits = load_digits()
X, y = digits.data[:800], digits.target[:800]
model = SVC(C=10, gamma=0.005)
n = len(y)
cv = cval.KFold(n, 5, shuffle=False)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(0.88, mean_score)
assert_greater(mean_score, 0.85)
# Shuffling the data artificially breaks the dependency and hides the
# overfitting of the model with regards to the writing style of the authors
# by yielding a seriously overestimated score:
cv = cval.KFold(n, 5, shuffle=True, random_state=0)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(mean_score, 0.95)
cv = cval.KFold(n, 5, shuffle=True, random_state=1)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(mean_score, 0.95)
# Similarly, StratifiedKFold should try to shuffle the data as little
# as possible (while respecting the balanced class constraints)
# and thus be able to detect the dependency by not overestimating
# the CV score either. As the digits dataset is approximately balanced
# the estimated mean score is close to the score measured with
# non-shuffled KFold
cv = cval.StratifiedKFold(y, 5)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(0.88, mean_score)
assert_greater(mean_score, 0.85)
def test_label_kfold():
rng = np.random.RandomState(0)
# Parameters of the test
n_labels = 15
n_samples = 1000
n_folds = 5
# Construct the test data
tolerance = 0.05 * n_samples # 5 percent error allowed
labels = rng.randint(0, n_labels, n_samples)
folds = cval.LabelKFold(labels, n_folds=n_folds).idxs
ideal_n_labels_per_fold = n_samples // n_folds
# Check that folds have approximately the same size
assert_equal(len(folds), len(labels))
for i in np.unique(folds):
assert_greater_equal(tolerance,
abs(sum(folds == i) - ideal_n_labels_per_fold))
# Check that each label appears only in 1 fold
for label in np.unique(labels):
assert_equal(len(np.unique(folds[labels == label])), 1)
# Check that no label is on both sides of the split
labels = np.asarray(labels, dtype=object)
for train, test in cval.LabelKFold(labels, n_folds=n_folds):
assert_equal(len(np.intersect1d(labels[train], labels[test])), 0)
# Construct the test data
labels = ['Albert', 'Jean', 'Bertrand', 'Michel', 'Jean',
'Francis', 'Robert', 'Michel', 'Rachel', 'Lois',
'Michelle', 'Bernard', 'Marion', 'Laura', 'Jean',
'Rachel', 'Franck', 'John', 'Gael', 'Anna', 'Alix',
'Robert', 'Marion', 'David', 'Tony', 'Abel', 'Becky',
'Madmood', 'Cary', 'Mary', 'Alexandre', 'David', 'Francis',
'Barack', 'Abdoul', 'Rasha', 'Xi', 'Silvia']
labels = np.asarray(labels, dtype=object)
n_labels = len(np.unique(labels))
n_samples = len(labels)
n_folds = 5
tolerance = 0.05 * n_samples # 5 percent error allowed
folds = cval.LabelKFold(labels, n_folds=n_folds).idxs
ideal_n_labels_per_fold = n_samples // n_folds
# Check that folds have approximately the same size
assert_equal(len(folds), len(labels))
for i in np.unique(folds):
assert_greater_equal(tolerance,
abs(sum(folds == i) - ideal_n_labels_per_fold))
# Check that each label appears only in 1 fold
for label in np.unique(labels):
assert_equal(len(np.unique(folds[labels == label])), 1)
# Check that no label is on both sides of the split
for train, test in cval.LabelKFold(labels, n_folds=n_folds):
assert_equal(len(np.intersect1d(labels[train], labels[test])), 0)
# Should fail if there are more folds than labels
labels = np.array([1, 1, 1, 2, 2])
assert_raises(ValueError, cval.LabelKFold, labels, n_folds=3)
def test_shuffle_split():
ss1 = cval.ShuffleSplit(10, test_size=0.2, random_state=0)
ss2 = cval.ShuffleSplit(10, test_size=2, random_state=0)
ss3 = cval.ShuffleSplit(10, test_size=np.int32(2), random_state=0)
for typ in six.integer_types:
ss4 = cval.ShuffleSplit(10, test_size=typ(2), random_state=0)
for t1, t2, t3, t4 in zip(ss1, ss2, ss3, ss4):
assert_array_equal(t1[0], t2[0])
assert_array_equal(t2[0], t3[0])
assert_array_equal(t3[0], t4[0])
assert_array_equal(t1[1], t2[1])
assert_array_equal(t2[1], t3[1])
assert_array_equal(t3[1], t4[1])
def test_stratified_shuffle_split_init():
y = np.asarray([0, 1, 1, 1, 2, 2, 2])
# Check that error is raised if there is a class with only one sample
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.2)
# Check that error is raised if the test set size is smaller than n_classes
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 2)
# Check that error is raised if the train set size is smaller than
# n_classes
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 3, 2)
y = np.asarray([0, 0, 0, 1, 1, 1, 2, 2, 2])
# Check that errors are raised if there is not enough samples
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.5, 0.6)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 8, 0.6)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.6, 8)
# Train size or test size too small
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, train_size=2)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, test_size=2)
def test_stratified_shuffle_split_iter():
ys = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]),
np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
np.array([-1] * 800 + [1] * 50)
]
for y in ys:
sss = cval.StratifiedShuffleSplit(y, 6, test_size=0.33,
random_state=0)
for train, test in sss:
assert_array_equal(np.unique(y[train]), np.unique(y[test]))
# Checks if folds keep classes proportions
p_train = (np.bincount(np.unique(y[train], return_inverse=True)[1])
/ float(len(y[train])))
p_test = (np.bincount(np.unique(y[test], return_inverse=True)[1])
/ float(len(y[test])))
assert_array_almost_equal(p_train, p_test, 1)
assert_equal(y[train].size + y[test].size, y.size)
assert_array_equal(np.intersect1d(train, test), [])
def test_stratified_shuffle_split_even():
# Test the StratifiedShuffleSplit, indices are drawn with an
# equal chance
n_folds = 5
n_iter = 1000
def assert_counts_are_ok(idx_counts, p):
# Here we test that the distribution of the counts
# per index is close enough to a binomial
threshold = 0.05 / n_splits
bf = stats.binom(n_splits, p)
for count in idx_counts:
p = bf.pmf(count)
assert_true(p > threshold,
"An index is not drawn with chance corresponding "
"to even draws")
for n_samples in (6, 22):
labels = np.array((n_samples // 2) * [0, 1])
splits = cval.StratifiedShuffleSplit(labels, n_iter=n_iter,
test_size=1. / n_folds,
random_state=0)
train_counts = [0] * n_samples
test_counts = [0] * n_samples
n_splits = 0
for train, test in splits:
n_splits += 1
for counter, ids in [(train_counts, train), (test_counts, test)]:
for id in ids:
counter[id] += 1
assert_equal(n_splits, n_iter)
assert_equal(len(train), splits.n_train)
assert_equal(len(test), splits.n_test)
assert_equal(len(set(train).intersection(test)), 0)
label_counts = np.unique(labels)
assert_equal(splits.test_size, 1.0 / n_folds)
assert_equal(splits.n_train + splits.n_test, len(labels))
assert_equal(len(label_counts), 2)
ex_test_p = float(splits.n_test) / n_samples
ex_train_p = float(splits.n_train) / n_samples
assert_counts_are_ok(train_counts, ex_train_p)
assert_counts_are_ok(test_counts, ex_test_p)
def test_predefinedsplit_with_kfold_split():
# Check that PredefinedSplit can reproduce a split generated by KFold.
folds = -1 * np.ones(10)
kf_train = []
kf_test = []
for i, (train_ind, test_ind) in enumerate(cval.KFold(10, 5, shuffle=True)):
kf_train.append(train_ind)
kf_test.append(test_ind)
folds[test_ind] = i
ps_train = []
ps_test = []
ps = cval.PredefinedSplit(folds)
for train_ind, test_ind in ps:
ps_train.append(train_ind)
ps_test.append(test_ind)
assert_array_equal(ps_train, kf_train)
assert_array_equal(ps_test, kf_test)
def test_label_shuffle_split():
ys = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]),
np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
]
for y in ys:
n_iter = 6
test_size = 1. / 3
slo = cval.LabelShuffleSplit(y, n_iter, test_size=test_size,
random_state=0)
# Make sure the repr works
repr(slo)
# Test that the length is correct
assert_equal(len(slo), n_iter)
y_unique = np.unique(y)
for train, test in slo:
# First test: no train label is in the test set and vice versa
y_train_unique = np.unique(y[train])
y_test_unique = np.unique(y[test])
assert_false(np.any(np.in1d(y[train], y_test_unique)))
assert_false(np.any(np.in1d(y[test], y_train_unique)))
# Second test: train and test add up to all the data
assert_equal(y[train].size + y[test].size, y.size)
# Third test: train and test are disjoint
assert_array_equal(np.intersect1d(train, test), [])
# Fourth test: # unique train and test labels are correct,
# +- 1 for rounding error
assert_true(abs(len(y_test_unique) -
round(test_size * len(y_unique))) <= 1)
assert_true(abs(len(y_train_unique) -
round((1.0 - test_size) * len(y_unique))) <= 1)
def test_leave_label_out_changing_labels():
# Check that LeaveOneLabelOut and LeavePLabelOut work normally if
# the labels variable is changed before calling __iter__
labels = np.array([0, 1, 2, 1, 1, 2, 0, 0])
labels_changing = np.array(labels, copy=True)
lolo = cval.LeaveOneLabelOut(labels)
lolo_changing = cval.LeaveOneLabelOut(labels_changing)
lplo = cval.LeavePLabelOut(labels, p=2)
lplo_changing = cval.LeavePLabelOut(labels_changing, p=2)
labels_changing[:] = 0
for llo, llo_changing in [(lolo, lolo_changing), (lplo, lplo_changing)]:
for (train, test), (train_chan, test_chan) in zip(llo, llo_changing):
assert_array_equal(train, train_chan)
assert_array_equal(test, test_chan)
def test_cross_val_score():
clf = MockClassifier()
for a in range(-10, 10):
clf.a = a
# Smoke test
scores = cval.cross_val_score(clf, X, y)
assert_array_equal(scores, clf.score(X, y))
# test with multioutput y
scores = cval.cross_val_score(clf, X_sparse, X)
assert_array_equal(scores, clf.score(X_sparse, X))
scores = cval.cross_val_score(clf, X_sparse, y)
assert_array_equal(scores, clf.score(X_sparse, y))
# test with X and y as list
list_check = lambda x: isinstance(x, list)
clf = CheckingClassifier(check_X=list_check)
scores = cval.cross_val_score(clf, X.tolist(), y.tolist())
clf = CheckingClassifier(check_y=list_check)
scores = cval.cross_val_score(clf, X, y.tolist())
assert_raises(ValueError, cval.cross_val_score, clf, X, y,
scoring="sklearn")
# test with 3d X
X_3d = X[:, :, np.newaxis]
clf = MockClassifier(allow_nd=True)
scores = cval.cross_val_score(clf, X_3d, y)
clf = MockClassifier(allow_nd=False)
assert_raises(ValueError, cval.cross_val_score, clf, X_3d, y)
def test_cross_val_score_pandas():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TargetType, InputFeatureType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
cval.cross_val_score(clf, X_df, y_ser)
def test_cross_val_score_mask():
# test that cross_val_score works with boolean masks
svm = SVC(kernel="linear")
iris = load_iris()
X, y = iris.data, iris.target
cv_indices = cval.KFold(len(y), 5)
scores_indices = cval.cross_val_score(svm, X, y, cv=cv_indices)
cv_indices = cval.KFold(len(y), 5)
cv_masks = []
for train, test in cv_indices:
mask_train = np.zeros(len(y), dtype=np.bool)
mask_test = np.zeros(len(y), dtype=np.bool)
mask_train[train] = 1
mask_test[test] = 1
cv_masks.append((mask_train, mask_test))
scores_masks = cval.cross_val_score(svm, X, y, cv=cv_masks)
assert_array_equal(scores_indices, scores_masks)
def test_cross_val_score_precomputed():
# test for svm with precomputed kernel
svm = SVC(kernel="precomputed")
iris = load_iris()
X, y = iris.data, iris.target
linear_kernel = np.dot(X, X.T)
score_precomputed = cval.cross_val_score(svm, linear_kernel, y)
svm = SVC(kernel="linear")
score_linear = cval.cross_val_score(svm, X, y)
assert_array_equal(score_precomputed, score_linear)
# Error raised for non-square X
svm = SVC(kernel="precomputed")
assert_raises(ValueError, cval.cross_val_score, svm, X, y)
# test error is raised when the precomputed kernel is not array-like
# or sparse
assert_raises(ValueError, cval.cross_val_score, svm,
linear_kernel.tolist(), y)
def test_cross_val_score_fit_params():
clf = MockClassifier()
n_samples = X.shape[0]
n_classes = len(np.unique(y))
DUMMY_INT = 42
DUMMY_STR = '42'
DUMMY_OBJ = object()
def assert_fit_params(clf):
# Function to test that the values are passed correctly to the
# classifier arguments for non-array type
assert_equal(clf.dummy_int, DUMMY_INT)
assert_equal(clf.dummy_str, DUMMY_STR)
assert_equal(clf.dummy_obj, DUMMY_OBJ)
fit_params = {'sample_weight': np.ones(n_samples),
'class_prior': np.ones(n_classes) / n_classes,
'sparse_sample_weight': W_sparse,
'sparse_param': P_sparse,
'dummy_int': DUMMY_INT,
'dummy_str': DUMMY_STR,
'dummy_obj': DUMMY_OBJ,
'callback': assert_fit_params}
cval.cross_val_score(clf, X, y, fit_params=fit_params)
def test_cross_val_score_score_func():
clf = MockClassifier()
_score_func_args = []
def score_func(y_test, y_predict):
_score_func_args.append((y_test, y_predict))
return 1.0
with warnings.catch_warnings(record=True):
scoring = make_scorer(score_func)
score = cval.cross_val_score(clf, X, y, scoring=scoring)
assert_array_equal(score, [1.0, 1.0, 1.0])
assert len(_score_func_args) == 3
def test_cross_val_score_errors():
class BrokenEstimator:
pass
assert_raises(TypeError, cval.cross_val_score, BrokenEstimator(), X)
def test_train_test_split_errors():
assert_raises(ValueError, cval.train_test_split)
assert_raises(ValueError, cval.train_test_split, range(3), train_size=1.1)
assert_raises(ValueError, cval.train_test_split, range(3), test_size=0.6,
train_size=0.6)
assert_raises(ValueError, cval.train_test_split, range(3),
test_size=np.float32(0.6), train_size=np.float32(0.6))
assert_raises(ValueError, cval.train_test_split, range(3),
test_size="wrong_type")
assert_raises(ValueError, cval.train_test_split, range(3), test_size=2,
train_size=4)
assert_raises(TypeError, cval.train_test_split, range(3),
some_argument=1.1)
assert_raises(ValueError, cval.train_test_split, range(3), range(42))
def test_train_test_split():
X = np.arange(100).reshape((10, 10))
X_s = coo_matrix(X)
y = np.arange(10)
# simple test
split = cval.train_test_split(X, y, test_size=None, train_size=.5)
X_train, X_test, y_train, y_test = split
assert_equal(len(y_test), len(y_train))
# test correspondence of X and y
assert_array_equal(X_train[:, 0], y_train * 10)
assert_array_equal(X_test[:, 0], y_test * 10)
# conversion of lists to arrays (deprecated?)
with warnings.catch_warnings(record=True):
split = cval.train_test_split(X, X_s, y.tolist(), allow_lists=False)
X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
assert_array_equal(X_train, X_s_train.toarray())
assert_array_equal(X_test, X_s_test.toarray())
# don't convert lists to anything else by default
split = cval.train_test_split(X, X_s, y.tolist())
X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
assert_true(isinstance(y_train, list))
assert_true(isinstance(y_test, list))
# allow nd-arrays
X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
split = cval.train_test_split(X_4d, y_3d)
assert_equal(split[0].shape, (7, 5, 3, 2))
assert_equal(split[1].shape, (3, 5, 3, 2))
assert_equal(split[2].shape, (7, 7, 11))
assert_equal(split[3].shape, (3, 7, 11))
# test stratification option
y = np.array([1, 1, 1, 1, 2, 2, 2, 2])
for test_size, exp_test_size in zip([2, 4, 0.25, 0.5, 0.75],
[2, 4, 2, 4, 6]):
train, test = cval.train_test_split(y,
test_size=test_size,
stratify=y,
random_state=0)
assert_equal(len(test), exp_test_size)
assert_equal(len(test) + len(train), len(y))
# check the 1:1 ratio of ones and twos in the data is preserved
assert_equal(np.sum(train == 1), np.sum(train == 2))
def test_train_test_split_pandas():
# check train_test_split doesn't destroy pandas dataframe
types = [MockDataFrame]
try:
from pandas import DataFrame
types.append(DataFrame)
except ImportError:
pass
for InputFeatureType in types:
# X dataframe
X_df = InputFeatureType(X)
X_train, X_test = cval.train_test_split(X_df)
assert_true(isinstance(X_train, InputFeatureType))
assert_true(isinstance(X_test, InputFeatureType))
def test_train_test_split_mock_pandas():
# X mock dataframe
X_df = MockDataFrame(X)
X_train, X_test = cval.train_test_split(X_df)
assert_true(isinstance(X_train, MockDataFrame))
assert_true(isinstance(X_test, MockDataFrame))
with warnings.catch_warnings(record=True):
# deprecated
X_train_arr, X_test_arr = cval.train_test_split(X_df, allow_lists=False)
assert_true(isinstance(X_train_arr, np.ndarray))
assert_true(isinstance(X_test_arr, np.ndarray))
def test_cross_val_score_with_score_func_classification():
iris = load_iris()
clf = SVC(kernel='linear')
# Default score (should be the accuracy score)
scores = cval.cross_val_score(clf, iris.data, iris.target, cv=5)
assert_array_almost_equal(scores, [0.97, 1., 0.97, 0.97, 1.], 2)
# Correct classification score (aka. zero / one score) - should be the
# same as the default estimator score
zo_scores = cval.cross_val_score(clf, iris.data, iris.target,
scoring="accuracy", cv=5)
assert_array_almost_equal(zo_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
# F1 score (classes are balanced so f1_score should be equal to the
# zero/one score)
f1_scores = cval.cross_val_score(clf, iris.data, iris.target,
scoring="f1_weighted", cv=5)
assert_array_almost_equal(f1_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
def test_cross_val_score_with_score_func_regression():
X, y = make_regression(n_samples=30, n_features=20, n_informative=5,
random_state=0)
reg = Ridge()
# Default score of the Ridge regression estimator
scores = cval.cross_val_score(reg, X, y, cv=5)
assert_array_almost_equal(scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
# R2 score (aka. coefficient of determination) - should be the
# same as the default estimator score
r2_scores = cval.cross_val_score(reg, X, y, scoring="r2", cv=5)
assert_array_almost_equal(r2_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
# Mean squared error; this is a loss function, so "scores" are negative
mse_scores = cval.cross_val_score(reg, X, y, cv=5,
scoring="mean_squared_error")
expected_mse = np.array([-763.07, -553.16, -274.38, -273.26, -1681.99])
assert_array_almost_equal(mse_scores, expected_mse, 2)
# Explained variance
scoring = make_scorer(explained_variance_score)
ev_scores = cval.cross_val_score(reg, X, y, cv=5, scoring=scoring)
assert_array_almost_equal(ev_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
def test_permutation_score():
iris = load_iris()
X = iris.data
X_sparse = coo_matrix(X)
y = iris.target
svm = SVC(kernel='linear')
cv = cval.StratifiedKFold(y, 2)
score, scores, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
assert_greater(score, 0.9)
assert_almost_equal(pvalue, 0.0, 1)
score_label, _, pvalue_label = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy",
labels=np.ones(y.size), random_state=0)
assert_true(score_label == score)
assert_true(pvalue_label == pvalue)
# check that we obtain the same results with a sparse representation
svm_sparse = SVC(kernel='linear')
cv_sparse = cval.StratifiedKFold(y, 2)
score_label, _, pvalue_label = cval.permutation_test_score(
svm_sparse, X_sparse, y, n_permutations=30, cv=cv_sparse,
scoring="accuracy", labels=np.ones(y.size), random_state=0)
assert_true(score_label == score)
assert_true(pvalue_label == pvalue)
# test with custom scoring object
def custom_score(y_true, y_pred):
return (((y_true == y_pred).sum() - (y_true != y_pred).sum())
/ y_true.shape[0])
scorer = make_scorer(custom_score)
score, _, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=100, scoring=scorer, cv=cv, random_state=0)
assert_almost_equal(score, .93, 2)
assert_almost_equal(pvalue, 0.01, 3)
# set random y
y = np.mod(np.arange(len(y)), 3)
score, scores, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
assert_less(score, 0.5)
assert_greater(pvalue, 0.2)
def test_cross_val_generator_with_indices():
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
y = np.array([1, 1, 2, 2])
labels = np.array([1, 2, 3, 4])
# explicitly passing indices value is deprecated
loo = cval.LeaveOneOut(4)
lpo = cval.LeavePOut(4, 2)
kf = cval.KFold(4, 2)
skf = cval.StratifiedKFold(y, 2)
lolo = cval.LeaveOneLabelOut(labels)
lopo = cval.LeavePLabelOut(labels, 2)
ps = cval.PredefinedSplit([1, 1, 2, 2])
ss = cval.ShuffleSplit(2)
for cv in [loo, lpo, kf, skf, lolo, lopo, ss, ps]:
for train, test in cv:
assert_not_equal(np.asarray(train).dtype.kind, 'b')
assert_not_equal(np.asarray(test).dtype.kind, 'b')
X[train], X[test]
y[train], y[test]
@ignore_warnings
def test_cross_val_generator_with_default_indices():
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
y = np.array([1, 1, 2, 2])
labels = np.array([1, 2, 3, 4])
loo = cval.LeaveOneOut(4)
lpo = cval.LeavePOut(4, 2)
kf = cval.KFold(4, 2)
skf = cval.StratifiedKFold(y, 2)
lolo = cval.LeaveOneLabelOut(labels)
lopo = cval.LeavePLabelOut(labels, 2)
ss = cval.ShuffleSplit(2)
ps = cval.PredefinedSplit([1, 1, 2, 2])
for cv in [loo, lpo, kf, skf, lolo, lopo, ss, ps]:
for train, test in cv:
assert_not_equal(np.asarray(train).dtype.kind, 'b')
assert_not_equal(np.asarray(test).dtype.kind, 'b')
X[train], X[test]
y[train], y[test]
def test_shufflesplit_errors():
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=2.0)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=1.0)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=0.1,
train_size=0.95)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=11)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=10)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=8, train_size=3)
assert_raises(ValueError, cval.ShuffleSplit, 10, train_size=1j)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=None,
train_size=None)
def test_shufflesplit_reproducible():
# Check that iterating twice on the ShuffleSplit gives the same
# sequence of train-test when the random_state is given
ss = cval.ShuffleSplit(10, random_state=21)
assert_array_equal(list(a for a, b in ss), list(a for a, b in ss))
def test_safe_split_with_precomputed_kernel():
clf = SVC()
clfp = SVC(kernel="precomputed")
iris = load_iris()
X, y = iris.data, iris.target
K = np.dot(X, X.T)
cv = cval.ShuffleSplit(X.shape[0], test_size=0.25, random_state=0)
tr, te = list(cv)[0]
X_tr, y_tr = cval._safe_split(clf, X, y, tr)
K_tr, y_tr2 = cval._safe_split(clfp, K, y, tr)
assert_array_almost_equal(K_tr, np.dot(X_tr, X_tr.T))
X_te, y_te = cval._safe_split(clf, X, y, te, tr)
K_te, y_te2 = cval._safe_split(clfp, K, y, te, tr)
assert_array_almost_equal(K_te, np.dot(X_te, X_tr.T))
def test_cross_val_score_allow_nans():
# Check that cross_val_score allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
cval.cross_val_score(p, X, y, cv=5)
def test_train_test_split_allow_nans():
# Check that train_test_split allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
cval.train_test_split(X, y, test_size=0.2, random_state=42)
def test_permutation_test_score_allow_nans():
# Check that permutation_test_score allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
cval.permutation_test_score(p, X, y, cv=5)
def test_check_cv_return_types():
X = np.ones((9, 2))
cv = cval.check_cv(3, X, classifier=False)
assert_true(isinstance(cv, cval.KFold))
y_binary = np.array([0, 1, 0, 1, 0, 0, 1, 1, 1])
cv = cval.check_cv(3, X, y_binary, classifier=True)
assert_true(isinstance(cv, cval.StratifiedKFold))
y_multiclass = np.array([0, 1, 0, 1, 2, 1, 2, 0, 2])
cv = cval.check_cv(3, X, y_multiclass, classifier=True)
assert_true(isinstance(cv, cval.StratifiedKFold))
X = np.ones((5, 2))
y_multilabel = [[1, 0, 1], [1, 1, 0], [0, 0, 0], [0, 1, 1], [1, 0, 0]]
cv = cval.check_cv(3, X, y_multilabel, classifier=True)
assert_true(isinstance(cv, cval.KFold))
y_multioutput = np.array([[1, 2], [0, 3], [0, 0], [3, 1], [2, 0]])
cv = cval.check_cv(3, X, y_multioutput, classifier=True)
assert_true(isinstance(cv, cval.KFold))
def test_cross_val_score_multilabel():
X = np.array([[-3, 4], [2, 4], [3, 3], [0, 2], [-3, 1],
[-2, 1], [0, 0], [-2, -1], [-1, -2], [1, -2]])
y = np.array([[1, 1], [0, 1], [0, 1], [0, 1], [1, 1],
[0, 1], [1, 0], [1, 1], [1, 0], [0, 0]])
clf = KNeighborsClassifier(n_neighbors=1)
scoring_micro = make_scorer(precision_score, average='micro')
scoring_macro = make_scorer(precision_score, average='macro')
scoring_samples = make_scorer(precision_score, average='samples')
score_micro = cval.cross_val_score(clf, X, y, scoring=scoring_micro, cv=5)
score_macro = cval.cross_val_score(clf, X, y, scoring=scoring_macro, cv=5)
score_samples = cval.cross_val_score(clf, X, y,
scoring=scoring_samples, cv=5)
assert_almost_equal(score_micro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 3])
assert_almost_equal(score_macro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
assert_almost_equal(score_samples, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
def test_cross_val_predict():
boston = load_boston()
X, y = boston.data, boston.target
cv = cval.KFold(len(boston.target))
est = Ridge()
# Naive loop (should be same as cross_val_predict):
preds2 = np.zeros_like(y)
for train, test in cv:
est.fit(X[train], y[train])
preds2[test] = est.predict(X[test])
preds = cval.cross_val_predict(est, X, y, cv=cv)
assert_array_almost_equal(preds, preds2)
preds = cval.cross_val_predict(est, X, y)
assert_equal(len(preds), len(y))
cv = cval.LeaveOneOut(len(y))
preds = cval.cross_val_predict(est, X, y, cv=cv)
assert_equal(len(preds), len(y))
Xsp = X.copy()
Xsp *= (Xsp > np.median(Xsp))
Xsp = coo_matrix(Xsp)
preds = cval.cross_val_predict(est, Xsp, y)
assert_equal(len(preds), len(y))
preds = cval.cross_val_predict(KMeans(), X)
assert_equal(len(preds), len(y))
def bad_cv():
for i in range(4):
yield np.array([0, 1, 2, 3]), np.array([4, 5, 6, 7, 8])
assert_raises(ValueError, cval.cross_val_predict, est, X, y, cv=bad_cv())
def test_cross_val_predict_input_types():
clf = Ridge()
# Smoke test
predictions = cval.cross_val_predict(clf, X, y)
assert_equal(predictions.shape, (10,))
# test with multioutput y
predictions = cval.cross_val_predict(clf, X_sparse, X)
assert_equal(predictions.shape, (10, 2))
predictions = cval.cross_val_predict(clf, X_sparse, y)
assert_array_equal(predictions.shape, (10,))
# test with multioutput y
predictions = cval.cross_val_predict(clf, X_sparse, X)
assert_array_equal(predictions.shape, (10, 2))
# test with X and y as list
list_check = lambda x: isinstance(x, list)
clf = CheckingClassifier(check_X=list_check)
predictions = cval.cross_val_predict(clf, X.tolist(), y.tolist())
clf = CheckingClassifier(check_y=list_check)
predictions = cval.cross_val_predict(clf, X, y.tolist())
# test with 3d X
X_3d = X[:, :, np.newaxis]
check_3d = lambda x: x.ndim == 3
clf = CheckingClassifier(check_X=check_3d)
predictions = cval.cross_val_predict(clf, X_3d, y)
assert_array_equal(predictions.shape, (10,))
def test_cross_val_predict_pandas():
# check cross_val_predict doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TargetType, InputFeatureType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
cval.cross_val_predict(clf, X_df, y_ser)
def test_sparse_fit_params():
iris = load_iris()
X, y = iris.data, iris.target
clf = MockClassifier()
fit_params = {'sparse_sample_weight': coo_matrix(np.eye(X.shape[0]))}
a = cval.cross_val_score(clf, X, y, fit_params=fit_params)
assert_array_equal(a, np.ones(3))
def test_check_is_partition():
p = np.arange(100)
assert_true(cval._check_is_partition(p, 100))
assert_false(cval._check_is_partition(np.delete(p, 23), 100))
p[0] = 23
assert_false(cval._check_is_partition(p, 100))
def test_cross_val_predict_sparse_prediction():
# check that cross_val_predict gives same result for sparse and dense input
X, y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=False,
return_indicator=True,
random_state=1)
X_sparse = csr_matrix(X)
y_sparse = csr_matrix(y)
classif = OneVsRestClassifier(SVC(kernel='linear'))
preds = cval.cross_val_predict(classif, X, y, cv=10)
preds_sparse = cval.cross_val_predict(classif, X_sparse, y_sparse, cv=10)
preds_sparse = preds_sparse.toarray()
assert_array_almost_equal(preds_sparse, preds)
|
gpl-2.0
|
UIUC-SULLIVAN/ThesisProject_Andrea_Mattera
|
Coupling with weather/Classes.py
|
4
|
51132
|
# coding: utf-8
# In[6]:
import numpy as np
from sklearn import preprocessing
import pandas as pd
import datetime as dt
from sklearn.metrics import mean_absolute_error,mean_squared_error,median_absolute_error
from sklearn.svm import SVR
from sklearn.neighbors import KNeighborsRegressor
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.decomposition import PCA as sklearnPCA, KernelPCA, FactorAnalysis, IncrementalPCA, FastICA
from scipy.stats.mstats import normaltest
from scipy.stats import spearmanr
from math import *
import matplotlib.pyplot as plt
import time
from sklearn.grid_search import GridSearchCV
from sklearn.cluster import MeanShift, estimate_bandwidth
import urllib2
import json
# #Classes
# In[ ]:
class Sensor(object):
ID=None
owner=None
days={} #days is a dictionary containing a dataframe with the safecast data for that specific day
daysList=[] #it contains the days of the measurement, it is a list of the keys of days dictionary
dataset=None
latitude=None
longitude=None
stationary=None
def __init__(self,a,date='Captured Time'):
#given a series of measurements it creates a dataframe for every day
df=pd.DataFrame(a)
df=df.sort('Captured Time')
self.latitude,self.longitude,self.ID=df[['Latitude','Longitude','Sensor']].iloc[0].values
i=lambda x: str(x.year) + '-' + str(x.month) + '-' +str(x.day) #I take just year,month and day
try:
dates= df[date].apply(i)
except AttributeError:
df=df.convert_objects(convert_dates='coerce')
dates= df[date].apply(i)
df['Date']=dates
daysList=dates.unique()
self.stationary=Sensor.isStationary(df)
self.days=dict([(day,df[df['Date']==day]) for day in daysList])
self.daysList=daysList
def apply(self,f):
'''Apply a generic function on historical data'''
self.days.update((x, f(y)) for x, y in self.days.items())
return self
def addDay(self,a,date='Captured Time'):
''' It adds another day to the days dictionary
'''
df=pd.DataFrame(a)
i=lambda x: str(x.year) + '-' + str(x.month) + '-' +str(x.day) #I take just year,month and day
try:
dates= df[date].apply(i)
except AttributeError:
df=df.convert_objects(convert_dates='coerce')
dates= df[date].apply(i)
df['Day']=dates
daysList=dates.unique()
[self.days.update({day:df[df['Day']==day]}) for day in daysList]
[self.daysList.append(day) for day in daysList]
return self
def cleanAll(self):
'''It cleans all the measurements applying the static method clean to every day
'''
self.days.update((x, Sensor.clean(y)) for x, y in self.days.items())
return self
@staticmethod
def clean(df):
'''It cleans a single day
'''
from string import strip
columns=['Captured Time','Latitude','Longitude','Value','Unit','ID','Height','Loader ID','Sensor','Distance']
df=df[columns]
#df=df.dropna(1) #empty rows are deleted
df=df.drop_duplicates('Captured Time') #sometimes there are some duplicates
df.index=xrange(0,len(df))
today=dt.datetime.today().strftime("%Y-%m-%d %H:%M:%S")
df=df.convert_objects(convert_dates='coerce')
df=df[df['Captured Time']<=today] #every row with date field incorrect is deleted
df['Unit']=df['Unit'].apply(strip)
df=df[df.Unit=='cpm'] #all the units that are not expressed in cpm are deleted
#I should add some lines to remove special character like \n and \t
return df
@staticmethod
def convertDate(df,date='Captured Time'):
df[date]=0
try:
f = lambda x: str(int(x.Year)) + '-'+ str(int(x.Month)) + '-' + str(int(x.Day)) + ' ' + str(int(x.Hour)) + ':' + str(int(x.Minute)) + ':' + '00'
df[date]=df.apply(f,1)
except AttributeError:
diz={0:'00',0.25:'15',0.5:'30',0.75:'45'}
g = lambda x: str(int(x.Year)) + '-'+ str(int(x.Month)) + '-' + str(int(x.Day)) + ' ' + str(int(x.Hour)) + ':' + diz[x.Hour - int(x.Hour)] + ':' + '00'
df[date]=df.apply(g,1)
df=df.drop(['Year','Month','Day','Hour'],axis=1)
fmt="%Y-%m-%d %H:%M:%S"
try:
df[date]=df[date].apply(lambda x: dt.datetime.strptime(x,fmt))
except ValueError:
pass
return df
def createDataset(self):
'''It merges all the dataframes in the days dictionary into a single dataframe
'''
tmp=self.days.values()
df = pd.concat(tmp)
self.dataset=df#.sort('Captured Time')
return self.dataset
def delDay(self,day):
try:
self.days.pop(day)
self.daysList.remove(day)
except KeyError:
print 'The day ' + str(day) + ' is not present'
return self
return self
@staticmethod
def distance(a1,b1,a2,b2):
'''Evaluates the great-circle distance in meters between two points with coordinates expressed in
Latitude and Longitude (degrees)
'''
a1=a1*np.pi/180
a2=a2*np.pi/180
b1=b1*np.pi/180
b2=b2*np.pi/180
#spherical law of cosines with a1,a2 = latitudes and b1,b2 = longitudes (converted to radians above)
return np.arccos(np.sin(a1)*np.sin(a2)+np.cos(a1)*np.cos(a2)*np.cos(b1-b2))*6378*1000
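# Hypothetical usage sketch (values for illustration only): two points on
# the 45th parallel separated by 0.001 degrees of longitude are roughly
# 79 m apart, e.g. Sensor.distance(45.0, 9.0, 45.0, 9.001)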
def extractDates(self,date='Captured Time',delta=0.25):
'''It applies the extracDate static method on every day
'''
self.days.update((x, Sensor.extractDate(y,date,delta)) for x, y in self.days.items())
return self
@staticmethod
def extractDate(df,date='Captured Time',delta=0.25):
'''Adds fields useful to couple with weather data:
'Year', 'Month' and 'Day' for the date, and 'Hour' with the time rounded in steps of delta hours
'''
import datetime as dt
fmt="%Y-%m-%d"
i=lambda x: str(x.year) + '-' + str(x.month) + '-' +str(x.day) #I take just year,month and day
try:
dates= df[date].apply(i)
except AttributeError:
df=df.convert_objects(convert_dates='coerce')
dates= df[date].apply(i)
g = lambda x: dt.datetime.strptime(x,fmt)
dates= dates.apply(g)
h=lambda x : str(x).split(' ')[0]#the conversion adds hour,minutes and seconds
dates= dates.apply(h) #I drop it and return a list of string
df['Year']=df[date].apply(lambda x : x.year)
df['Month']=df[date].apply(lambda x: x.month)
df['Day']=df[date].apply(lambda x: x.day)
tmp=df[date].apply(lambda x: x.to_datetime())
df['Hour']=tmp.apply(lambda x: x.hour)
tmp=df[date].apply(lambda x: x.minute)
f=lambda x: round(round(x/(60*delta))*delta,3)
df['Hour']=df['Hour']+tmp.apply(f)
df['Hour']=df['Hour'].replace(24,0.00)
return df
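# Worked example of the rounding above (delta=0.25, i.e. 15-minute bins;
# values hypothetical): a timestamp at 13:38 gives round(38/15)=3, then
# 3*0.25=0.75, so 'Hour' becomes 13.75 (13:45 is the nearest bin)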
def getDays(self):
print self.daysList
@staticmethod
def isStationary(df):
'''It returns True if the measurements in df belong to a stationary detector
'''
l1=df.Latitude.iloc[0]
l2=df.Longitude.iloc[0]
m1=df.Latitude.iloc[len(df)-1]
m2=df.Longitude.iloc[len(df)-1]
if df.Distance.max()>15: #it checks if the distance between two consecutive measurements is more than
#the maximum value of the gps spatial inaccuracy
return False
if Sensor.distance(l1,l2,m1,m2)>100: #it checks if the distance between the first and the last point
#is too large
return False
if df.Distance.sum()>2*len(df):
return False
return True
def timeSampling(self,day):
'''It returns the time sampling of the measurements on the indicated day
'''
from numpy import median
df=self.days[day]
df=Sensor.clean(df)
return median([(df['Captured Time'].loc[n]-df['Captured Time'].loc[m]).total_seconds() for n,m in zip(xrange(1,len(df)),xrange(0,(len(df)-1)))])
def to_csv(self,filename):
with open(filename, 'a') as f:
self.dataset.to_csv(f,index=False,float_format = '%.4f',header=False)
class Weather(object):
'''The weather info for every day requested is saved in the dictionary historical {'year-month-day': weather df}
'''
lat=None
lon=None
historical={}
stations=None
state=None
icao=None
dataset=pd.DataFrame()
daysUnavailable=[]
daysList=[]
closestStation=None
key=0
def __init__(self,lat,lon):
'''Given latitude and longitude it finds the closest weather station;
it will be used later to fetch the weather information'''
self.parser=ParseWeather()
self.city,self.country,self.state=self.parser.getLocation(lat,lon)
def addDay(self,a,date='DateUTC'):
'''Add another day to the historical dictionary'''
df=pd.DataFrame(a)
i=lambda x: str(x.year) + '-' + str(x.month) + '-' +str(x.day) #I take just year,month and day
try:
dates= df[date].apply(i)
except AttributeError:
df=df.convert_objects(convert_dates='coerce')
dates= df[date].apply(i)
df['Day']=dates
daysList=dates.unique()
[self.historical.update({day:df[df['Day']==day]}) for day in daysList]
[self.daysList.append(day) for day in daysList]
return self
def apply(self,f):
'''Apply a function on historical data'''
self.historical.update((x, f(y)) for x, y in self.historical.items())
return self
@staticmethod
def clean(df):
'''Clean a specific dataframe containing weather information'''
info=df.copy()
info=info.convert_objects(convert_numeric=True)
pre={'Light Rain':1,'Heavy Rain':1,'Rain':1,'Light Rain Mist':1, 'Heavy Rain Mist':1,'Rain Mist':1,'Light Rain Showers':1,'Heavy Rain Showers':1, 'Rain Showers':1,'Light Thunderstorms and Rain':1,'Heavy Thunderstorms and Rain':1, 'Thunderstorms and Rain':1,'Light Freezing Drizzle':1,'Heavy Freezing Drizzle':1, 'Freezing Drizzle':1,'Light Freezing Rain':1,'Heavy Freezing Rain':1,'Freezing Rain':1, 'Light Snow':1,'Heavy Snow':1,'Snow':1,'Light Snow Grains':1,'Heavy Snow Grains':1, 'Snow Grains':1,'LightSnow Showers':1,'Heavy Snow Showers':1,'Snow Showers':1,
'Light Ice Crystals':1,'Heavy Ice Crystals':1,'Ice Crystals':1,'Light Ice Pellets':1, \
'Heavy Ice Pellets':1,'Ice Pellets':1,'LightIce Pellet Showers':1,'HeavyIce Pellet Showers':1, \
'Ice Pellet Showers':1,'LightHail Showers':1,'Heavy Hail Showers':1, \
'Hail Showers':1,'Light Small Hail Showers':1,'Heavy Small Hail Showers':1, \
'Small Hail Showers':1}
f=lambda x: pre.get(str(x) , 0)
info['Conditions']=info['Conditions'].apply(f)
#cleaning of NaN and other unexpected values
info.PrecipitationIn=info.PrecipitationIn.fillna(value=0)
info['Wind SpeedMPH']=info['Wind SpeedMPH'].fillna(value=0)
info['Wind Direction']=info['Wind Direction'].replace('Calm',0)
info['Wind SpeedMPH']=info['Wind SpeedMPH'].replace('Calm',0)
#windspeedmph contains strings so it is considered as a generic object type, I convert it in float type
info['Wind SpeedMPH']=info['Wind SpeedMPH'].apply(float)
t=info.TemperatureF.copy()
h=info.Humidity.copy()
s=info['Sea Level PressureIn'].copy()
d=info['Dew PointF'].copy()
p=info['PrecipitationIn'].copy()
#sometimes the weather information shows unexpected values (such as -9999)
t[t < -100] = np.NaN
h[h<0]=np.NaN
s[s<0]=np.NaN
d[d<0]=np.NaN
p[p<0]=0
info['TemperatureF']=t
info['Humidity']=h
info['Sea Level PressureIn']=s
info['Dew PointF']=d
info['PrecipitationIn']=p
return info
def conditionsOccurred(self,graph=False):
'''It returns the weather conditions that occurred in the dataset'''
conditions=self.dataset.Conditions.value_counts()
print conditions
self.conditions=self.dataset.Conditions.value_counts()
if graph:
conditions.plot(kind='barh')
return self
def createDataset(self):
'''It merges all the dataframes in the historical dictionary into a single dataframe
'''
tmp=self.historical.values()
df = pd.concat(tmp)
self.dataset=df#.sort('DateUTC')
return self.dataset
@staticmethod
def extractHour(df,date='DateUTC',delta=0.25):
'''It creates a new field hour
The field contains the hour in the format Hour.quarters (i.e 13.25 are 13 hours and 15 mins)'''
f=lambda x: round(round(x/(60*delta))*delta,3)
try:
hour=df[date].apply(lambda x: x.hour)
except AttributeError:
df[date]=df[date].convert_objects(convert_dates='coerce')
hour=df[date].apply(lambda x: x.hour)
minute=df[date].dt.minute.apply(f)
df['Hour']=hour+minute
df['Hour']=df['Hour'].replace(24,0.00)
return df
def extractHours(self,date='DateUTC',delta=0.25):
'''It applies the extractHour static method on every day
'''
self.historical.update((x, Weather.extractHour(y,date,delta)) for x, y in self.historical.items() )
return self
def getDays(self):
'''It simply prints the days with weather information available in the instance'''
print self.historical.keys()
def getHistorical(self, date):
'''Given a specific day it extracts the weather information from wunderground.com
'''
key=date[:10]
fmt="%Y-%m-%d"
date=dt.datetime.strptime(key,fmt)
day=date.day
date1=date-dt.timedelta(days=1)
date=str(date)
date1=str(date1)
df1=self.parser.getWeather(date,self.city,self.state)
df2=self.parser.getWeather(date1,self.city,self.state)
df1['Day']=df1['DateUTC'].apply(lambda x: x.day)
df2['Day']=df2['DateUTC'].apply(lambda x: x.day)
df1=df1[df1['Day']==day]
df2=df2[df2['Day']==day]
df=df1.append(df2)
df=df.drop('Day',1)
df=Weather.clean(df)
self.historical[key]=df
self.daysList.append(key)
return df
def timeSampling(self,date='DateUTC'):
from numpy import median
df=Weather.clean(self.dataset) #assumes createDataset() has already been called
return median([(df[date].loc[n]-df[date].loc[m]).total_seconds() for n,m in zip(xrange(1,len(df)),xrange(0,(len(df)-1)))])
class Model(object):
'''This class contains methods to predict the background radiation using a dataframe with background
and weather information
'''
debug={}
outliers=None
reducedDatasets=None
weather_columns=['Humidity','TemperatureF','Sea Level PressureIn','PrecipitationIn','Dew PointF','Conditions','Wind SpeedMPH']
out_columns=['Value']
#model_columns=['Value','PrecipitationIn','Humidity','Dew PointF','Sea Level PressureIn','TemperatureF']
columns=['Captured Time','Humidity','TemperatureF','Sea Level PressureIn','Conditions','PrecipitationIn','Dew PointF','Value','Wind SpeedMPH']
def __init__(self,df):
'''Define a model object'''
self.ModelInputs={}
self.ModelOutput=None
self.prediction=None
self.metrics={}
self.Threats=[]
self.OutputTest={}
self.CorrelationTable=pd.DataFrame()
self.datasetsAvailable=['Dataset']
self.Sensor=df.Sensor.iloc[0]
self.model_columns=['PrecipitationIn','Humidity','Dew PointF','Sea Level PressureIn','TemperatureF']
df=df[Model.columns]
df=df.convert_objects(convert_dates='coerce')
df=self.clean(df)
t=df['Captured Time'].iloc[0]
f=lambda x: (x-t).total_seconds()
index=df['Captured Time'].apply(f)
#df=df.drop('Captured Time',1)
self.time=index
df.index=index
self.dataset=df
def applyOnInputs(self,method,inp,f=None,window=0,percentage=60):
'''It applies a built-in method or a custom function f to the input variables
Methods available:
'standardize' , it applies the standardization method of sklearn.preprocessing.scale
'''
if not(self.ModelInputs):
self.getInput()
index=int(percentage*len(self.dataset)/100)
d={'Train':self.ModelInputs[inp][:index,:],'Test':self.ModelInputs[inp][index:,:]}
if method=='standardize':
d.update((x, preprocessing.scale(y)) for x, y in d.items())
else:
d.update((x, f(y)) for x, y in d.items())
#debug
#dataset=pd.DataFrame(self.ModelInputs['Dataset'])
#dataset['Output']=self.ModelOutput
#self.debug['ApplyOnInputs']=dataset
###
self.ModelInputs[inp]=np.append(d['Train'],d['Test'],axis=0)
return self
def applyOnOutput(self,method,f=None,window=0,percentage=60):
'''It applies a built-in method or a custom function f to the output variable
Methods available: 'movingaverage', it requires the variable window
'standardize' , it applies the standardization method of sklearn.preprocessing.scale
'''
if self.ModelOutput is None:
self.getOutput()
index=int(percentage*len(self.dataset)/100)
self.OutputTest['Original']=self.ModelOutput[index:]
#this function is used to apply some filtering to the output
#the data are split first so that the filtering technique won't be anticausal
#i.e. a moving average computed on the whole series would make the train part
#also depend on samples from the test part, which ideally belong to the "future"
d={'Train':self.ModelOutput[:index],'Test':self.ModelOutput[index:]}
if method=='movingaverage':
if not(window):
raise ValueError('A value for the window is required')
d.update((x, Model.moving_average(y,n=window)) for x, y in d.items())
elif method=='standardize':
self.OutputTest['mean']=np.mean(d['Train'])
self.OutputTest['std']=np.std(d['Train'])
d.update((x, preprocessing.scale(y)) for x, y in d.items())
else:
d.update((x, f(y)) for x, y in d.items())
newOutput=np.append(d['Train'],d['Test'])
#the moving_average could drop some values at the end of the time series, so if this happens the last
#values are repeated to restore the original dimension
check=len(self.ModelOutput)-len(newOutput)
if check>0:
newOutput=np.append(newOutput,newOutput[-check:])
self.ModelOutput=newOutput
''' #debug
dataset=pd.DataFrame(self.ModelInputs['Dataset'])
dataset['Output']=self.ModelOutput
self.debug['ApplyOnOutputs']=dataset
###'''
return self
def clean(self,dataset):
dataset.Value=dataset.Value.replace(0,np.nan)
#a weighted interpolation is applied on a window that corresponds to a period of 3 hours
#just for the weather conditions
colnames=['Humidity','TemperatureF','Sea Level PressureIn','Conditions','PrecipitationIn','Dew PointF']
dataset[colnames]=dataset[colnames].replace(np.nan,999)
#the rolling apply function requires that there are no nan values, so I use a dummy number
dataset[colnames]=pd.rolling_apply(dataset[colnames],13,Model.weightedInterp)
#finally a linear interpolation is used on the Value field, and to fill in the weather conditions
#in case some period had no value to interpolate
dataset=dataset.interpolate(method='linear')
dataset=dataset.dropna() #it drops the NaT captured Time
return dataset
@staticmethod
def clustering(var1,var2):
'''Given two variables it finds the clusters according to the MeanShift algorithm
The current function is used by the remove_outliers method
'''
X=[var1,var2]
X=np.array(X)
X=X.T
bandwidth = estimate_bandwidth(X, quantile=0.9, n_samples=500) #estimation of bandwidth parameter needed for the
#clustering
ms = MeanShift(bandwidth=bandwidth, bin_seeding=True)
ms.fit(X)
labels = ms.labels_
tmp=pd.DataFrame(X)
tmp['Label']=labels
return tmp
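# Hedged usage sketch (column names assumed from Model.weather_columns,
# variable names hypothetical): label each (radiation, temperature) pair
# with its MeanShift cluster, e.g.
# labeled = Model.clustering(df['Value'].values, df['TemperatureF'].values)
# outliers = labeled[labeled['Label'] != labeled['Label'].mode()[0]]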
def conditionsOccurred(self,graph=False):
'''It returns the weather conditions occurred in the dataset, if the Conditions field has not been
transformed into a numerical field yet
'''
conditions=self.dataset.Conditions.value_counts()
print conditions
self.conditions=self.dataset.Conditions.value_counts()
if graph:
conditions.plot(kind='barh')
return self
@staticmethod
def createDataset(sens,printing=False,filename='Stationary_data_with_weather.csv'):
'''This function instantiates the Weather and Sensor objects and uses their methods to clean and collect information
Then it merges them in a dataset containing weather and radiation information
'''
w=None
s=Sensor(sens)
s=s.cleanAll()
sensor=s.extractDates()
#value of lat and lon needed to instantiate the weather class
lat,lon=sensor.latitude,sensor.longitude
w= Weather(lat,lon)
for day in sensor.daysList:
w.getHistorical(day)
#the historical weather has a sampling time of 1 hour, so I resample my sensor data every (15 min default)
#taking the median of the value in that period
wea=w.extractHours()
f= lambda x: x.groupby(x.Hour).median()
wea=wea.apply(f)
wea=wea.apply(lambda x: x.drop('Hour',1))
sensor=sensor.apply(f)
#pieces contains a list of dataframes, each corresponding to a single day of measurements coupled with the weather
#dataframe, with all the measurements coupled
try:
pieces=[sensor.days[date].join(wea.historical[date]) for date in wea.daysList if not(wea.historical[date].empty) ]
except ValueError:
return pd.DataFrame()
#to make the single days well sampled the holes are filled with a linear interpolation method
#the first and the last are skipped because the first piece probably doesn't start at midnight so it would be filled
#with NaN
#the same holds for the last one, which probably doesn't finish at midnight
filled=[p.reindex(np.arange(0,24,0.25)).interpolate(method='linear') for num,p in enumerate(pieces) if (num!=0 and num!=len(pieces)-1) ]
try:
filled.insert(0,pieces[0])
except IndexError:
return pd.DataFrame()
filled.append(pieces[-1])
try:
dataset=pd.concat(filled)
except ValueError:
return pd.DataFrame()
#after the median on every hour all the fields that were strings become NaN or are dropped
dataset=dataset.dropna(1,how='all')
dataset = dataset[np.isfinite(dataset['Sensor'])]
dataset['Hour']=dataset.index
#in the line below the field Captured Time is recreated
dataset=Sensor.convertDate(dataset)
if printing:
with open(filename, 'a') as f:
dataset.to_csv(f,index=False,float_format = '%.4f',header=False)
return dataset
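# Hedged end-to-end sketch (`measurements` is a hypothetical raw safecast
# dataframe for one stationary sensor):
# coupled = Model.createDataset(measurements)
# m = Model(coupled).getInput().getOutput()
# m = m.applyOnInputs('standardize','Dataset').applyOnOutput('standardize')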
def dimensionalityReduction(self,nr=5):
'''It applies all the dimensionality reduction techniques available in this class:
Techniques available:
'PCA'
'FactorAnalysis'
'KPCArbf','KPCApoly'
'KPCAcosine','KPCAsigmoid'
'IPCA'
'FastICADeflation'
'FastICAParallel'
'Isomap'
'LLE'
'LLEmodified'
'LLEltsa'
'''
dataset=self.ModelInputs['Dataset']
sklearn_pca = sklearnPCA(n_components=nr)
p_components = sklearn_pca.fit_transform(dataset)
fa=FactorAnalysis(n_components=nr)
factors=fa.fit_transform(dataset)
kpca=KernelPCA(nr,kernel='rbf')
rbf=kpca.fit_transform(dataset)
kpca=KernelPCA(nr,kernel='poly')
poly=kpca.fit_transform(dataset)
kpca=KernelPCA(nr,kernel='cosine')
cosine=kpca.fit_transform(dataset)
kpca=KernelPCA(nr,kernel='sigmoid')
sigmoid=kpca.fit_transform(dataset)
ipca=IncrementalPCA(nr)
i_components=ipca.fit_transform(dataset)
fip=FastICA(nr,algorithm='parallel')
fid=FastICA(nr,algorithm='deflation')
ficaD=fid.fit_transform(dataset)
ficaP=fip.fit_transform(dataset)
'''isomap=Isomap(n_components=nr).fit_transform(dataset)
try:
lle1=LocallyLinearEmbedding(n_components=nr).fit_transform(dataset)
except ValueError:
lle1=LocallyLinearEmbedding(n_components=nr,eigen_solver='dense').fit_transform(dataset)
try:
lle2=LocallyLinearEmbedding(n_components=nr,method='modified').fit_transform(dataset)
except ValueError:
lle2=LocallyLinearEmbedding(n_components=nr,method='modified',eigen_solver='dense').fit_transform(dataset)
try:
lle3=LocallyLinearEmbedding(n_components=nr,method='ltsa').fit_transform(dataset)
except ValueError:
lle3=LocallyLinearEmbedding(n_components=nr,method='ltsa',eigen_solver='dense').fit_transform(dataset)'''
values=[p_components,factors,rbf,poly,cosine,sigmoid,i_components,ficaD,ficaP]#,isomap,lle1,lle2,lle3]
keys=['PCA','FactorAnalysis','KPCArbf','KPCApoly','KPCAcosine','KPCAsigmoid','IPCA','FastICADeflation','FastICAParallel']#,'Isomap','LLE','LLEmodified','LLEltsa']
self.ModelInputs.update(dict(zip(keys, values)))
[self.datasetsAvailable.append(key) for key in keys ]
#debug
#dataset=pd.DataFrame(self.ModelInputs['Dataset'])
#dataset['Output']=self.ModelOutput
#self.debug['Dimensionalityreduction']=dataset
###
return self
@staticmethod
def extractMetrics(pred,test_y):
'''It extracts three different metrics: mean absolute error, median absolute error, mean squared error
'''
try:
meanae=mean_absolute_error(test_y,pred)
except ValueError:
#sometimes the moving average filter on the output reduces its length
#so some values of the prediction are dropped
pred=pred[:len(test_y)-len(pred)]
meanae=mean_absolute_error(test_y,pred)
mae=median_absolute_error(test_y,pred)
mse=mean_squared_error(test_y,pred)
return meanae,mae,mse
def findCorrelations(self,alfa=5,duringRain=False,minimumLength=500):
'''It discovers if the input variables are correlated with the output, making use of the Spearman correlation technique
The alfa parameter defines the level of significance of the test, expressed as a percentage
If the p-value evaluated is less than alfa/100 the null hypothesis (there is no correlation between the variables) is rejected'''
e=self.dataset
if duringRain:
e=e[e['Conditions']==1]
e=e[Model.weather_columns]
e['Value']=self.dataset.Value.copy()
e=e.apply(preprocessing.scale)
if len(e)<minimumLength:
self.CorrelationTable=pd.DataFrame()
return self
pthresh=alfa/100.0
val=e.Value.values
temp=spearmanr(e.TemperatureF.values,val)
hum=spearmanr(e.Humidity.values,val)
sea=spearmanr(e['Sea Level PressureIn'].values,val)
prec=spearmanr(e.PrecipitationIn.values,val)
dew=spearmanr(e['Dew PointF'].values,val)
df=pd.DataFrame({'Temperature':temp,'Sea Level PressureIn':sea,'PrecipitationIn':prec,'Humidity':hum,'Dew PointF':dew},index=['Spearman coefficients','p-values'])
def test(p,threshold):
if p<threshold:
return 'Reject H0'
else:
return 'Accept H0'
df.loc['Results']=[test(p,pthresh) for p in df.loc['p-values']]
self.CorrelationTable=df
return self
def GBregression(self,percentage=60,inp='Dataset',n_estimators=100, learning_rate=0.1,max_depth=1, random_state=0, loss='ls'):
'''It applies the ensemble method of gradient boosting trees'''
X=y=prediction=metrics=None
X=self.ModelInputs[inp] #input dataset
samples=int(percentage*len(X)/100) #evaluating the samples number given the percentage
x=X[:samples,:] #training input set
try:
y = self.ModelOutput[:samples] #training output set
except KeyError:
self.getOutput()
y = self.ModelOutput[:samples]
test_x=X[samples:,:] #testing input set
test_y=self.ModelOutput[samples:] # testing output set
gb=GradientBoostingRegressor(n_estimators=n_estimators, learning_rate=learning_rate,max_depth=max_depth, random_state=random_state, loss=loss)
model=gb.fit(x,y)
prediction=model.predict(test_x)
self.prediction=prediction
self.OutputTest['Standardized']=test_y
metrics=Model.extractMetrics(prediction,test_y)
return prediction,np.median(metrics)
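# Hedged usage sketch (`m` is a hypothetical prepared Model instance):
# train on the first 60% of the chosen input matrix and keep the median
# of the three error metrics, e.g.
# pred, err = m.GBregression(percentage=60, inp='Dataset', n_estimators=200)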
def getDatasetsAvailable(self):
self.datasetsAvailable=self.ModelInputs.keys()
return self.ModelInputs.keys()
def getInput(self):
X=self.dataset[self.model_columns].copy()
self.ModelInputs['Dataset']=X.as_matrix()
return self
def getOutput(self):
Y=self.dataset.copy()
try:
self.ModelOutput=Y[Model.out_columns].as_matrix()
except KeyError:
self.ModelOutput=self.dataset['Output'].as_matrix() #if the prepare dataset step has been called
#the output is 'Output' instead of 'Value'
return self
def insertThreat(self,testPercentage=40,wLength=4,meanP=1.1):
'''Method to simulate and insert a threat in the part of the output series that will be used as the test set
wLength: the length of the window in which the threat will be inserted
testPercentage: indicates the percentage of the test dataset
meanP: the mean value of the Poisson distribution from which the "threat" is extracted
'''
t=None
testPercentage=testPercentage/100.0
t=pd.DataFrame()
t['Value']=self.dataset.Value.copy()#create a copy of the output
startTest=int((1-testPercentage)*len(t)) #define the first index of the output that will be used as test
s=np.random.random_integers(startTest,len(t)) #find a random index in the test part of the output
values=np.random.poisson(t['Value'].mean()*meanP,wLength) #draw wLength values from a Poisson distribution with mean = series mean * meanP
window=np.arange(s,s+wLength)*(self.dataset.index[1]-self.dataset.index[0]) #define the window (wLength samples, scaled by the index step)
#the window is cleaned, the values are added and the other values are interpolated to maintain the continuity
t['Value'].loc[window]=values
#t.loc[window[1:-1]]=values
self.ThreatsIndex=t.copy()
self.ThreatsIndex['Value']=0
self.ThreatsIndex.loc[window]=1
d={'Train':t['Value'].iloc[:startTest],'Test':t['Value'].iloc[startTest:]}
d.update((x, preprocessing.scale(y)) for x, y in d.items())
self.Threats=np.append(d['Train'],d['Test'])#append the window in which there is the threat
self.dataset.Value=t['Value'].values #the threat is inserted in the dataset
return self
def KNregression(self,percentage,inp='Dataset',neighbors=5,weights='distance',algorithm='auto',leaf=30):
'''It evaluates a prediction using k-nearest neighbors regression approach
It returns a tuple: (prediction, median of three different metrics) '''
X=y=prediction=metrics=None
X=self.ModelInputs[inp] #input matrix
samples=int(percentage*len(X)/100) #evaluating the number of samples given the percentage
x=X[:samples,0:] #training input set
y = self.ModelOutput[:samples] # training output set
test_x=X[samples:,:] #testing input set
test_y=self.ModelOutput[samples:] #testing output set
knn=KNeighborsRegressor(n_neighbors=neighbors,weights=weights,algorithm=algorithm, leaf_size=leaf)
try:
model=knn.fit(x,y) #evaluating the model
except ValueError:
return np.nan,9999
prediction=model.predict(test_x) #evaluating of the prediction
self.prediction=prediction
self.OutputTest['Standardized']=test_y
metrics=Model.extractMetrics(prediction,test_y)
return prediction,np.median(metrics)
@staticmethod
def moving_average(a, n=3):
''' Function that implements a moving average filter
[source]:http://stackoverflow.com/questions/14313510/moving-average-function-on-numpy-scipy
'''
first=np.array([a[0]])
last=np.array([a[-1]])
a=np.concatenate((first,a,last))
ret = np.cumsum(a, dtype=float)
ret[n:] = ret[n:] - ret[:-n]
return ret[n - 1:] / n
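# A quick check of the edge-padded filter above (n=3); the first and last
# samples are duplicated so the output keeps the input length:
# >>> Model.moving_average(np.array([1., 2., 3., 4.]), n=3)
# array([1.33333333, 2.        , 3.        , 3.66666667])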
def plotRadiationWeather(self):
'''It plots the Value field with each weather field separately
The function returns a plot object
'''
df=self.dataset
plt.figure()
stand=df.apply(preprocessing.scale,axis=0) #the data are normalized because they have different units
val=stand['Value'].as_matrix()
prec=stand['PrecipitationIn'].as_matrix()
dew=stand['Dew PointF'].as_matrix()
hum=stand['Humidity'].as_matrix()
press=stand['Sea Level PressureIn'].as_matrix()
temp=stand['TemperatureF'].as_matrix()
plt.subplot(3,2,1) #3x2 grid, consistent with the panels below
plt.plot(val,prec,'bo')
plt.ylabel('Precipitation')
plt.xlabel('Background Radiation')
plt.subplot(3,2,2)
plt.plot(val,dew,'ro')
plt.ylabel('Dew Point')
plt.xlabel('Background Radiation')
plt.subplot(3,2,3)
plt.plot(val,hum,'yo')
plt.ylabel('Humidity')
plt.xlabel('Background Radiation')
plt.subplot(3,2,4)
plt.plot(val,press,'go')
plt.ylabel('Sea Level Pressure')
plt.xlabel('Background Radiation')
plt.subplot(3,2,5)
plt.plot(val,temp,'mo')
plt.ylabel('Temperature')
plt.xlabel('Background Radiation')
plt.subplot(3,2,6)
plt.plot(val,prec,'bo')
plt.plot(val,dew,'ro')
plt.plot(val,hum,'yo')
plt.plot(val,press,'go')
plt.plot(val,temp,'mo')
#plt.legend(['Precipitation','DewPoint','Humidity','Sea Level Pressure','Temperature'])
plt.xlabel('Background Radiation')
plt.show()
def plotDataset(self):
self.dataset.plot(subplots=True)
plt.xlabel('Time')
plt.show()
def plotPrediction(self):
'''It creates a figure with two graphs: the real and the predicted output
the absolute error between them
'''
predicted=self.prediction
real=self.OutputTest['Standardized']#[abs(len(self.OutputTest['Standardized'])-len(self.prediction)):]
rmse=np.sqrt(mean_squared_error(predicted,real))
plt.figure()
plt.subplot(211)
plt.xlabel('Time')
plt.ylabel('Radiation ')
plt.title('Comparison between real and predicted output, RMSE=' + str(rmse))
plt.plot(predicted,'r')
plt.plot(real,'b')
plt.legend(['Predicted output','Real output'])
plt.subplot(212)
plt.xlabel('Time')
plt.ylabel('Absolute error')
plt.plot(abs(real-predicted),'m')
plt.show()
def prepareDataset(self,n=1,l=1,w=0):
X=self.dataset[Model.weather_columns].copy()
self.model_columns=Model.weather_columns[:] #this full slice provides a copy of the list
values=self.dataset.Value.copy()
output=values.shift(-l).copy()
vfield=[]
for m in xrange(0,n+1): #the n parameter sets how many lagged fields are created
#n+1 columns Values-0,...,Values-n are added, each holding the output series
#shifted by 0,1,...,n samples (i.e. the values at times t,t-1,...,t-n)
field='Values-' + str(m)
vfield.append(field)
self.model_columns.append(field)
X[field]=values.shift(m) #the shift function creates the new fields
for k in xrange(1,w+1):
a=X[Model.weather_columns].shift(k)
newfields=[col+'-'+str(k) for col in a.columns] #suffix with the shift k so every lagged copy gets a distinct column name
a.columns=newfields
#[self.model_columns.append(f) for f in newfields]
X=pd.concat([X,a], axis=1)
X['Output']=output
X=X.dropna()
##debug
#dataset=X.copy()
#dataset['Output']=output.copy()
#self.debug['getInput']=dataset
##
self.dataset=X.copy()
return self
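# Sketch of the lag construction used above: pandas shift moves the series,
# so shifted copies expose past values as model inputs.
# >>> s = pd.Series([1., 2., 3., 4.])
# >>> s.shift(1).tolist()   # becomes the column 'Values-1'
# [nan, 1.0, 2.0, 3.0]
# >>> s.shift(-1).tolist()  # becomes 'Output' when l=1 (one step ahead)
# [2.0, 3.0, 4.0, nan]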
def reduceDataset(self,nr=3,method='PCA'):
'''It reduces the dimensionality of a given dataset using different techniques provided by the sklearn library
Methods available:
'PCA'
'FactorAnalysis'
'KPCArbf','KPCApoly'
'KPCAcosine','KPCAsigmoid'
'IPCA'
'FastICADeflation'
'FastICAParallel'
'Isomap'
'LLE'
'LLEmodified'
'LLEltsa'
'''
dataset=self.ModelInputs['Dataset']
#dataset=self.dataset[Model.in_columns]
#dataset=self.dataset[['Humidity','TemperatureF','Sea Level PressureIn','PrecipitationIn','Dew PointF','Value']]
#PCA
if method=='PCA':
sklearn_pca = sklearnPCA(n_components=nr)
reduced = sklearn_pca.fit_transform(dataset)
#Factor Analysis
elif method=='FactorAnalysis':
fa=FactorAnalysis(n_components=nr)
reduced=fa.fit_transform(dataset)
#kernel pca with rbf kernel
elif method=='KPCArbf':
kpca=KernelPCA(nr,kernel='rbf')
reduced=kpca.fit_transform(dataset)
#kernel pca with poly kernel
elif method=='KPCApoly':
kpca=KernelPCA(nr,kernel='poly')
reduced=kpca.fit_transform(dataset)
#kernel pca with cosine kernel
elif method=='KPCAcosine':
kpca=KernelPCA(nr,kernel='cosine')
reduced=kpca.fit_transform(dataset)
#kernel pca with sigmoid kernel
elif method=='KPCAsigmoid':
kpca=KernelPCA(nr,kernel='sigmoid')
reduced=kpca.fit_transform(dataset)
#Incremental PCA
elif method=='IPCA':
ipca=IncrementalPCA(nr)
reduced=ipca.fit_transform(dataset)
#Fast ICA
elif method=='FastICAParallel':
fip=FastICA(nr,algorithm='parallel')
reduced=fip.fit_transform(dataset)
elif method=='FastICADeflation':
fid=FastICA(nr,algorithm='deflation')
reduced=fid.fit_transform(dataset)
elif method == 'All':
self.dimensionalityReduction(nr=nr)
return self
self.ModelInputs.update({method:reduced})
self.datasetsAvailable.append(method)
return self
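# A minimal usage sketch (assuming `m` is an initialized Model instance and
# getInput() has already populated ModelInputs['Dataset']):
# m.getInput().reduceDataset(nr=3, method='KPCArbf')
# X = m.ModelInputs['KPCArbf']  # reduced (n_samples, 3) matrix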
def remove_outliers(self):
'''It removes the outliers using the MeanShift clustering technique
'''
dataset=self.dataset[self.model_columns].copy()
dataset['Value']=self.dataset.Value.copy()
stand=dataset.apply(preprocessing.scale,axis=0) #the data are standardized because they have different units
val=stand['Value'].as_matrix()
prec=stand['PrecipitationIn'].as_matrix()
dew=stand['Dew PointF'].as_matrix()
hum=stand['Humidity'].as_matrix()
press=stand['Sea Level PressureIn'].as_matrix()
temp=stand['TemperatureF'].as_matrix()
l=[Model.clustering(val,b) for b in [prec,dew,hum,press,temp] ]
l1=[a.groupby('Label').count().index[0] for a in l ] #it finds the cluster with most of the data
l2=[a[a['Label']!=lab] for a,lab in zip(l,l1)] #the biggest cluster is removed in every dataframe
outliers=pd.concat(l2,join='inner',axis=1).index #the concat with join='inner' option finds the intersection between
#the dataframes, the resulting indexes indicate the outliers
#the indexes in outliers are not expressed in seconds
#so I create a fake index
index=list(xrange(0,len(stand)))
#and I remove the indexes that correspond to the outliers
for a in outliers: index.remove(a)
#using iloc I mark the outliers as missing in the original dataset
self.dataset.Value.iloc[outliers]=np.nan
#the dropped values are replaced using linear interpolation
self.dataset.Value=self.dataset.Value.interpolate(method='linear')
self.dataset=self.dataset.dropna()
index=self.dataset.index-self.dataset.index[0]
self.dataset.index=index
self.outliers=outliers #the outliers are saved
#DEBUG
self.debug['Removeoutliers']=dataset
###
return self
def SVregression(self,percentage,inp='Dataset',kern='rbf',method='standard',c=2048,eps=0,gamma=0.01,tau=3):
'''Given the input dataset X and the output dataset Y it finds a regression model using
the support vector regression algorithm of the sklearn library
It returns a tuple: (prediction, median of three different metrics)
'''
X=y=prediction=metrics=None
X=self.ModelInputs[inp].copy() #input dataset
samples=int(percentage*len(X)/100) #evaluating the number of samples given the percentage
x=X[:samples,:] #training input set
try:
y = self.ModelOutput[:samples] #training output set
except KeyError:
self.getOutput()
y = self.ModelOutput[:samples]
test_x=X[samples:,:] #testing input set
test_y=self.ModelOutput[samples:] # testing output set
#Parameters settings based on "Selection of Meta-Parameters for support vector regression"
# Vladimir Cherkassky and Yunqian Ma
if method=='standard':
n=len(y)
std=y.std()
c=tau*std
eps=tau*np.sqrt(np.log(n)/n)
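#e.g. with n=1000, std=1.0, tau=3: c=3.0 and eps=3*sqrt(ln(1000)/1000)=~0.249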
#regression
svr =SVR(kernel=kern,C=c,epsilon=eps,gamma=gamma)
m=None
try:
m=svr.fit(x,y)
except ValueError:
return np.nan,9999
#debug
#self.debug['SVR']=self.ModelOutput
prediction=m.predict(test_x)
self.prediction=prediction
self.OutputTest['Standardized']=test_y
metrics=Model.extractMetrics(prediction,test_y)
return prediction,np.median(metrics)
@staticmethod
def weightedInterp(array):
l=int(len(array)/2)
if array[l]!=999: #the centre sample is present, keep it as-is
return array[l]
#other weight function could be inserted using scipy.signal module
a=list(np.arange(1,l+1))
l1=[(n*m,m) for n,m in zip(array[0:l],a) if n!=999] #samples before the centre, weight grows towards it
a.reverse()
l2=[(n*m,m) for n,m in zip(array[l+1:],a) if n!=999] #samples after the centre, mirrored weights
try:
num=reduce(lambda x,y: x+y, [x[0] for x in l1+l2])
except TypeError:
return np.nan
den= reduce(lambda x,y: x+y, [x[1] for x in l1+l2])
return num/den
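# A quick check of the triangular weighting (13-sample window, 999 = missing;
# a[5] and a[7] are both adjacent to the centre, so each gets weight 6):
# >>> a = np.full(13, 999.); a[5] = 10.; a[7] = 20.
# >>> Model.weightedInterp(a)
# 15.0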
class ParseMap(object):
way={}
node={}
coord={}
way_limit={}
way_City={}
way_Street={}
way_coor={}
'''
#notes:
#the use of the tag_filter seems slower than a simple if-then
#not used at the moment
whitelist = set(('name', 'highway'))
#unused
def tag_filter(tags):
for key in tags.keys():
if key not in whitelist:
del tags[key]
if 'name' in tags and len(tags) == 1:
# tags with only a name have no information
# how to handle this element
del tags['name']
'''
def ways_stationary(self,ways):
for osmid, tags, refs in ways:
if tags.has_key('building'):
self.way[osmid]=refs
if tags.has_key('addr:city'): #sometimes the ways have also the city name in tags
self.way_City[osmid]=tags['addr:city']
else:
self.way_City[osmid]=None
if tags.has_key('name'):
self.way_Street[osmid]=tags['name']
else:
self.way_Street[osmid]=None
def ways(self,ways):
for osmid, tags, refs in ways:
if tags.has_key('highway'): #just the streets are needed
self.way[osmid]=refs
if tags.has_key('addr:city'): #sometimes the ways have also the city name in tags
self.way_City[osmid]=tags['addr:city']
else:
self.way_City[osmid]=None
if tags.has_key('name'):
self.way_Street[osmid]=tags['name']
else:
self.way_Street[osmid]=None
def nodes(self,nodes):
for idnode,tag,coor in nodes:
lat=coor[1] #it's necessary because the coordinates in the nodes
lon=coor[0] #are (lon,lat) while in the coords are (lat,lon)
self.node[idnode]=((lat,lon), tag)
def coords(self,coords):
for osm_id, lon, lat in coords:
self.coord[osm_id]=(lat,lon)
def fill_way_coords(self): #return a dictionary: {osmid:[list of nodes coordinates]}
for osmid in self.way.keys():
l=[]
for ref in self.way[osmid]:
try:
val=self.node[ref][0]
except KeyError:
val=self.coord[ref]
l.append(val)
self.way_coor[osmid]=l
def getRange(self):
for osmid in self.way.keys():
a=self.way_coor[osmid]
c=map(list, zip(*a)) #to unzip a list of tuples [(lat1,lon1),(lat2,lon2)] into [[lat1,lat2],[lon1,lon2]]
lat=c[0]
lon=c[1]
self.way_limit[osmid]=[min(lat),min(lon),max(lat),max(lon)]
class ParseWeather(object):
'''Class that implements methods to get weather information from wunderground.com
'''
key='3187b62a57755d52'
def __init__(self):
if not(ParseWeather.key):
raise Exception('Key is not present, register at http://www.wunderground.com/weather/api/ to get one')
def getLocation(self,lat,lon):
'''Given latitude and longitude it returns the city, country and state corresponding to the coordinates'''
key=ParseWeather.key
url_template='http://api.wunderground.com/api/{key}/geolookup/q/{latitude},{longitude}.json'
url=url_template.format(key=key,latitude=lat,longitude=lon)
g = urllib2.urlopen(url)
json_string = g.read()
location = json.loads(json_string)
g.close()
diz=location['location']['nearby_weather_stations']['airport']['station'][0]
return diz['city'].replace(' ','_'),diz['country'],diz['state']
def getWeather(self,date,c,s):
'''Given a date, a city and a state it returns a DataFrame with the hourly weather observations'''
k=ParseWeather.key
d=date[:10].replace('-','')
url_template='http://api.wunderground.com/api/{key}/history_{date}/q/{state}/{city}.json'
url=url_template.format(key=k,date=d,state=s,city=c)
f = urllib2.urlopen(url)
json_string = f.read()
weather = json.loads(json_string) #parsing the json
f.close()
forecast=weather['history']['observations']
l=[]
for n in xrange(0,len(forecast)):
#every iteration defines a row containing the weather information for a single hour
tmp=pd.DataFrame(forecast[n]) #definition of the dataframe
col=['utcdate','tempi','dewpti','hum','pressurei','visi','wdire','wspdi','precipi','conds','snow','wdird']
year=tmp.ix['year','utcdate'] #info about the day is extracted
month=tmp.ix['mon','utcdate']
day=tmp.ix['mday','utcdate']
hour=tmp.ix['hour','utcdate']
minute=tmp.ix['min','utcdate']
date= year +'-' + month + '-' + day + ' ' + hour + ':' + minute + ':00'
#the name of the columns are changed
newcol=['DateUTC', 'TemperatureF', 'Dew PointF', 'Humidity',
'Sea Level PressureIn', 'VisibilityMPH', 'Wind Direction',
'Wind SpeedMPH', 'PrecipitationIn', 'Conditions','Snow',
'WindDirDegrees']
tmp=tmp[col]
tmp.columns=newcol
tmp=tmp.head(1)
tmp['DateUTC']=date
tmp.index=[hour]
l.append(tmp)
df=pd.concat(l) #all the weather info are concatenated in a single dataframe
df=df.convert_objects(convert_dates='coerce')
return df
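# A minimal usage sketch (hypothetical coordinates; requires a valid
# wunderground API key in ParseWeather.key):
# pw = ParseWeather()
# city, country, state = pw.getLocation(45.46, 9.19)
# frame = pw.getWeather('2015-06-01 00:00:00', city, state)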
|
mit
|
yl565/statsmodels
|
statsmodels/tsa/descriptivestats.py
|
33
|
2304
|
# -*- coding: utf-8 -*-
"""Descriptive Statistics for Time Series
Created on Sat Oct 30 14:24:08 2010
Author: josef-pktd
License: BSD(3clause)
"""
import numpy as np
from . import stattools as stt
#todo: check subclassing for descriptive stats classes
class TsaDescriptive(object):
'''collection of descriptive statistical methods for time series
'''
def __init__(self, data, label=None, name=''):
self.data = data
self.label = label
self.name = name
def filter(self, num, den):
from scipy.signal import lfilter
xfiltered = lfilter(num, den, self.data)
return self.__class__(xfiltered, self.label, self.name + '_filtered')
def detrend(self, order=1):
from . import tsatools
xdetrended = tsatools.detrend(self.data, order=order)
return self.__class__(xdetrended, self.label, self.name + '_detrended')
def fit(self, order=(1,0,1), **kwds):
from .arima_model import ARMA
self.mod = ARMA(self.data)
self.res = self.mod.fit(order=order, **kwds)
#self.estimated_process =
return self.res
def acf(self, nlags=40):
return stt.acf(self.data, nlags=nlags)
def pacf(self, nlags=40):
return stt.pacf(self.data, nlags=nlags)
def periodogram(self):
#doesn't return frequencies
return stt.periodogram(self.data)
# copied from fftarma.py
def plot4(self, fig=None, nobs=100, nacf=20, nfreq=100):
data = self.data
acf = self.acf(nacf)
pacf = self.pacf(nacf)
w = np.linspace(0, np.pi, nfreq, endpoint=False)
spdr = self.periodogram()[:nfreq] #(w)
if fig is None:
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(2,2,1)
namestr = ' for %s' % self.name if self.name else ''
ax.plot(data)
ax.set_title('Time series' + namestr)
ax = fig.add_subplot(2,2,2)
ax.plot(acf)
ax.set_title('Autocorrelation' + namestr)
ax = fig.add_subplot(2,2,3)
ax.plot(spdr) # (wr, spdr)
ax.set_title('Power Spectrum' + namestr)
ax = fig.add_subplot(2,2,4)
ax.plot(pacf)
ax.set_title('Partial Autocorrelation' + namestr)
return fig
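# A minimal usage sketch (assuming x is a 1-D array of observations):
# ts = TsaDescriptive(x, name='demo')
# detrended = ts.detrend(order=1)  # returns a new TsaDescriptive instance
# fig = detrended.plot4(nacf=20, nfreq=100)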
|
bsd-3-clause
|
saethlin/python-astronomy-workshops
|
examples/fitting_examples.py
|
1
|
1566
|
import time
import numpy as np
import scipy.optimize
import scipy.special  # provides wofz, used by the Voigt profile below
from matplotlib import pyplot as plt
def rms(data, model):
return np.sqrt(np.mean((data - model)**2))
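# e.g. rms(np.array([1., 2.]), np.array([0., 2.])) == sqrt(0.5) ~= 0.7071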
def gaussian(x, height, center, width):
return height*np.exp(-(center - x)**2 / (2*width**2))
def voigt(x, height, center, gauss_width, lorentz_width):
dst = x-center
z = (dst+(abs(lorentz_width)*1j))/(abs(gauss_width)*np.sqrt(2))
return height * scipy.special.wofz(z).real/(abs(gauss_width)*np.sqrt(2*np.pi))
true_params = (1e4, 0, 20, 20)
npts = int(1e5)  # linspace expects an integer sample count
xdata = np.linspace(-100, 100, npts)
ydata = voigt(xdata, *true_params) + np.random.randn(xdata.size)
print('true rms:', rms(ydata, voigt(xdata, *true_params)))
print()
# Polynomial model
start = time.time()
coefficients = np.polyfit(xdata, ydata, 10)
print('poly time:', time.time()-start)
poly_fit = np.polyval(coefficients, xdata)
print('poly rms:', rms(poly_fit, ydata))
print()
# Gaussian model
start = time.time()
model = scipy.optimize.curve_fit(gaussian, xdata, ydata, p0=[100,0,20])[0]
print('gaussian time:', time.time()-start)
gaussian_fit = gaussian(xdata, *model)
print('gaussian rms:', rms(gaussian_fit, ydata))
print()
# Voigt model
start = time.time()
model, _ = scipy.optimize.curve_fit(voigt, xdata, ydata)
print('voigt time:', time.time()-start)
voigt_fit = voigt(xdata, *model)
print('voigt rms:', rms(voigt_fit, ydata))
print()
plt.plot(xdata, ydata, 'k.')
plt.plot(xdata, poly_fit, color='b', lw=3)
plt.plot(xdata, gaussian_fit, color='r', lw=3)
plt.plot(xdata, voigt_fit, color='y', lw=3)
plt.show()
|
mit
|
michigraber/scikit-learn
|
sklearn/preprocessing/__init__.py
|
31
|
1235
|
"""
The :mod:`sklearn.preprocessing` module includes scaling, centering,
normalization, binarization and imputation methods.
"""
from .data import Binarizer
from .data import KernelCenterer
from .data import MinMaxScaler
from .data import MaxAbsScaler
from .data import Normalizer
from .data import RobustScaler
from .data import StandardScaler
from .data import add_dummy_feature
from .data import binarize
from .data import normalize
from .data import scale
from .data import robust_scale
from .data import maxabs_scale
from .data import minmax_scale
from .data import OneHotEncoder
from .data import PolynomialFeatures
from .label import label_binarize
from .label import LabelBinarizer
from .label import LabelEncoder
from .label import MultiLabelBinarizer
from .imputation import Imputer
__all__ = [
'Binarizer',
'Imputer',
'KernelCenterer',
'LabelBinarizer',
'LabelEncoder',
'MultiLabelBinarizer',
'MinMaxScaler',
'MaxAbsScaler',
'Normalizer',
'OneHotEncoder',
'RobustScaler',
'StandardScaler',
'add_dummy_feature',
'PolynomialFeatures',
'binarize',
'normalize',
'scale',
'robust_scale',
'maxabs_scale',
'minmax_scale',
'label_binarize',
]
|
bsd-3-clause
|
mne-tools/mne-tools.github.io
|
0.21/_downloads/1355f558a1df99f9a2cec657b05c2b56/plot_sleep.py
|
7
|
12214
|
# -*- coding: utf-8 -*-
"""
.. _tut-sleep-stage-classif:
Sleep stage classification from polysomnography (PSG) data
==========================================================
.. note:: This code is taken from the analysis code used in [3]_. If you reuse
this code please consider citing this work.
This tutorial explains how to perform a toy polysomnography analysis that
answers the following question:
.. important:: Given two subjects from the Sleep Physionet dataset [1]_ [2]_,
namely *Alice* and *Bob*, how well can we predict the sleep
stages of *Bob* from *Alice's* data?
This problem is tackled as a supervised multiclass classification task. The aim
is to predict the sleep stage, out of 5 possible stages, for each 30-second
chunk of data.
.. contents:: This tutorial covers:
:local:
:depth: 2
.. _Pipeline: https://scikit-learn.org/stable/modules/generated/sklearn.pipeline.Pipeline.html
.. _FunctionTransformer: https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.FunctionTransformer.html
.. _physionet_labels: https://physionet.org/physiobank/database/sleep-edfx/#sleep-cassette-study-and-data
""" # noqa: E501
# Authors: Alexandre Gramfort <[email protected]>
# Stanislas Chambon <[email protected]>
# Joan Massich <[email protected]>
#
# License: BSD Style.
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne.datasets.sleep_physionet.age import fetch_data
from mne.time_frequency import psd_welch
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import FunctionTransformer
##############################################################################
# Load the data
# -------------
#
# Here we download the data from two subjects and the end goal is to obtain
# :term:`epochs` and its associated ground truth.
#
# MNE-Python provides us with
# :func:`mne.datasets.sleep_physionet.age.fetch_data` to conveniently download
# data from the Sleep Physionet dataset [1]_ [2]_.
# Given a list of subjects and records, the fetcher downloads the data and
# provides us, for each subject, with a pair of files:
#
# * ``-PSG.edf`` containing the polysomnography. The :term:`raw` data from the
# EEG helmet,
# * ``-Hypnogram.edf`` containing the :term:`annotations` recorded by an
# expert.
#
# Combining these two into a :class:`mne.io.Raw` object, we can then extract
# :term:`events` based on the descriptions of the annotations to obtain the
# :term:`epochs`.
#
# Read the PSG data and Hypnograms to create a raw object
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
ALICE, BOB = 0, 1
[alice_files, bob_files] = fetch_data(subjects=[ALICE, BOB], recording=[1])
mapping = {'EOG horizontal': 'eog',
'Resp oro-nasal': 'misc',
'EMG submental': 'misc',
'Temp rectal': 'misc',
'Event marker': 'misc'}
raw_train = mne.io.read_raw_edf(alice_files[0])
annot_train = mne.read_annotations(alice_files[1])
raw_train.set_annotations(annot_train, emit_warning=False)
raw_train.set_channel_types(mapping)
# plot some data
raw_train.plot(duration=60, scalings='auto')
##############################################################################
# Extract 30s events from annotations
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# The Sleep Physionet dataset is annotated using
# `8 labels <physionet_labels_>`_:
# Wake (W), Stage 1, Stage 2, Stage 3, Stage 4 corresponding to the range from
# light sleep to deep sleep, REM sleep (R) where REM is the abbreviation for
# Rapid Eye Movement sleep, movement (M), and Stage (?) for any non-scored
# segment.
#
# We will work only with 5 stages: Wake (W), Stage 1, Stage 2, Stage 3/4, and
# REM sleep (R). To do so, we use the ``event_id`` parameter in
# :func:`mne.events_from_annotations` to select which events we are
# interested in, and we associate an event identifier with each of them.
#
# Moreover, the recordings contain long awake (W) regions before and after each
# night. To limit the impact of class imbalance, we trim each recording by only
# keeping 30 minutes of wake time before the first occurrence and 30 minutes
# after the last occurrence of sleep stages.
annotation_desc_2_event_id = {'Sleep stage W': 1,
'Sleep stage 1': 2,
'Sleep stage 2': 3,
'Sleep stage 3': 4,
'Sleep stage 4': 4,
'Sleep stage R': 5}
# keep last 30-min wake events before sleep and first 30-min wake events after
# sleep and redefine annotations on raw data
annot_train.crop(annot_train[1]['onset'] - 30 * 60,
annot_train[-2]['onset'] + 30 * 60)
raw_train.set_annotations(annot_train, emit_warning=False)
events_train, _ = mne.events_from_annotations(
raw_train, event_id=annotation_desc_2_event_id, chunk_duration=30.)
# create a new event_id that unifies stages 3 and 4
event_id = {'Sleep stage W': 1,
'Sleep stage 1': 2,
'Sleep stage 2': 3,
'Sleep stage 3/4': 4,
'Sleep stage R': 5}
# plot events
fig = mne.viz.plot_events(events_train, event_id=event_id,
sfreq=raw_train.info['sfreq'],
first_samp=events_train[0, 0])
# keep the color-code for further plotting
stage_colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
##############################################################################
# Create Epochs from the data based on the events found in the annotations
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
tmax = 30. - 1. / raw_train.info['sfreq']  # tmax is included
epochs_train = mne.Epochs(raw=raw_train, events=events_train,
event_id=event_id, tmin=0., tmax=tmax, baseline=None)
print(epochs_train)
##############################################################################
# Applying the same steps to the test data from Bob
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
raw_test = mne.io.read_raw_edf(bob_files[0])
annot_test = mne.read_annotations(bob_files[1])
annot_test.crop(annot_test[1]['onset'] - 30 * 60,
annot_test[-2]['onset'] + 30 * 60)
raw_test.set_annotations(annot_test, emit_warning=False)
raw_test.set_channel_types(mapping)
events_test, _ = mne.events_from_annotations(
raw_test, event_id=annotation_desc_2_event_id, chunk_duration=30.)
epochs_test = mne.Epochs(raw=raw_test, events=events_test, event_id=event_id,
tmin=0., tmax=tmax, baseline=None)
print(epochs_test)
##############################################################################
# Feature Engineering
# -------------------
#
# Observing the power spectral density (PSD) plot of the :term:`epochs` grouped
# by sleeping stage we can see that different sleep stages have different
# signatures. These signatures remain similar between Alice and Bob's data.
#
# In the rest of this section we will create EEG features based on relative power
# in specific frequency bands to capture this difference between the sleep
# stages in our data.
# visualize Alice vs. Bob PSD by sleep stage.
fig, (ax1, ax2) = plt.subplots(ncols=2)
# iterate over the subjects
stages = sorted(event_id.keys())
for ax, title, epochs in zip([ax1, ax2],
['Alice', 'Bob'],
[epochs_train, epochs_test]):
for stage, color in zip(stages, stage_colors):
epochs[stage].plot_psd(area_mode=None, color=color, ax=ax,
fmin=0.1, fmax=20., show=False,
average=True, spatial_colors=False)
ax.set(title=title, xlabel='Frequency (Hz)')
ax2.set(ylabel='µV^2/Hz (dB)')
ax2.legend(ax2.lines[2::3], stages)
plt.show()
##############################################################################
# Design a scikit-learn transformer from a Python function
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# We will now create a function to extract EEG features based on relative power
# in specific frequency bands to be able to predict sleep stages from EEG
# signals.
def eeg_power_band(epochs):
"""EEG relative power band feature extraction.
This function takes an ``mne.Epochs`` object and creates EEG features based
on relative power in specific frequency bands that are compatible with
scikit-learn.
Parameters
----------
epochs : Epochs
The data.
Returns
-------
X : numpy array of shape [n_samples, 5]
Transformed data.
"""
# specific frequency bands
FREQ_BANDS = {"delta": [0.5, 4.5],
"theta": [4.5, 8.5],
"alpha": [8.5, 11.5],
"sigma": [11.5, 15.5],
"beta": [15.5, 30]}
psds, freqs = psd_welch(epochs, picks='eeg', fmin=0.5, fmax=30.)
# Normalize the PSDs
psds /= np.sum(psds, axis=-1, keepdims=True)
X = []
for fmin, fmax in FREQ_BANDS.values():
psds_band = psds[:, :, (freqs >= fmin) & (freqs < fmax)].mean(axis=-1)
X.append(psds_band.reshape(len(psds), -1))
return np.concatenate(X, axis=1)
##############################################################################
# Multiclass classification workflow using scikit-learn
# -----------------------------------------------------
#
# To answer the question of how well we can predict the sleep stages of Bob
# from Alice's data while avoiding as much boilerplate code as possible, we
# will take advantage of two key features of scikit-learn:
# `Pipeline`_ , and `FunctionTransformer`_.
#
# A scikit-learn pipeline composes an estimator as a sequence of transforms
# followed by a final estimator, while the FunctionTransformer converts a
# Python function into an estimator-compatible object. In this manner we can
# create a scikit-learn estimator that takes :class:`mne.Epochs` as input,
# thanks to the ``eeg_power_band`` function we just created.
pipe = make_pipeline(FunctionTransformer(eeg_power_band, validate=False),
RandomForestClassifier(n_estimators=100, random_state=42))
# Train
y_train = epochs_train.events[:, 2]
pipe.fit(epochs_train, y_train)
# Test
y_pred = pipe.predict(epochs_test)
# Assess the results
y_test = epochs_test.events[:, 2]
acc = accuracy_score(y_test, y_pred)
print("Accuracy score: {}".format(acc))
##############################################################################
# In short, yes. We can predict Bob's sleeping stages based on Alice's data.
#
# Further analysis of the data
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# We can check the confusion matrix or the classification report.
print(confusion_matrix(y_test, y_pred))
##############################################################################
#
print(classification_report(y_test, y_pred, target_names=event_id.keys()))
##############################################################################
# Exercise
# --------
#
# Fetch 50 subjects from the Physionet database and run a 5-fold
# cross-validation leaving each time 10 subjects out in the test set.
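#
# A possible skeleton (a sketch only, not the reference solution;
# ``epochs_from_files`` is a hypothetical helper you would write yourself by
# repeating the loading steps shown above for one subject):
#
# from sklearn.model_selection import GroupKFold, cross_val_score
# files = fetch_data(subjects=range(50), recording=[1])
# X_list, y_list, groups = [], [], []
# for subj, f in enumerate(files):
#     epochs = epochs_from_files(f)            # hypothetical helper
#     X_list.append(eeg_power_band(epochs))    # features per epoch
#     y_list.append(epochs.events[:, 2])
#     groups.append(np.full(len(epochs), subj))
# X, y = np.concatenate(X_list), np.concatenate(y_list)
# groups = np.concatenate(groups)
# cv = GroupKFold(n_splits=5)                  # 10 subjects held out per fold
# scores = cross_val_score(RandomForestClassifier(n_estimators=100),
#                          X, y, cv=cv, groups=groups)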
#
# References
# ----------
#
# .. [1] B Kemp, AH Zwinderman, B Tuk, HAC Kamphuisen, JJL Oberyé. Analysis of
# a sleep-dependent neuronal feedback loop: the slow-wave
# microcontinuity of the EEG. IEEE-BME 47(9):1185-1194 (2000).
#
# .. [2] Goldberger AL, Amaral LAN, Glass L, Hausdorff JM, Ivanov PCh,
# Mark RG, Mietus JE, Moody GB, Peng C-K, Stanley HE. (2000)
# PhysioBank, PhysioToolkit, and PhysioNet: Components of a New
# Research Resource for Complex Physiologic Signals.
# Circulation 101(23):e215-e220
#
# .. [3] Chambon, S., Galtier, M., Arnal, P., Wainrib, G. and Gramfort, A.
# (2018) A Deep Learning Architecture for Temporal Sleep Stage
# Classification Using Multivariate and Multimodal Time Series.
# IEEE Trans. on Neural Systems and Rehabilitation Engineering 26:
# (758-769).
#
|
bsd-3-clause
|
sumspr/scikit-learn
|
sklearn/linear_model/coordinate_descent.py
|
12
|
75078
|
# Author: Alexandre Gramfort <[email protected]>
# Fabian Pedregosa <[email protected]>
# Olivier Grisel <[email protected]>
# Gael Varoquaux <[email protected]>
#
# License: BSD 3 clause
import sys
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy import sparse
from .base import LinearModel, _pre_fit
from ..base import RegressorMixin
from .base import center_data, sparse_center_data
from ..utils import check_array, check_X_y, deprecated
from ..utils.validation import check_random_state
from ..cross_validation import check_cv
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..externals.six.moves import xrange
from ..utils.extmath import safe_sparse_dot
from ..utils.validation import check_is_fitted
from ..utils.validation import column_or_1d
from ..utils import ConvergenceWarning
from . import cd_fast
###############################################################################
# Paths functions
def _alpha_grid(X, y, Xy=None, l1_ratio=1.0, fit_intercept=True,
eps=1e-3, n_alphas=100, normalize=False, copy_X=True):
""" Compute the grid of alpha values for elastic net parameter search
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication
y : ndarray, shape (n_samples,)
Target values
Xy : array-like, optional
Xy = np.dot(X.T, y) that can be precomputed.
l1_ratio : float
The elastic net mixing parameter, with ``0 <= l1_ratio <= 1``.
For ``l1_ratio = 0`` the penalty is an L2 penalty. ``For
l1_ratio = 1`` it is an L1 penalty. For ``0 < l1_ratio <
1``, the penalty is a combination of L1 and L2.
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, optional
Number of alphas along the regularization path
fit_intercept : boolean, default True
Whether to fit an intercept or not
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
"""
n_samples = len(y)
sparse_center = False
if Xy is None:
X_sparse = sparse.isspmatrix(X)
sparse_center = X_sparse and (fit_intercept or normalize)
X = check_array(X, 'csc',
copy=(copy_X and fit_intercept and not X_sparse))
if not X_sparse:
# X can be touched inplace thanks to the above line
X, y, _, _, _ = center_data(X, y, fit_intercept,
normalize, copy=False)
Xy = safe_sparse_dot(X.T, y, dense_output=True)
if sparse_center:
# Workaround to find alpha_max for sparse matrices.
# since we should not destroy the sparsity of such matrices.
_, _, X_mean, _, X_std = sparse_center_data(X, y, fit_intercept,
normalize)
mean_dot = X_mean * np.sum(y)
if Xy.ndim == 1:
Xy = Xy[:, np.newaxis]
if sparse_center:
if fit_intercept:
Xy -= mean_dot[:, np.newaxis]
if normalize:
Xy /= X_std[:, np.newaxis]
alpha_max = (np.sqrt(np.sum(Xy ** 2, axis=1)).max() /
(n_samples * l1_ratio))
if alpha_max <= np.finfo(float).resolution:
alphas = np.empty(n_alphas)
alphas.fill(np.finfo(float).resolution)
return alphas
return np.logspace(np.log10(alpha_max * eps), np.log10(alpha_max),
num=n_alphas)[::-1]
def lasso_path(X, y, eps=1e-3, n_alphas=100, alphas=None,
precompute='auto', Xy=None, copy_X=True, coef_init=None,
verbose=False, return_n_iter=False, positive=False, **params):
"""Compute Lasso path with coordinate descent
The Lasso optimization function varies for mono and multi-outputs.
For mono-output tasks it is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
For multi-output tasks it is::
(1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <lasso>`.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication. If ``y`` is mono-output then ``X``
can be sparse.
y : ndarray, shape (n_samples,), or (n_samples, n_outputs)
Target values
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, optional
Number of alphas along the regularization path
alphas : ndarray, optional
List of alphas where to compute the models.
If ``None`` alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
Xy : array-like, optional
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
coef_init : array, shape (n_features, ) | None
The initial values of the coefficients.
verbose : bool or integer
Amount of verbosity.
params : kwargs
keyword arguments passed to the coordinate descent solver.
positive : bool, default False
If set to True, forces coefficients to be positive.
return_n_iter : bool
whether to return the number of iterations or not.
Returns
-------
alphas : array, shape (n_alphas,)
The alphas along the path where models are computed.
coefs : array, shape (n_features, n_alphas) or \
(n_outputs, n_features, n_alphas)
Coefficients along the path.
dual_gaps : array, shape (n_alphas,)
The dual gaps at the end of the optimization for each alpha.
n_iters : array-like, shape (n_alphas,)
The number of iterations taken by the coordinate descent optimizer to
reach the specified tolerance for each alpha.
Notes
-----
See examples/linear_model/plot_lasso_coordinate_descent_path.py
for an example.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
Note that in certain cases, the Lars solver may be significantly
faster at implementing this functionality. In particular, linear
interpolation can be used to retrieve model coefficients between the
values output by lars_path
Examples
---------
Comparing lasso_path and lars_path with interpolation:
>>> X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T
>>> y = np.array([1, 2, 3.1])
>>> # Use lasso_path to compute a coefficient path
>>> _, coef_path, _ = lasso_path(X, y, alphas=[5., 1., .5])
>>> print(coef_path)
[[ 0. 0. 0.46874778]
[ 0.2159048 0.4425765 0.23689075]]
>>> # Now use lars_path and 1D linear interpolation to compute the
>>> # same path
>>> from sklearn.linear_model import lars_path
>>> alphas, active, coef_path_lars = lars_path(X, y, method='lasso')
>>> from scipy import interpolate
>>> coef_path_continuous = interpolate.interp1d(alphas[::-1],
... coef_path_lars[:, ::-1])
>>> print(coef_path_continuous([5., 1., .5]))
[[ 0. 0. 0.46915237]
[ 0.2159048 0.4425765 0.23668876]]
See also
--------
lars_path
Lasso
LassoLars
LassoCV
LassoLarsCV
sklearn.decomposition.sparse_encode
"""
return enet_path(X, y, l1_ratio=1., eps=eps, n_alphas=n_alphas,
alphas=alphas, precompute=precompute, Xy=Xy,
copy_X=copy_X, coef_init=coef_init, verbose=verbose,
positive=positive, **params)
def enet_path(X, y, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
precompute='auto', Xy=None, copy_X=True, coef_init=None,
verbose=False, return_n_iter=False, positive=False, **params):
"""Compute elastic net path with coordinate descent
The elastic net optimization function varies for mono and multi-outputs.
For mono-output tasks it is::
1 / (2 * n_samples) * ||y - Xw||^2_2 +
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
For multi-output tasks it is::
(1 / (2 * n_samples)) * ||Y - XW||^Fro_2
+ alpha * l1_ratio * ||W||_21
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <elastic_net>`.
Parameters
----------
X : {array-like}, shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication. If ``y`` is mono-output then ``X``
can be sparse.
y : ndarray, shape (n_samples,) or (n_samples, n_outputs)
Target values
l1_ratio : float, optional
float between 0 and 1 passed to elastic net (scaling between
l1 and l2 penalties). ``l1_ratio=1`` corresponds to the Lasso
eps : float
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, optional
Number of alphas along the regularization path
alphas : ndarray, optional
List of alphas where to compute the models.
If None alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
Xy : array-like, optional
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
coef_init : array, shape (n_features, ) | None
The initial values of the coefficients.
verbose : bool or integer
Amount of verbosity.
params : kwargs
keyword arguments passed to the coordinate descent solver.
return_n_iter : bool
whether to return the number of iterations or not.
positive : bool, default False
If set to True, forces coefficients to be positive.
Returns
-------
alphas : array, shape (n_alphas,)
The alphas along the path where models are computed.
coefs : array, shape (n_features, n_alphas) or \
(n_outputs, n_features, n_alphas)
Coefficients along the path.
dual_gaps : array, shape (n_alphas,)
The dual gaps at the end of the optimization for each alpha.
n_iters : array-like, shape (n_alphas,)
The number of iterations taken by the coordinate descent optimizer to
reach the specified tolerance for each alpha.
(Is returned when ``return_n_iter`` is set to True).
Notes
-----
See examples/plot_lasso_coordinate_descent_path.py for an example.
See also
--------
MultiTaskElasticNet
MultiTaskElasticNetCV
ElasticNet
ElasticNetCV
"""
# We expect X and y to be already float64 Fortran ordered when bypassing
# checks
check_input = 'check_input' not in params or params['check_input']
pre_fit = 'pre_fit' not in params or params['pre_fit']
if check_input:
X = check_array(X, 'csc', dtype=np.float64, order='F', copy=copy_X)
y = check_array(y, 'csc', dtype=np.float64, order='F', copy=False,
ensure_2d=False)
if Xy is not None:
Xy = check_array(Xy, 'csc', dtype=np.float64, order='F',
copy=False,
ensure_2d=False)
n_samples, n_features = X.shape
multi_output = False
if y.ndim != 1:
multi_output = True
_, n_outputs = y.shape
# MultiTaskElasticNet does not support sparse matrices
if not multi_output and sparse.isspmatrix(X):
if 'X_mean' in params:
# As sparse matrices are not actually centered we need this
# to be passed to the CD solver.
X_sparse_scaling = params['X_mean'] / params['X_std']
else:
X_sparse_scaling = np.zeros(n_features)
# X should be normalized and fit already if function is called
# from ElasticNet.fit
if pre_fit:
X, y, X_mean, y_mean, X_std, precompute, Xy = \
_pre_fit(X, y, Xy, precompute, normalize=False,
fit_intercept=False,
copy=False, Xy_precompute_order='F')
if alphas is None:
# No need to normalize or fit_intercept: it has been done
# above
alphas = _alpha_grid(X, y, Xy=Xy, l1_ratio=l1_ratio,
fit_intercept=False, eps=eps, n_alphas=n_alphas,
normalize=False, copy_X=False)
else:
alphas = np.sort(alphas)[::-1] # make sure alphas are properly ordered
n_alphas = len(alphas)
tol = params.get('tol', 1e-4)
max_iter = params.get('max_iter', 1000)
dual_gaps = np.empty(n_alphas)
n_iters = []
rng = check_random_state(params.get('random_state', None))
selection = params.get('selection', 'cyclic')
if selection not in ['random', 'cyclic']:
raise ValueError("selection should be either random or cyclic.")
random = (selection == 'random')
if not multi_output:
coefs = np.empty((n_features, n_alphas), dtype=np.float64)
else:
coefs = np.empty((n_outputs, n_features, n_alphas),
dtype=np.float64)
if coef_init is None:
coef_ = np.asfortranarray(np.zeros(coefs.shape[:-1]))
else:
coef_ = np.asfortranarray(coef_init)
for i, alpha in enumerate(alphas):
l1_reg = alpha * l1_ratio * n_samples
l2_reg = alpha * (1.0 - l1_ratio) * n_samples
if not multi_output and sparse.isspmatrix(X):
model = cd_fast.sparse_enet_coordinate_descent(
coef_, l1_reg, l2_reg, X.data, X.indices,
X.indptr, y, X_sparse_scaling,
max_iter, tol, rng, random, positive)
elif multi_output:
model = cd_fast.enet_coordinate_descent_multi_task(
coef_, l1_reg, l2_reg, X, y, max_iter, tol, rng, random)
elif isinstance(precompute, np.ndarray):
# We expect precompute to be already Fortran ordered when bypassing
# checks
if check_input:
precompute = check_array(precompute, 'csc', dtype=np.float64,
order='F')
model = cd_fast.enet_coordinate_descent_gram(
coef_, l1_reg, l2_reg, precompute, Xy, y, max_iter,
tol, rng, random, positive)
elif precompute is False:
model = cd_fast.enet_coordinate_descent(
coef_, l1_reg, l2_reg, X, y, max_iter, tol, rng, random,
positive)
else:
raise ValueError("Precompute should be one of True, False, "
"'auto' or array-like")
coef_, dual_gap_, eps_, n_iter_ = model
coefs[..., i] = coef_
dual_gaps[i] = dual_gap_
n_iters.append(n_iter_)
if dual_gap_ > eps_:
warnings.warn('Objective did not converge.' +
' You might want' +
' to increase the number of iterations',
ConvergenceWarning)
if verbose:
if verbose > 2:
print(model)
elif verbose > 1:
print('Path: %03i out of %03i' % (i, n_alphas))
else:
sys.stderr.write('.')
if return_n_iter:
return alphas, coefs, dual_gaps, n_iters
return alphas, coefs, dual_gaps
###############################################################################
# ElasticNet model
class ElasticNet(LinearModel, RegressorMixin):
"""Linear regression with combined L1 and L2 priors as regularizer.
Minimizes the objective function::
1 / (2 * n_samples) * ||y - Xw||^2_2 +
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
If you are interested in controlling the L1 and L2 penalty
separately, keep in mind that this is equivalent to::
a * L1 + b * L2
where::
alpha = a + b and l1_ratio = a / (a + b)
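For example, ``a = 0.1`` and ``b = 0.4`` give ``alpha = 0.5`` and
``l1_ratio = 0.2``.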
The parameter l1_ratio corresponds to alpha in the glmnet R package while
alpha corresponds to the lambda parameter in glmnet. Specifically, l1_ratio
= 1 is the lasso penalty. Currently, l1_ratio <= 0.01 is not reliable,
unless you supply your own sequence of alpha.
Read more in the :ref:`User Guide <elastic_net>`.
Parameters
----------
alpha : float
Constant that multiplies the penalty terms. Defaults to 1.0
See the notes for the exact mathematical meaning of this
parameter.
``alpha = 0`` is equivalent to an ordinary least square, solved
by the :class:`LinearRegression` object. For numerical
reasons, using ``alpha = 0`` with the Lasso object is not advised
and you should prefer the LinearRegression object.
l1_ratio : float
The ElasticNet mixing parameter, with ``0 <= l1_ratio <= 1``. For
``l1_ratio = 0`` the penalty is an L2 penalty. ``For l1_ratio = 1`` it
is an L1 penalty. For ``0 < l1_ratio < 1``, the penalty is a
combination of L1 and L2.
fit_intercept : bool
Whether the intercept should be estimated or not. If ``False``, the
data is assumed to be already centered.
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument. For sparse input
this option is always ``True`` to preserve sparsity.
WARNING : The ``'auto'`` option is deprecated and will
be removed in 0.18.
max_iter : int, optional
The maximum number of iterations
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to ``True``, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
positive : bool, optional
When set to ``True``, forces the coefficients to be positive.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
coef_ : array, shape (n_features,) | (n_targets, n_features)
parameter vector (w in the cost function formula)
sparse_coef_ : scipy.sparse matrix, shape (n_features, 1) | \
(n_targets, n_features)
``sparse_coef_`` is a readonly property derived from ``coef_``
intercept_ : float | array, shape (n_targets,)
independent term in decision function.
n_iter_ : array-like, shape (n_targets,)
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Notes
-----
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
See also
--------
SGDRegressor: implements elastic net regression with incremental training.
SGDClassifier: implements logistic regression with elastic net penalty
(``SGDClassifier(loss="log", penalty="elasticnet")``).
"""
path = staticmethod(enet_path)
def __init__(self, alpha=1.0, l1_ratio=0.5, fit_intercept=True,
normalize=False, precompute=False, max_iter=1000,
copy_X=True, tol=1e-4, warm_start=False, positive=False,
random_state=None, selection='cyclic'):
self.alpha = alpha
self.l1_ratio = l1_ratio
self.coef_ = None
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
self.positive = positive
self.intercept_ = 0.0
self.random_state = random_state
self.selection = selection
def fit(self, X, y, check_input=True):
"""Fit model with coordinate descent.
Parameters
-----------
X : ndarray or scipy.sparse matrix, (n_samples, n_features)
Data
y : ndarray, shape (n_samples,) or (n_samples, n_targets)
Target
Notes
-----
Coordinate descent is an algorithm that considers each column of
data at a time hence it will automatically convert the X input
as a Fortran-contiguous numpy array if necessary.
To avoid memory re-allocation it is advised to allocate the
initial data in memory directly using that format.
"""
if self.alpha == 0:
warnings.warn("With alpha=0, this algorithm does not converge "
"well. You are advised to use the LinearRegression "
"estimator", stacklevel=2)
if self.precompute == 'auto':
warnings.warn("Setting precompute to 'auto', was found to be "
"slower even when n_samples > n_features. Hence "
"it will be removed in 0.18.",
DeprecationWarning, stacklevel=2)
# We expect X and y to be already float64 Fortran ordered arrays
# when bypassing checks
if check_input:
X, y = check_X_y(X, y, accept_sparse='csc', dtype=np.float64,
order='F',
copy=self.copy_X and self.fit_intercept,
multi_output=True, y_numeric=True)
X, y, X_mean, y_mean, X_std, precompute, Xy = \
_pre_fit(X, y, None, self.precompute, self.normalize,
self.fit_intercept, copy=False, Xy_precompute_order='F')
if y.ndim == 1:
y = y[:, np.newaxis]
if Xy is not None and Xy.ndim == 1:
Xy = Xy[:, np.newaxis]
n_samples, n_features = X.shape
n_targets = y.shape[1]
if self.selection not in ['cyclic', 'random']:
raise ValueError("selection should be either random or cyclic.")
if not self.warm_start or self.coef_ is None:
coef_ = np.zeros((n_targets, n_features), dtype=np.float64,
order='F')
else:
coef_ = self.coef_
if coef_.ndim == 1:
coef_ = coef_[np.newaxis, :]
dual_gaps_ = np.zeros(n_targets, dtype=np.float64)
self.n_iter_ = []
for k in xrange(n_targets):
if Xy is not None:
this_Xy = Xy[:, k]
else:
this_Xy = None
_, this_coef, this_dual_gap, this_iter = \
self.path(X, y[:, k],
l1_ratio=self.l1_ratio, eps=None,
n_alphas=None, alphas=[self.alpha],
precompute=precompute, Xy=this_Xy,
fit_intercept=False, normalize=False, copy_X=True,
verbose=False, tol=self.tol, positive=self.positive,
X_mean=X_mean, X_std=X_std, return_n_iter=True,
coef_init=coef_[k], max_iter=self.max_iter,
random_state=self.random_state,
selection=self.selection,
check_input=False,
pre_fit=False)
coef_[k] = this_coef[:, 0]
dual_gaps_[k] = this_dual_gap[0]
self.n_iter_.append(this_iter[0])
if n_targets == 1:
self.n_iter_ = self.n_iter_[0]
self.coef_, self.dual_gap_ = map(np.squeeze, [coef_, dual_gaps_])
self._set_intercept(X_mean, y_mean, X_std)
# return self for chaining fit and predict calls
return self
@property
def sparse_coef_(self):
""" sparse representation of the fitted coef """
return sparse.csr_matrix(self.coef_)
@deprecated(" and will be removed in 0.19")
def decision_function(self, X):
"""Decision function of the linear model
Parameters
----------
X : numpy array or scipy.sparse matrix of shape (n_samples, n_features)
Returns
-------
T : array, shape (n_samples,)
The predicted decision function
"""
return self._decision_function(X)
def _decision_function(self, X):
"""Decision function of the linear model
Parameters
----------
X : numpy array or scipy.sparse matrix of shape (n_samples, n_features)
Returns
-------
T : array, shape (n_samples,)
The predicted decision function
"""
check_is_fitted(self, 'n_iter_')
if sparse.isspmatrix(X):
return np.ravel(safe_sparse_dot(self.coef_, X.T, dense_output=True)
+ self.intercept_)
else:
return super(ElasticNet, self)._decision_function(X)
###############################################################################
# Lasso model
class Lasso(ElasticNet):
"""Linear Model trained with L1 prior as regularizer (aka the Lasso)
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Technically the Lasso model is optimizing the same objective function as
the Elastic Net with ``l1_ratio=1.0`` (no L2 penalty).
Read more in the :ref:`User Guide <lasso>`.
Parameters
----------
alpha : float, optional
Constant that multiplies the L1 term. Defaults to 1.0.
``alpha = 0`` is equivalent to an ordinary least square, solved
by the :class:`LinearRegression` object. For numerical
reasons, using ``alpha = 0`` with the Lasso object is not advised
and you should prefer the LinearRegression object.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument. For sparse input
this option is always ``True`` to preserve sparsity.
WARNING : The ``'auto'`` option is deprecated and will
be removed in 0.18.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
positive : bool, optional
When set to ``True``, forces the coefficients to be positive.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
coef_ : array, shape (n_features,) | (n_targets, n_features)
parameter vector (w in the cost function formula)
sparse_coef_ : scipy.sparse matrix, shape (n_features, 1) | \
(n_targets, n_features)
``sparse_coef_`` is a readonly property derived from ``coef_``
intercept_ : float | array, shape (n_targets,)
independent term in decision function.
n_iter_ : int | array-like, shape (n_targets,)
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.Lasso(alpha=0.1)
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
Lasso(alpha=0.1, copy_X=True, fit_intercept=True, max_iter=1000,
normalize=False, positive=False, precompute=False, random_state=None,
selection='cyclic', tol=0.0001, warm_start=False)
>>> print(clf.coef_)
[ 0.85 0. ]
>>> print(clf.intercept_)
0.15
See also
--------
lars_path
lasso_path
LassoLars
LassoCV
LassoLarsCV
sklearn.decomposition.sparse_encode
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
path = staticmethod(enet_path)
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
precompute=False, copy_X=True, max_iter=1000,
tol=1e-4, warm_start=False, positive=False,
random_state=None, selection='cyclic'):
super(Lasso, self).__init__(
alpha=alpha, l1_ratio=1.0, fit_intercept=fit_intercept,
normalize=normalize, precompute=precompute, copy_X=copy_X,
max_iter=max_iter, tol=tol, warm_start=warm_start,
positive=positive, random_state=random_state,
selection=selection)
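# Illustrative sketch (not part of scikit-learn): the Notes above recommend
# passing a Fortran-contiguous float64 ``X`` so that ``fit`` can reuse it
# without an order-conversion copy. A minimal, hypothetical usage:
def _example_lasso_fortran_input():
    import numpy as np
    rng = np.random.RandomState(0)
    X = np.asfortranarray(rng.randn(50, 10))  # float64, column-major
    y = X[:, 0] + 0.1 * rng.randn(50)
    clf = Lasso(alpha=0.1)
    clf.fit(X, y)  # X is already F-ordered, so no extra copy is triggered
    return clf.coef_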
###############################################################################
# Functions for CV with paths functions
def _path_residuals(X, y, train, test, path, path_params, alphas=None,
l1_ratio=1, X_order=None, dtype=None):
"""Returns the MSE for the models computed by 'path'
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values
train : list of indices
The indices of the train set
test : list of indices
The indices of the test set
path : callable
function returning a list of models on the path. See
enet_path for an example of signature
path_params : dictionary
Parameters passed to the path function
alphas : array-like, optional
Array of float that is used for cross-validation. If not
provided, computed using 'path'
l1_ratio : float, optional
float between 0 and 1 passed to ElasticNet (scaling between
l1 and l2 penalties). For ``l1_ratio = 0`` the penalty is an
L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty. For ``0
< l1_ratio < 1``, the penalty is a combination of L1 and L2
X_order : {'F', 'C', or None}, optional
The order of the arrays expected by the path function to
avoid memory copies
dtype : a numpy dtype or None
The dtype of the arrays expected by the path function to
avoid memory copies
"""
X_train = X[train]
y_train = y[train]
X_test = X[test]
y_test = y[test]
fit_intercept = path_params['fit_intercept']
normalize = path_params['normalize']
if y.ndim == 1:
precompute = path_params['precompute']
else:
# No Gram variant of multi-task exists right now.
# Fall back to default enet_multitask
precompute = False
X_train, y_train, X_mean, y_mean, X_std, precompute, Xy = \
_pre_fit(X_train, y_train, None, precompute, normalize, fit_intercept,
copy=False)
path_params = path_params.copy()
path_params['Xy'] = Xy
path_params['X_mean'] = X_mean
path_params['X_std'] = X_std
path_params['precompute'] = precompute
path_params['copy_X'] = False
path_params['alphas'] = alphas
if 'l1_ratio' in path_params:
path_params['l1_ratio'] = l1_ratio
# Do the ordering and type casting here, as if it is done in the path,
# X is copied and a reference is kept here
X_train = check_array(X_train, 'csc', dtype=dtype, order=X_order)
alphas, coefs, _ = path(X_train, y_train, **path_params)
del X_train, y_train
if y.ndim == 1:
# Doing this so that it becomes coherent with multioutput.
coefs = coefs[np.newaxis, :, :]
y_mean = np.atleast_1d(y_mean)
y_test = y_test[:, np.newaxis]
if normalize:
nonzeros = np.flatnonzero(X_std)
coefs[:, nonzeros] /= X_std[nonzeros][:, np.newaxis]
intercepts = y_mean[:, np.newaxis] - np.dot(X_mean, coefs)
if sparse.issparse(X_test):
n_order, n_features, n_alphas = coefs.shape
        # Work around for sparse matrices since coefs is a 3-D numpy array.
coefs_feature_major = np.rollaxis(coefs, 1)
feature_2d = np.reshape(coefs_feature_major, (n_features, -1))
X_test_coefs = safe_sparse_dot(X_test, feature_2d)
X_test_coefs = X_test_coefs.reshape(X_test.shape[0], n_order, -1)
else:
X_test_coefs = safe_sparse_dot(X_test, coefs)
residues = X_test_coefs - y_test[:, :, np.newaxis]
residues += intercepts
this_mses = ((residues ** 2).mean(axis=0)).mean(axis=0)
return this_mses
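# Illustrative sketch (not library code): with centered data, the intercept
# for each alpha is recovered as y_mean - X_mean . w, which is exactly what
# the ``intercepts`` line above computes.
def _example_intercept_from_centering():
    import numpy as np
    X = np.array([[0., 2.], [2., 0.], [4., 4.]])
    w = np.array([1., 0.5])
    y = X.dot(w) + 3.0                      # true intercept is 3.0
    intercept = y.mean() - X.mean(axis=0).dot(w)
    return intercept                        # -> 3.0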
class LinearModelCV(six.with_metaclass(ABCMeta, LinearModel)):
"""Base class for iterative model fitting along a regularization path"""
@abstractmethod
def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
normalize=False, precompute='auto', max_iter=1000, tol=1e-4,
copy_X=True, cv=None, verbose=False, n_jobs=1,
positive=False, random_state=None, selection='cyclic'):
self.eps = eps
self.n_alphas = n_alphas
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
self.max_iter = max_iter
self.tol = tol
self.copy_X = copy_X
self.cv = cv
self.verbose = verbose
self.n_jobs = n_jobs
self.positive = positive
self.random_state = random_state
self.selection = selection
def fit(self, X, y):
"""Fit linear model with coordinate descent
Fit is on grid of alphas and best alpha estimated by cross-validation.
Parameters
----------
X : {array-like}, shape (n_samples, n_features)
Training data. Pass directly as float64, Fortran-contiguous data
to avoid unnecessary memory duplication. If y is mono-output,
X can be sparse.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values
"""
y = np.asarray(y, dtype=np.float64)
if y.shape[0] == 0:
raise ValueError("y has 0 samples: %r" % y)
if hasattr(self, 'l1_ratio'):
model_str = 'ElasticNet'
else:
model_str = 'Lasso'
if isinstance(self, ElasticNetCV) or isinstance(self, LassoCV):
if model_str == 'ElasticNet':
model = ElasticNet()
else:
model = Lasso()
if y.ndim > 1 and y.shape[1] > 1:
raise ValueError("For multi-task outputs, use "
"MultiTask%sCV" % (model_str))
y = column_or_1d(y, warn=True)
else:
if sparse.isspmatrix(X):
raise TypeError("X should be dense but a sparse matrix was"
"passed")
elif y.ndim == 1:
raise ValueError("For mono-task outputs, use "
"%sCV" % (model_str))
if model_str == 'ElasticNet':
model = MultiTaskElasticNet()
else:
model = MultiTaskLasso()
if self.selection not in ["random", "cyclic"]:
raise ValueError("selection should be either random or cyclic.")
# This makes sure that there is no duplication in memory.
# Dealing right with copy_X is important in the following:
# Multiple functions touch X and subsamples of X and can induce a
# lot of duplication of memory
copy_X = self.copy_X and self.fit_intercept
if isinstance(X, np.ndarray) or sparse.isspmatrix(X):
# Keep a reference to X
reference_to_old_X = X
# Let us not impose fortran ordering or float64 so far: it is
# not useful for the cross-validation loop and will be done
# by the model fitting itself
X = check_array(X, 'csc', copy=False)
if sparse.isspmatrix(X):
if not np.may_share_memory(reference_to_old_X.data, X.data):
# X is a sparse matrix and has been copied
copy_X = False
elif not np.may_share_memory(reference_to_old_X, X):
# X has been copied
copy_X = False
del reference_to_old_X
else:
X = check_array(X, 'csc', dtype=np.float64, order='F', copy=copy_X)
copy_X = False
if X.shape[0] != y.shape[0]:
raise ValueError("X and y have inconsistent dimensions (%d != %d)"
% (X.shape[0], y.shape[0]))
# All LinearModelCV parameters except 'cv' are acceptable
path_params = self.get_params()
if 'l1_ratio' in path_params:
l1_ratios = np.atleast_1d(path_params['l1_ratio'])
# For the first path, we need to set l1_ratio
path_params['l1_ratio'] = l1_ratios[0]
else:
l1_ratios = [1, ]
path_params.pop('cv', None)
path_params.pop('n_jobs', None)
alphas = self.alphas
n_l1_ratio = len(l1_ratios)
if alphas is None:
alphas = []
for l1_ratio in l1_ratios:
alphas.append(_alpha_grid(
X, y, l1_ratio=l1_ratio,
fit_intercept=self.fit_intercept,
eps=self.eps, n_alphas=self.n_alphas,
normalize=self.normalize,
copy_X=self.copy_X))
else:
# Making sure alphas is properly ordered.
alphas = np.tile(np.sort(alphas)[::-1], (n_l1_ratio, 1))
# We want n_alphas to be the number of alphas used for each l1_ratio.
n_alphas = len(alphas[0])
path_params.update({'n_alphas': n_alphas})
path_params['copy_X'] = copy_X
# We are not computing in parallel, we can modify X
# inplace in the folds
if not (self.n_jobs == 1 or self.n_jobs is None):
path_params['copy_X'] = False
# init cross-validation generator
cv = check_cv(self.cv, X)
# Compute path for all folds and compute MSE to get the best alpha
folds = list(cv)
best_mse = np.inf
# We do a double for loop folded in one, in order to be able to
# iterate in parallel on l1_ratio and folds
jobs = (delayed(_path_residuals)(X, y, train, test, self.path,
path_params, alphas=this_alphas,
l1_ratio=this_l1_ratio, X_order='F',
dtype=np.float64)
for this_l1_ratio, this_alphas in zip(l1_ratios, alphas)
for train, test in folds)
mse_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(jobs)
mse_paths = np.reshape(mse_paths, (n_l1_ratio, len(folds), -1))
mean_mse = np.mean(mse_paths, axis=1)
self.mse_path_ = np.squeeze(np.rollaxis(mse_paths, 2, 1))
for l1_ratio, l1_alphas, mse_alphas in zip(l1_ratios, alphas,
mean_mse):
i_best_alpha = np.argmin(mse_alphas)
this_best_mse = mse_alphas[i_best_alpha]
if this_best_mse < best_mse:
best_alpha = l1_alphas[i_best_alpha]
best_l1_ratio = l1_ratio
best_mse = this_best_mse
self.l1_ratio_ = best_l1_ratio
self.alpha_ = best_alpha
if self.alphas is None:
self.alphas_ = np.asarray(alphas)
if n_l1_ratio == 1:
self.alphas_ = self.alphas_[0]
# Remove duplicate alphas in case alphas is provided.
else:
self.alphas_ = np.asarray(alphas[0])
# Refit the model with the parameters selected
common_params = dict((name, value)
for name, value in self.get_params().items()
if name in model.get_params())
model.set_params(**common_params)
model.alpha = best_alpha
model.l1_ratio = best_l1_ratio
model.copy_X = copy_X
model.precompute = False
model.fit(X, y)
if not hasattr(self, 'l1_ratio'):
del self.l1_ratio_
self.coef_ = model.coef_
self.intercept_ = model.intercept_
self.dual_gap_ = model.dual_gap_
self.n_iter_ = model.n_iter_
return self
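# Illustrative sketch (not library code): the selection loop above reduces to
# an argmin over the fold-averaged MSE for each (l1_ratio, alpha) pair.
def _example_best_alpha_selection():
    import numpy as np
    # shape (n_l1_ratio, n_folds, n_alphas): one MSE path per fold
    mse_paths = np.array([[[1.0, 0.6, 0.9],
                           [1.2, 0.5, 1.1]]])
    mean_mse = np.mean(mse_paths, axis=1)  # over folds -> [1.1, 0.55, 1.0]
    return np.argmin(mean_mse[0])          # -> 1, the middle alpha wins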
class LassoCV(LinearModelCV, RegressorMixin):
"""Lasso linear model with iterative fitting along a regularization path
The best model is selected by cross-validation.
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Read more in the :ref:`User Guide <lasso>`.
Parameters
----------
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
n_alphas : int, optional
Number of alphas along the regularization path
alphas : numpy array, optional
List of alphas where to compute the models.
If ``None`` alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : integer or cross-validation generator, optional
        If an integer is passed, it is the number of folds (default 3).
Specific cross-validation objects can be passed, see the
:mod:`sklearn.cross_validation` module for the list of possible
objects.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs.
positive : bool, optional
If positive, restrict regression coefficients to be positive
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
fit_intercept : boolean, default True
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
Attributes
----------
alpha_ : float
The amount of penalization chosen by cross validation
coef_ : array, shape (n_features,) | (n_targets, n_features)
parameter vector (w in the cost function formula)
intercept_ : float | array, shape (n_targets,)
independent term in decision function.
mse_path_ : array, shape (n_alphas, n_folds)
mean square error for the test set on each fold, varying alpha
alphas_ : numpy array, shape (n_alphas,)
The grid of alphas used for fitting
dual_gap_ : ndarray, shape ()
The dual gap at the end of the optimization for the optimal alpha
(``alpha_``).
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
Notes
-----
See examples/linear_model/lasso_path_with_crossvalidation.py
for an example.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
See also
--------
lars_path
lasso_path
LassoLars
Lasso
LassoLarsCV
"""
path = staticmethod(lasso_path)
def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
normalize=False, precompute='auto', max_iter=1000, tol=1e-4,
copy_X=True, cv=None, verbose=False, n_jobs=1,
positive=False, random_state=None, selection='cyclic'):
super(LassoCV, self).__init__(
eps=eps, n_alphas=n_alphas, alphas=alphas,
fit_intercept=fit_intercept, normalize=normalize,
precompute=precompute, max_iter=max_iter, tol=tol, copy_X=copy_X,
cv=cv, verbose=verbose, n_jobs=n_jobs, positive=positive,
random_state=random_state, selection=selection)
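# Illustrative usage sketch (synthetic data, not a doctest from the library):
def _example_lassocv_usage():
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.randn(100, 5)
    y = X[:, 0] - 2 * X[:, 1] + 0.1 * rng.randn(100)
    reg = LassoCV(cv=3).fit(X, y)
    return reg.alpha_, reg.mse_path_.shape  # alpha picked by CV, (n_alphas, 3)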
class ElasticNetCV(LinearModelCV, RegressorMixin):
"""Elastic Net model with iterative fitting along a regularization path
The best model is selected by cross-validation.
Read more in the :ref:`User Guide <elastic_net>`.
Parameters
----------
l1_ratio : float, optional
float between 0 and 1 passed to ElasticNet (scaling between
l1 and l2 penalties). For ``l1_ratio = 0``
the penalty is an L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty.
For ``0 < l1_ratio < 1``, the penalty is a combination of L1 and L2
This parameter can be a list, in which case the different
values are tested by cross-validation and the one giving the best
prediction score is used. Note that a good choice of list of
values for l1_ratio is often to put more values close to 1
(i.e. Lasso) and less close to 0 (i.e. Ridge), as in ``[.1, .5, .7,
.9, .95, .99, 1]``
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
n_alphas : int, optional
Number of alphas along the regularization path, used for each l1_ratio.
alphas : numpy array, optional
List of alphas where to compute the models.
If None alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : integer or cross-validation generator, optional
        If an integer is passed, it is the number of folds (default 3).
Specific cross-validation objects can be passed, see the
:mod:`sklearn.cross_validation` module for the list of possible
objects.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs.
positive : bool, optional
When set to ``True``, forces the coefficients to be positive.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
Attributes
----------
alpha_ : float
The amount of penalization chosen by cross validation
l1_ratio_ : float
The compromise between l1 and l2 penalization chosen by
cross validation
    coef_ : array, shape (n_features,) | (n_targets, n_features)
        Parameter vector (w in the cost function formula).
    intercept_ : float | array, shape (n_targets,)
        Independent term in the decision function.
mse_path_ : array, shape (n_l1_ratio, n_alpha, n_folds)
Mean square error for the test set on each fold, varying l1_ratio and
alpha.
alphas_ : numpy array, shape (n_alphas,) or (n_l1_ratio, n_alphas)
The grid of alphas used for fitting, for each l1_ratio.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
Notes
-----
See examples/linear_model/lasso_path_with_crossvalidation.py
for an example.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
The parameter l1_ratio corresponds to alpha in the glmnet R package
while alpha corresponds to the lambda parameter in glmnet.
More specifically, the optimization objective is::
        1 / (2 * n_samples) * ||y - Xw||^2_2
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
If you are interested in controlling the L1 and L2 penalty
separately, keep in mind that this is equivalent to::
a * L1 + b * L2
for::
alpha = a + b and l1_ratio = a / (a + b).
See also
--------
enet_path
ElasticNet
"""
path = staticmethod(enet_path)
def __init__(self, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
fit_intercept=True, normalize=False, precompute='auto',
max_iter=1000, tol=1e-4, cv=None, copy_X=True,
verbose=0, n_jobs=1, positive=False, random_state=None,
selection='cyclic'):
self.l1_ratio = l1_ratio
self.eps = eps
self.n_alphas = n_alphas
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
self.max_iter = max_iter
self.tol = tol
self.cv = cv
self.copy_X = copy_X
self.verbose = verbose
self.n_jobs = n_jobs
self.positive = positive
self.random_state = random_state
self.selection = selection
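# Illustrative sketch of the Notes above: choosing separate L1 and L2
# strengths ``a`` and ``b`` and translating them into the (alpha, l1_ratio)
# parametrization used by this class.
def _example_alpha_l1_ratio_from_a_b(a, b):
    alpha = a + b            # total penalty strength
    l1_ratio = a / (a + b)   # fraction of it that is L1
    return alpha, l1_ratio   # e.g. a=1.0, b=3.0 -> (4.0, 0.25)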
###############################################################################
# Multi Task ElasticNet and Lasso models (with joint feature selection)
class MultiTaskElasticNet(Lasso):
"""Multi-task ElasticNet model trained with L1/L2 mixed-norm as regularizer
The optimization objective for MultiTaskElasticNet is::
        (1 / (2 * n_samples)) * ||Y - XW||_Fro^2
+ alpha * l1_ratio * ||W||_21
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <multi_task_lasso>`.
Parameters
----------
alpha : float, optional
Constant that multiplies the L1/L2 term. Defaults to 1.0
    l1_ratio : float
        The ElasticNet mixing parameter, with 0 < l1_ratio <= 1.
        For l1_ratio = 1 the penalty is an L1/L2 penalty. For l1_ratio = 0 it
        is an L2 penalty.
        For ``0 < l1_ratio < 1``, the penalty is a combination of L1/L2 and L2.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to ``True``, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
intercept_ : array, shape (n_tasks,)
Independent term in decision function.
coef_ : array, shape (n_tasks, n_features)
Parameter vector (W in the cost function formula). If a 1D y is \
passed in at fit (non multi-task usage), ``coef_`` is then a 1D array
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.MultiTaskElasticNet(alpha=0.1)
>>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]])
... #doctest: +NORMALIZE_WHITESPACE
MultiTaskElasticNet(alpha=0.1, copy_X=True, fit_intercept=True,
l1_ratio=0.5, max_iter=1000, normalize=False, random_state=None,
selection='cyclic', tol=0.0001, warm_start=False)
>>> print(clf.coef_)
[[ 0.45663524 0.45612256]
[ 0.45663524 0.45612256]]
>>> print(clf.intercept_)
[ 0.0872422 0.0872422]
See also
--------
ElasticNet, MultiTaskLasso
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
def __init__(self, alpha=1.0, l1_ratio=0.5, fit_intercept=True,
normalize=False, copy_X=True, max_iter=1000, tol=1e-4,
warm_start=False, random_state=None, selection='cyclic'):
self.l1_ratio = l1_ratio
self.alpha = alpha
self.coef_ = None
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
self.random_state = random_state
self.selection = selection
def fit(self, X, y):
"""Fit MultiTaskLasso model with coordinate descent
Parameters
        ----------
X : ndarray, shape (n_samples, n_features)
Data
y : ndarray, shape (n_samples, n_tasks)
Target
Notes
-----
Coordinate descent is an algorithm that considers each column of
data at a time hence it will automatically convert the X input
as a Fortran-contiguous numpy array if necessary.
To avoid memory re-allocation it is advised to allocate the
initial data in memory directly using that format.
"""
# X and y must be of type float64
X = check_array(X, dtype=np.float64, order='F',
copy=self.copy_X and self.fit_intercept)
y = np.asarray(y, dtype=np.float64)
if hasattr(self, 'l1_ratio'):
model_str = 'ElasticNet'
else:
model_str = 'Lasso'
if y.ndim == 1:
raise ValueError("For mono-task outputs, use %s" % model_str)
n_samples, n_features = X.shape
_, n_tasks = y.shape
if n_samples != y.shape[0]:
raise ValueError("X and y have inconsistent dimensions (%d != %d)"
% (n_samples, y.shape[0]))
X, y, X_mean, y_mean, X_std = center_data(
X, y, self.fit_intercept, self.normalize, copy=False)
if not self.warm_start or self.coef_ is None:
self.coef_ = np.zeros((n_tasks, n_features), dtype=np.float64,
order='F')
l1_reg = self.alpha * self.l1_ratio * n_samples
l2_reg = self.alpha * (1.0 - self.l1_ratio) * n_samples
self.coef_ = np.asfortranarray(self.coef_) # coef contiguous in memory
if self.selection not in ['random', 'cyclic']:
raise ValueError("selection should be either random or cyclic.")
random = (self.selection == 'random')
self.coef_, self.dual_gap_, self.eps_, self.n_iter_ = \
cd_fast.enet_coordinate_descent_multi_task(
self.coef_, l1_reg, l2_reg, X, y, self.max_iter, self.tol,
check_random_state(self.random_state), random)
self._set_intercept(X_mean, y_mean, X_std)
if self.dual_gap_ > self.eps_:
warnings.warn('Objective did not converge, you might want'
' to increase the number of iterations')
# return self for chaining fit and predict calls
return self
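# Illustrative sketch (not library code): the mixed norm ||W||_21 from the
# objective above, i.e. the sum over rows of the row-wise Euclidean norms.
def _example_l21_norm(W):
    import numpy as np
    W = np.asarray(W)
    return np.sqrt((W ** 2).sum(axis=1)).sum()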
class MultiTaskLasso(MultiTaskElasticNet):
"""Multi-task Lasso model trained with L1/L2 mixed-norm as regularizer
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
    i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <multi_task_lasso>`.
Parameters
----------
alpha : float, optional
Constant that multiplies the L1/L2 term. Defaults to 1.0
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to ``True``, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
        especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
coef_ : array, shape (n_tasks, n_features)
parameter vector (W in the cost function formula)
intercept_ : array, shape (n_tasks,)
independent term in decision function.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.MultiTaskLasso(alpha=0.1)
>>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]])
MultiTaskLasso(alpha=0.1, copy_X=True, fit_intercept=True, max_iter=1000,
normalize=False, random_state=None, selection='cyclic', tol=0.0001,
warm_start=False)
>>> print(clf.coef_)
[[ 0.89393398 0. ]
[ 0.89393398 0. ]]
>>> print(clf.intercept_)
[ 0.10606602 0.10606602]
See also
--------
Lasso, MultiTaskElasticNet
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=1000, tol=1e-4, warm_start=False,
random_state=None, selection='cyclic'):
self.alpha = alpha
self.coef_ = None
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
self.l1_ratio = 1.0
self.random_state = random_state
self.selection = selection
class MultiTaskElasticNetCV(LinearModelCV, RegressorMixin):
"""Multi-task L1/L2 ElasticNet with built-in cross-validation.
The optimization objective for MultiTaskElasticNet is::
        (1 / (2 * n_samples)) * ||Y - XW||_Fro^2
+ alpha * l1_ratio * ||W||_21
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <multi_task_lasso>`.
Parameters
----------
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
alphas : array-like, optional
List of alphas where to compute the models.
If not provided, set automatically.
n_alphas : int, optional
Number of alphas along the regularization path
l1_ratio : float or array of floats
        The ElasticNet mixing parameter, with 0 < l1_ratio <= 1.
        For l1_ratio = 1 the penalty is an L1/L2 penalty. For l1_ratio = 0 it
        is an L2 penalty.
        For ``0 < l1_ratio < 1``, the penalty is a combination of L1/L2 and L2.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : integer or cross-validation generator, optional
        If an integer is passed, it is the number of folds (default 3).
Specific cross-validation objects can be passed, see the
:mod:`sklearn.cross_validation` module for the list of possible
objects.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs. Note that this is used only if multiple values for
l1_ratio are given.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
intercept_ : array, shape (n_tasks,)
Independent term in decision function.
coef_ : array, shape (n_tasks, n_features)
Parameter vector (W in the cost function formula).
alpha_ : float
The amount of penalization chosen by cross validation
mse_path_ : array, shape (n_alphas, n_folds) or \
(n_l1_ratio, n_alphas, n_folds)
mean square error for the test set on each fold, varying alpha
alphas_ : numpy array, shape (n_alphas,) or (n_l1_ratio, n_alphas)
The grid of alphas used for fitting, for each l1_ratio
l1_ratio_ : float
best l1_ratio obtained by cross-validation.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.MultiTaskElasticNetCV()
>>> clf.fit([[0,0], [1, 1], [2, 2]],
... [[0, 0], [1, 1], [2, 2]])
... #doctest: +NORMALIZE_WHITESPACE
MultiTaskElasticNetCV(alphas=None, copy_X=True, cv=None, eps=0.001,
fit_intercept=True, l1_ratio=0.5, max_iter=1000, n_alphas=100,
n_jobs=1, normalize=False, random_state=None, selection='cyclic',
tol=0.0001, verbose=0)
>>> print(clf.coef_)
[[ 0.52875032 0.46958558]
[ 0.52875032 0.46958558]]
>>> print(clf.intercept_)
[ 0.00166409 0.00166409]
See also
--------
MultiTaskElasticNet
ElasticNetCV
MultiTaskLassoCV
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
path = staticmethod(enet_path)
def __init__(self, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
fit_intercept=True, normalize=False,
max_iter=1000, tol=1e-4, cv=None, copy_X=True,
verbose=0, n_jobs=1, random_state=None, selection='cyclic'):
self.l1_ratio = l1_ratio
self.eps = eps
self.n_alphas = n_alphas
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.tol = tol
self.cv = cv
self.copy_X = copy_X
self.verbose = verbose
self.n_jobs = n_jobs
self.random_state = random_state
self.selection = selection
class MultiTaskLassoCV(LinearModelCV, RegressorMixin):
"""Multi-task L1/L2 Lasso with built-in cross-validation.
The optimization objective for MultiTaskLasso is::
        (1 / (2 * n_samples)) * ||Y - XW||_Fro^2 + alpha * ||W||_21
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <multi_task_lasso>`.
Parameters
----------
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
alphas : array-like, optional
List of alphas where to compute the models.
        If not provided, set automatically.
n_alphas : int, optional
Number of alphas along the regularization path
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations.
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : integer or cross-validation generator, optional
        If an integer is passed, it is the number of folds (default 3).
Specific cross-validation objects can be passed, see the
:mod:`sklearn.cross_validation` module for the list of possible
objects.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
        Number of CPUs to use during the cross validation. If ``-1``, use
        all the CPUs.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
intercept_ : array, shape (n_tasks,)
Independent term in decision function.
coef_ : array, shape (n_tasks, n_features)
Parameter vector (W in the cost function formula).
alpha_ : float
The amount of penalization chosen by cross validation
mse_path_ : array, shape (n_alphas, n_folds)
mean square error for the test set on each fold, varying alpha
alphas_ : numpy array, shape (n_alphas,)
The grid of alphas used for fitting.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
See also
--------
MultiTaskElasticNet
ElasticNetCV
MultiTaskElasticNetCV
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
path = staticmethod(lasso_path)
def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
normalize=False, max_iter=1000, tol=1e-4, copy_X=True,
cv=None, verbose=False, n_jobs=1, random_state=None,
selection='cyclic'):
super(MultiTaskLassoCV, self).__init__(
eps=eps, n_alphas=n_alphas, alphas=alphas,
fit_intercept=fit_intercept, normalize=normalize,
max_iter=max_iter, tol=tol, copy_X=copy_X,
cv=cv, verbose=verbose, n_jobs=n_jobs, random_state=random_state,
selection=selection)
|
bsd-3-clause
|
NeuroDataDesign/seelviz
|
graphfiles/scripts/csvMaker.py
|
1
|
1118
|
import clearity as cl # I wrote this module for easier operations on data
import clearity.resources as rs
import csv,gc # garbage memory collection :)
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import axes3d
import jgraph as ig
####Normal Image
#c = cl.Clarity('/home/albert/claritycontrol/code/data/raw/Cocaine175')
# #fname = rs.HIST_DATA_PATH+token+".csv"
#claritycsv = c.loadNiiImg().imgToPoints(threshold=0.1,sample=0.11).savePoints()
# #np.savetxt(token,claritycsv,delimiter=',')
#print "Fear197.csv saved."
#del c
#gc.collect()
####General Image
#c = cl.Clarity('globaleq')
# #fname = rs.HIST_DATA_PATH+token+".csv"
#claritycsv = c.loadEqImg().imgToPoints(threshold=0.99,sample=0.005).savePoints()
# #np.savetxt(token,claritycsv,delimiter=',')
#print "globaleq.csv saved."
#del c
#gc.collect()
####Local Image
c = cl.Clarity('AutAlocaleq')
#fname = rs.HIST_DATA_PATH+token+".csv"
claritycsv = c.loadEqImg().imgToPoints(threshold=0.9,sample=0.1).savePoints()
#np.savetxt(token,claritycsv,delimiter=',')
print "localeq.csv saved."
del c
gc.collect()
|
apache-2.0
|
walterreade/scikit-learn
|
examples/linear_model/plot_ols_3d.py
|
350
|
2040
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Sparsity Example: Fitting only features 1 and 2
=========================================================
Features 1 and 2 of the diabetes-dataset are fitted and
plotted below. It illustrates that although feature 2
has a strong coefficient on the full model, it does not
give us much regarding `y` when compared to just feature 1
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
from sklearn import datasets, linear_model
diabetes = datasets.load_diabetes()
indices = (0, 1)
X_train = diabetes.data[:-20, indices]
X_test = diabetes.data[-20:, indices]
y_train = diabetes.target[:-20]
y_test = diabetes.target[-20:]
ols = linear_model.LinearRegression()
ols.fit(X_train, y_train)
###############################################################################
# Plot the figure
def plot_figs(fig_num, elev, azim, X_train, clf):
fig = plt.figure(fig_num, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, elev=elev, azim=azim)
ax.scatter(X_train[:, 0], X_train[:, 1], y_train, c='k', marker='+')
ax.plot_surface(np.array([[-.1, -.1], [.15, .15]]),
np.array([[-.1, .15], [-.1, .15]]),
clf.predict(np.array([[-.1, -.1, .15, .15],
[-.1, .15, -.1, .15]]).T
).reshape((2, 2)),
alpha=.5)
ax.set_xlabel('X_1')
ax.set_ylabel('X_2')
ax.set_zlabel('Y')
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
#Generate the three different figures from different views
elev = 43.5
azim = -110
plot_figs(1, elev, azim, X_train, ols)
elev = -.5
azim = 0
plot_figs(2, elev, azim, X_train, ols)
elev = -.5
azim = 90
plot_figs(3, elev, azim, X_train, ols)
plt.show()
|
bsd-3-clause
|
mbeyeler/pulse2percept
|
examples/datasets/plot_data_beyeler2019.py
|
1
|
8496
|
"""
===============================================================================
Phosphene drawings from Beyeler et al. (2019)
===============================================================================
This example shows how to use the Beyeler et al. (2019) dataset.
[Beyeler2019]_ asked Argus I/II users to draw what they see in response to
single-electrode stimulation.
.. important ::
For this dataset you will need to install both
`Pandas <https://pandas.pydata.org>`_ (``pip install pandas``) and
    `HDF5 for Python <https://www.h5py.org>`_ (``pip install h5py``).
Loading the dataset
-------------------
Due to its size (66 MB), the dataset is not included with pulse2percept, but
can be downloaded from the Open Science Framework (OSF).
By default, the dataset will be stored in a local directory
'~/pulse2percept_data/' within your user directory (but a different path can be
specified).
This way, the dataset is only downloaded once, and future calls to the fetch
function will load the dataset from your local copy.
The data itself will be provided as a Pandas ``DataFrame``:
"""
# sphinx_gallery_thumbnail_number = 2
from pulse2percept.datasets import fetch_beyeler2019
data = fetch_beyeler2019()
print(data)
###############################################################################
#
# Inspecting the DataFrame tells us that there are 400 phosphene drawings
# (the rows) each with 16 different attributes (the columns).
#
# These attributes include specifiers such as "subject", "electrode", and
# "image". We can print all column names using:
data.columns
###############################################################################
# .. note ::
#
# The meaning of all column names is explained in the docstring of
# the :py:func:`~pulse2percept.datasets.fetch_beyeler2019` function.
#
# For example, "subject" contains the different subject IDs used in the study:
data.subject.unique()
###############################################################################
# To select all drawings from Subject 2, we can index into the DataFrame as
# follows:
print(data[data.subject == 'S2'])
###############################################################################
# This leaves us with 110 rows, each of which corresponds to one phosphene
# drawing from a number of different electrodes and trials.
#
# An alternative to indexing into the DataFrame is to load only a subset of
# the data:
print(fetch_beyeler2019(subjects='S2'))
###############################################################################
# Plotting the data
# -----------------
#
# Arguably the most important column is "image". This is the phosphene drawing
# obtained during a particular trial.
#
# Each phosphene drawing is a 2D black-and-white NumPy array, so we can just
# plot it using Matplotlib like any other image:
import matplotlib.pyplot as plt
plt.imshow(data.loc[0, 'image'], cmap='gray')
###############################################################################
# However, we might be more interested in seeing how phosphene shape differs
# for different electrodes.
# For this we can use :py:func:`~pulse2percept.viz.plot_argus_phosphenes` from
# the :py:mod:`~pulse2percept.viz` module.
# In addition to the ``data`` matrix, the function will also want an
# :py:class:`~pulse2percept.implants.ArgusII` object implanted at the correct
# location.
#
# Consulting [Beyeler2019]_ tells us that the prosthesis was roughly implanted
# in the following location:
from pulse2percept.implants import ArgusII
argus = ArgusII(x=-1331, y=-850, rot=-28.4, eye='RE')
###############################################################################
# For now, let's focus on the data from Subject 2:
data = fetch_beyeler2019(subjects='S2')
###############################################################################
# Passing both ``data`` and ``argus`` to
# :py:func:`~pulse2percept.viz.plot_argus_phosphenes` will then allow the
# function to overlay the phosphene drawings over a schematic of the implant.
# Here, phosphene drawings from different trials are averaged, and aligned with
# the center of the electrode that was used to obtain the drawing:
from pulse2percept.viz import plot_argus_phosphenes
plot_argus_phosphenes(data, argus)
###############################################################################
# Great! We have just reproduced a panel from Figure 2 in [Beyeler2019]_.
#
# As [Beyeler2019]_ went on to show, the orientation of these phosphenes is
# well aligned with the map of nerve fiber bundles (NFBs) in each subject's
# eye.
#
# To see how the phosphene drawings line up with the NFBs, we can also pass an
# :py:class:`~pulse2percept.models.AxonMapModel` to the function.
# Of course, we need to make sure that we use the correct dimensions. Subject
# S2 had their optic disc center located 16.2 deg nasally, 1.38 deg superior
# from the fovea:
from pulse2percept.models import AxonMapModel
model = AxonMapModel(loc_od=(16.2, 1.38))
plot_argus_phosphenes(data, argus, axon_map=model)
###############################################################################
# Predicting phosphene shape
# --------------------------
#
# In addition, the :py:class:`~pulse2percept.models.AxonMapModel` is well
# suited to predict the shape of individual phosphenes. Using the values given
# in [Beyeler2019]_, we can tailor the axon map parameters to Subject 2:
import numpy as np
model = AxonMapModel(rho=315, axlambda=500, loc_od=(16.2, 1.38),
xrange=(-30, 30), yrange=(-22.5, 22.5),
thresh_percept=1 / np.sqrt(np.e))
model.build()
###############################################################################
# Now we need to activate one electrode at a time, and predict the resulting
# percept. We could build a :py:class:`~pulse2percept.stimuli.Stimulus` object
# with a for loop that does just that, or we can use the following trick.
#
# The stimulus' data container is a (electrodes, timepoints) shaped 2D NumPy
# array. Activating one electrode at a time is therefore the same as an
# identity matrix whose size is equal to the number of electrodes. In code:
# Find the names of all the electrodes in the dataset:
electrodes = data.electrode.unique()
# Activate one electrode at a time:
import numpy as np
from pulse2percept.stimuli import Stimulus
argus.stim = Stimulus(np.eye(len(electrodes)), electrodes=electrodes)
###############################################################################
# Using the model's
# :py:func:`~pulse2percept.models.AxonMapModel.predict_percept`, we then get
# a Percept object where each frame is the percept generated from activating
# a single electrode:
percepts = model.predict_percept(argus)
percepts.play()
###############################################################################
# Finally, we can visualize the ground-truth and simulated phosphenes
# side-by-side:
from pulse2percept.viz import plot_argus_simulated_phosphenes
fig, (ax_data, ax_sim) = plt.subplots(ncols=2, figsize=(15, 5))
plot_argus_phosphenes(data, argus, scale=0.75, ax=ax_data)
plot_argus_simulated_phosphenes(percepts, argus, scale=1.25, ax=ax_sim)
ax_data.set_title('Ground-truth phosphenes')
ax_sim.set_title('Simulated phosphenes')
###############################################################################
# Analyzing phosphene shape
# -------------------------
#
# The phosphene drawings also come annotated with different shape descriptors:
# area, orientation, and elongation.
# Elongation is also called eccentricity in the computer vision literature,
# which is not to be confused with retinal eccentricity. It is simply a number
# between 0 and 1, where 0 corresponds to a circle and 1 corresponds to an
# infinitesimally thin line (note that the Methods section of [Beyeler2019]_
# got it wrong).
#
# [Beyeler2019]_ made the point that if each phosphene could be considered a
# pixel (or essentially a blob), as is so often assumed in the literature, then
# most phosphenes should have zero elongation.
#
# Instead, using Matplotlib's histogram function, we can convince ourselves
# that most phosphenes are in fact elongated:
data = fetch_beyeler2019()
data.eccentricity.plot(kind='hist')
plt.xlabel('phosphene elongation')
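###############################################################################
# As an illustrative aside (not from the paper): the elongation reported here
# is the eccentricity of the ellipse with the same second moments as the drawn
# shape. A hypothetical sanity check on a synthetic blob, assuming
# scikit-image is installed:
from skimage.measure import label, regionprops
blob = np.zeros((50, 50), dtype=int)
blob[20:30, 10:40] = 1  # a 10 x 30 rectangle, clearly elongated
print(regionprops(label(blob))[0].eccentricity)  # roughly 0.94, far from 0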
###############################################################################
# Phosphenes are not pixels!
# And with that we have just reproduced Fig. 3C of [Beyeler2019]_.
|
bsd-3-clause
|
bibsian/database-development
|
test/logiclayer/datalayer/test_filehandles.py
|
1
|
14956
|
#!/usr/bin/env python
import pytest
from pandas import read_csv, read_excel, read_table, DataFrame
from collections import namedtuple
import sys, os
import copy
from poplerGUI import class_inputhandler as ini
rootpath = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname( __file__ ))))
end = os.path.sep
sys.path.append(os.path.realpath(os.path.dirname(
rootpath + 'logiclayer' + end)))
os.chdir(rootpath)
@pytest.fixture
def DataFileOriginator(Memento):
class DataFileOriginator(object):
"""
FileHandler (i.e. the originator)
is a class that will take a user selected
file, identify the extension, and load the data as an
instance of a pandas dataframe... This is all the
handler does.
Class has some properties for working with file extensions
and pandas I/O methods
This is the originator of the initial data
"""
get_info = namedtuple(
'get_info', 'name ext version')
state = 'original'
def __init__(self, inputclsinstance):
self.filetoload = inputclsinstance.filename
            if inputclsinstance.lnedentry['sheet'] != '':
self.sheet = inputclsinstance.lnedentry['sheet']
else:
self.sheet = None
            if inputclsinstance.lnedentry['tskip'] != '':
self.topskiplines = int(inputclsinstance.lnedentry[
'tskip'])
else:
self.topskiplines = None
            if inputclsinstance.lnedentry['bskip'] != '':
self.bottomskiplines = int(inputclsinstance.lnedentry[
'bskip'])
else:
self.bottomskiplines = 0
            if inputclsinstance.lnedentry['delim'] != '':
self.delimitchar = inputclsinstance.lnedentry[
'delim']
else:
self.delimitchar = '\t'
if inputclsinstance.checks is True:
self.header = -1
else:
self.header = 'infer'
self._data = None
self.inputoptions = {
'.csv': {
'filename':self.filetoload
},
'.xlsx': {
'filename':self.filetoload,
'sheet':self.sheet
},
'.txt': {
'filename': self.filetoload,
'skiprows': self.topskiplines,
'skipfooter': self.bottomskiplines,
'delimiter': self.delimitchar,
'header': self.header
}
}
            self.accepted_filetypes = [
                '.csv', '.txt', '.xlsx', '.xls'
            ]
@property
def file_id(self):
'''
extension property based on the user inputs
'''
if self.filetoload is None:
return None
else:
try:
filename, ex = os.path.splitext(self.filetoload)
if '.' in ex:
print('filename (class): ', filename, ex)
return self.get_info(
name=filename, ext=ex, version=self.state)
else:
                    raise IOError('unrecognized file extension')
            except Exception:
                raise IOError('unrecognized file extension')
def save_to_memento(self):
'''
Adding a method to set the data
attribute. File type must be able to be read in by
pandas.
'''
if self.file_id.ext not in self.accepted_filetypes:
raise IOError('Cannot open file type')
try:
if self.file_id.ext == '.csv':
self._data = read_csv(
self.inputoptions[
'.csv']['filename']
)
elif (
self.file_id.ext == '.xls' or
self.file_id.ext == '.xlsx'):
self._data = read_excel(
self.inputoptions[
                            '.xlsx']['filename'],
sheetname=self.inputoptions[
                            '.xlsx']['sheet']
)
elif self.file_id.ext == '.txt':
self._data = read_table(
self.inputoptions[
'.txt']['filename'],
delimiter=self.inputoptions[
'.txt']['delimiter'],
skiprows=self.inputoptions[
'.txt']['skiprows'],
header=self.inputoptions[
'.txt']['header'],
error_bad_lines=False,
engine='c'
)
except Exception as e:
print('Could not read in file: ', str(e))
for i, item in enumerate(self._data.columns):
na_vals = [
9999, 99999, 999999,
-9999, -99999, -999999,
                -8888, -88888, -888888, -888,
9999.0, 99999.0, 999999.0,
-9999.0, -99999.0, -999999.0,
-8888.0, -88888.0, -888888.0,
-888.0
]
self._data[item].replace(
dict(zip(na_vals, ['NaN']*len(na_vals))),
inplace=True)
self._data.fillna('NA',inplace=True)
self._data = self._data[
self._data.isnull().all(axis=1) != True]
memento = Memento(dfstate = self._data.copy(),state= self.state)
return memento
return DataFileOriginator
@pytest.fixture
def Caretaker():
class Caretaker(object):
'''Caretaker for state of dataframe'''
def __init__(self):
self._statelogbook = {}
self._statelist = []
def save(self, memento):
'''
Saves memento object with a dictionary
recording the state name and the dataframe state
'''
self._statelogbook[
memento.get_state()] = memento
self._statelist.append(memento.get_state())
def restore(self):
'''
Restores a memento given a state_name
'''
try:
if self._statelist:
print('restore list:', self._statelist)
if len(self._statelist) == 1:
print('og restore')
return self._statelogbook[self._statelist[0]]
else:
self._statelist.pop()
return self._statelogbook[self._statelist[-1]]
else:
raise AttributeError('Cannot undo further')
except Exception as e:
print(str(e))
return Caretaker
@pytest.fixture
def Memento():
class Memento(object):
'''
Memento Class.
This simply records the
state of the data and gives it to the
FileCaretaker
'''
def __init__(self, dfstate, state):
self._dfstate = dfstate.copy(deep=True)
self._state = str(state)
def get_dfstate(self):
return self._dfstate
def get_state(self):
return self._state
return Memento
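# Illustrative sketch (not an actual test): the round-trip the fixtures above
# implement -- snapshot the state into a memento, mutate the originator, and
# the saved state still survives in the memento.
def _example_memento_roundtrip():
    df = DataFrame({'a': [1, 2]})
    snapshot = df.copy(deep=True)  # the memento: a frozen copy of the state
    df['a'] = [9, 9]               # the originator mutates its state
    return snapshot['a'].tolist()  # -> [1, 2]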
@pytest.fixture
def DataOriginator():
class DataOriginator(object):
def __init__(self, df, state):
self._data = df
self._state = state
def save_to_memento(self):
memento = Memento(self._data, self._state)
return memento
def restore_from_memento(self, memento):
self._data = memento.get_dfstate()
self._state = memento.get_state()
return DataOriginator
@pytest.fixture
def user_input():
rbtn = {'csv': True, 'xlsx': False, 'txt': False}
lned = {
'sheet': '', 'delim': '', 'tskip': '', 'bskip': '',
'header': ''
}
fname = (
rootpath + end + 'Datasets_manual_test' + end +
'raw_data_test_1.csv')
user_input = ini.InputHandler(
name='fileoptions',lnedentry=lned,
rbtns=rbtn, filename=fname, checks=False)
return user_input
@pytest.fixture
def real_data():
rbtn = {'csv': False, 'xlsx': False, 'txt': True}
lned = {
'sheet': '', 'delim': '\t', 'tskip': '', 'bskip': '',
'header': ''
}
fname = (rootpath + end +'data'+end + 'SGS_LTER_Humus_canopyCover.txt')
user_input = ini.InputHandler(
name='fileoptions',lnedentry=lned,
rbtns=rbtn, filename=fname, checks=False)
return user_input
# def test_csv_reader_method(
# user_input, Caretaker,
# DataFileOriginator, DataOriginator, Memento):
# caretaker = Caretaker()
# originator_from_file = DataFileOriginator(user_input)
# originator = DataOriginator(None, 'Initialize')
# assert (isinstance(originator, DataOriginator)) is True
# caretaker.save(originator_from_file.save_to_memento())
# originator.restore_from_memento(caretaker.restore())
# assert (isinstance(originator._data, DataFrame)) is True
def test_txt_true(
real_data, Caretaker,
DataFileOriginator, DataOriginator, Memento):
caretaker = Caretaker()
originator_from_file = DataFileOriginator(real_data)
originator = DataOriginator(None, 'Initialize')
assert (isinstance(originator, DataOriginator)) is True
caretaker.save(originator_from_file.save_to_memento())
originator.restore_from_memento(caretaker.restore())
assert (isinstance(originator._data, DataFrame)) is True
print(originator._data)
#
# @pytest.fixture
# def user_txt():
# rbtn = {'csv': False, 'xlsx': False, 'txt': True}
# lned = {
# 'sheet': '', 'delim': '', 'tskip': '', 'bskip': '',
# 'header': ''
# }
# fname = (
# rootpath + end + 'Datasets_manual_test' + end +
# 'climate_precip.txt')
# user_input = ini.InputHandler(
# name='fileoptions', lnedentry=lned,
# rbtns=rbtn, filename=fname, checks=True)
# return user_input
#
# def test_txt_reader_method(
# user_txt, Caretaker,
# DataFileOriginator, DataOriginator, Memento):
# caretaker = Caretaker()
# originator_from_file = DataFileOriginator(user_txt)
# originator = DataOriginator(None, 'Initialize')
# assert (isinstance(originator, DataOriginator)) is True
# caretaker.save(originator_from_file.save_to_memento())
# originator.restore_from_memento(caretaker.restore())
# assert (isinstance(originator._data, DataFrame)) is True
#
#
#
# @pytest.fixture
# def user_txt_delim():
# rbtn = {'csv': False, 'xlsx': False, 'txt': True}
# lned = {
# 'sheet': '', 'delim': ',', 'tskip': '', 'bskip': '',
# 'header': ''
# }
# fname = 'Datasets_manual_test/climate_temp_test.txt'
# user_input = ini.InputHandler(
# name='fileoptions', lnedentry=lned,
# rbtns=rbtn, filename=fname, checks=False)
# return user_input
#
# def test_txt_comma_delim_reader_method(
# user_txt_delim, Caretaker, user_txt,
# DataFileOriginator, DataOriginator, Memento):
# caretaker = Caretaker()
#
# originator_from_file = DataFileOriginator(user_txt_delim)
# originator = DataOriginator(None, 'Initialize')
# assert (isinstance(originator, DataOriginator)) is True
# caretaker.save(originator_from_file.save_to_memento())
# originator.restore_from_memento(caretaker.restore())
# assert (isinstance(originator._data, DataFrame)) is True
# originator_from_file = DataFileOriginator(user_txt)
# originator = DataOriginator(None, 'Initialize')
# assert (isinstance(originator, DataOriginator)) is True
# caretaker.save(originator_from_file.save_to_memento())
# originator.restore_from_memento(caretaker.restore())
# assert (isinstance(originator._data, DataFrame)) is True
#
#
#
# @pytest.fixture
# def user_txt_skiplines_no_header():
# rbtn = {'csv': False, 'xlsx': False, 'txt': True}
# lned = {
# 'sheet': '', 'delim': '\t', 'tskip': '8', 'bskip': '',
# 'header': '-1'
# }
# fname = (
# rootpath + end + 'Datasets_manual_test' + end +
# 'skip_no_header_test.txt')
# user_input = ini.InputHandler(
# name='fileoptions', lnedentry=lned,
# rbtns=rbtn, filename=fname, checks=True)
# return user_input
#
# def test_txt_skiplines_no_header(
# user_txt_skiplines_no_header, Caretaker,
# DataFileOriginator, DataOriginator, Memento):
# caretaker = Caretaker()
# originator_from_file = DataFileOriginator(
# user_txt_skiplines_no_header)
# originator = DataOriginator(None, 'Initialize')
# assert (isinstance(originator, DataOriginator)) is True
# caretaker.save(originator_from_file.save_to_memento())
# originator.restore_from_memento(caretaker.restore())
# data = originator._data
# assert (isinstance(data, DataFrame)) is True
# assert (len(data.columns) == 3) is True
# print(data)
#
# @pytest.fixture
# def user_txt_skiplines_header():
# rbtn = {'csv': False, 'xlsx': False, 'txt': True}
# lned = {
# 'sheet': '', 'delim': '\t', 'tskip': '10', 'bskip': '',
#             'header': ''
# }
# fname = (
# rootpath + end + 'Datasets_manual_test' + end +
# 'skip_header_test.txt')
# user_input = ini.InputHandler(
# name='fileoptions', lnedentry=lned,
# rbtns=rbtn, filename=fname, checks=False)
# return user_input
#
# def test_txt_skiplines_header(
# user_txt_skiplines_header, Caretaker,
# DataFileOriginator, DataOriginator, Memento):
# caretaker = Caretaker()
# originator_from_file = DataFileOriginator(
# user_txt_skiplines_header)
# originator = DataOriginator(None, 'Initialize')
# assert (isinstance(originator, DataOriginator)) is True
# caretaker.save(originator_from_file.save_to_memento())
# originator.restore_from_memento(caretaker.restore())
# data = originator._data
# assert (isinstance(data, DataFrame)) is True
# assert (len(data.columns) == 3) is True
# print(data)
#
|
mit
|
mfjb/scikit-learn
|
sklearn/decomposition/tests/test_fastica.py
|
272
|
7798
|
"""
Test the fastica algorithm.
"""
import itertools
import warnings
import numpy as np
from scipy import stats
from nose.tools import assert_raises
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns
from sklearn.decomposition import FastICA, fastica, PCA
from sklearn.decomposition.fastica_ import _gs_decorrelation
from sklearn.externals.six import moves
def center_and_norm(x, axis=-1):
""" Centers and norms x **in place**
Parameters
-----------
x: ndarray
Array with an axis of observations (statistical units) measured on
random variables.
axis: int, optional
Axis along which the mean and variance are calculated.
"""
x = np.rollaxis(x, axis)
x -= x.mean(axis=0)
x /= x.std(axis=0)
def test_gs():
# Test gram schmidt orthonormalization
# generate a random orthogonal matrix
rng = np.random.RandomState(0)
W, _, _ = np.linalg.svd(rng.randn(10, 10))
w = rng.randn(10)
_gs_decorrelation(w, W, 10)
assert_less((w ** 2).sum(), 1.e-10)
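    # removing the components of w along all 10 rows of the orthonormal
    # basis W leaves ~0; below, decorrelating against only the first 5 rows
    # should zero out exactly the first 5 coordinates of u in that basis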
w = rng.randn(10)
u = _gs_decorrelation(w, W, 5)
tmp = np.dot(u, W.T)
assert_less((tmp[:5] ** 2).sum(), 1.e-10)
def test_fastica_simple(add_noise=False):
# Test the FastICA algorithm on very simple data.
rng = np.random.RandomState(0)
# scipy.stats uses the global RNG:
np.random.seed(0)
n_samples = 1000
# Generate two sources:
s1 = (2 * np.sin(np.linspace(0, 100, n_samples)) > 0) - 1
s2 = stats.t.rvs(1, size=n_samples)
s = np.c_[s1, s2].T
center_and_norm(s)
s1, s2 = s
# Mixing angle
phi = 0.6
mixing = np.array([[np.cos(phi), np.sin(phi)],
[np.sin(phi), -np.cos(phi)]])
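    # an orthogonal 2x2 mixing matrix (a reflection, determinant -1), so the
    # mixture is invertible and the sources are recoverable up to sign and
    # permutation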
m = np.dot(mixing, s)
if add_noise:
m += 0.1 * rng.randn(2, 1000)
center_and_norm(m)
# function as fun arg
def g_test(x):
return x ** 3, (3 * x ** 2).mean(axis=-1)
algos = ['parallel', 'deflation']
nls = ['logcosh', 'exp', 'cube', g_test]
whitening = [True, False]
for algo, nl, whiten in itertools.product(algos, nls, whitening):
if whiten:
k_, mixing_, s_ = fastica(m.T, fun=nl, algorithm=algo)
assert_raises(ValueError, fastica, m.T, fun=np.tanh,
algorithm=algo)
else:
X = PCA(n_components=2, whiten=True).fit_transform(m.T)
k_, mixing_, s_ = fastica(X, fun=nl, algorithm=algo, whiten=False)
assert_raises(ValueError, fastica, X, fun=np.tanh,
algorithm=algo)
s_ = s_.T
# Check that the mixing model described in the docstring holds:
if whiten:
assert_almost_equal(s_, np.dot(np.dot(mixing_, k_), m))
center_and_norm(s_)
s1_, s2_ = s_
# Check to see if the sources have been estimated
# in the wrong order
if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
s2_, s1_ = s_
s1_ *= np.sign(np.dot(s1_, s1))
s2_ *= np.sign(np.dot(s2_, s2))
# Check that we have estimated the original sources
if not add_noise:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=2)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=2)
else:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=1)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=1)
# Test FastICA class
_, _, sources_fun = fastica(m.T, fun=nl, algorithm=algo, random_state=0)
ica = FastICA(fun=nl, algorithm=algo, random_state=0)
sources = ica.fit_transform(m.T)
assert_equal(ica.components_.shape, (2, 2))
assert_equal(sources.shape, (1000, 2))
assert_array_almost_equal(sources_fun, sources)
assert_array_almost_equal(sources, ica.transform(m.T))
assert_equal(ica.mixing_.shape, (2, 2))
for fn in [np.tanh, "exp(-.5(x^2))"]:
ica = FastICA(fun=fn, algorithm=algo, random_state=0)
assert_raises(ValueError, ica.fit, m.T)
assert_raises(TypeError, FastICA(fun=moves.xrange(10)).fit, m.T)
def test_fastica_nowhiten():
m = [[0, 1], [1, 0]]
# test for issue #697
ica = FastICA(n_components=1, whiten=False, random_state=0)
assert_warns(UserWarning, ica.fit, m)
assert_true(hasattr(ica, 'mixing_'))
def test_non_square_fastica(add_noise=False):
# Test the FastICA algorithm on very simple data.
rng = np.random.RandomState(0)
n_samples = 1000
# Generate two sources:
t = np.linspace(0, 100, n_samples)
s1 = np.sin(t)
s2 = np.ceil(np.sin(np.pi * t))
s = np.c_[s1, s2].T
center_and_norm(s)
s1, s2 = s
# Mixing matrix
mixing = rng.randn(6, 2)
m = np.dot(mixing, s)
if add_noise:
m += 0.1 * rng.randn(6, n_samples)
center_and_norm(m)
k_, mixing_, s_ = fastica(m.T, n_components=2, random_state=rng)
s_ = s_.T
# Check that the mixing model described in the docstring holds:
assert_almost_equal(s_, np.dot(np.dot(mixing_, k_), m))
center_and_norm(s_)
s1_, s2_ = s_
# Check to see if the sources have been estimated
# in the wrong order
if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
s2_, s1_ = s_
s1_ *= np.sign(np.dot(s1_, s1))
s2_ *= np.sign(np.dot(s2_, s2))
# Check that we have estimated the original sources
if not add_noise:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=3)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=3)
def test_fit_transform():
# Test FastICA.fit_transform
rng = np.random.RandomState(0)
X = rng.random_sample((100, 10))
for whiten, n_components in [[True, 5], [False, None]]:
n_components_ = (n_components if n_components is not None else
X.shape[1])
ica = FastICA(n_components=n_components, whiten=whiten, random_state=0)
Xt = ica.fit_transform(X)
assert_equal(ica.components_.shape, (n_components_, 10))
assert_equal(Xt.shape, (100, n_components_))
ica = FastICA(n_components=n_components, whiten=whiten, random_state=0)
ica.fit(X)
assert_equal(ica.components_.shape, (n_components_, 10))
Xt2 = ica.transform(X)
assert_array_almost_equal(Xt, Xt2)
def test_inverse_transform():
# Test FastICA.inverse_transform
n_features = 10
n_samples = 100
n1, n2 = 5, 10
rng = np.random.RandomState(0)
X = rng.random_sample((n_samples, n_features))
expected = {(True, n1): (n_features, n1),
(True, n2): (n_features, n2),
(False, n1): (n_features, n2),
(False, n2): (n_features, n2)}
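    # with whiten=False the requested n_components is ignored (see the
    # "n_components ignored" warning caught below), so the mixing matrix
    # keeps the full (n_features, n_features) shape even when n1=5 is asked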
for whiten in [True, False]:
for n_components in [n1, n2]:
n_components_ = (n_components if n_components is not None else
X.shape[1])
ica = FastICA(n_components=n_components, random_state=rng,
whiten=whiten)
with warnings.catch_warnings(record=True):
# catch "n_components ignored" warning
Xt = ica.fit_transform(X)
expected_shape = expected[(whiten, n_components_)]
assert_equal(ica.mixing_.shape, expected_shape)
X2 = ica.inverse_transform(Xt)
assert_equal(X.shape, X2.shape)
# reversibility test in non-reduction case
if n_components == X.shape[1]:
assert_array_almost_equal(X, X2)
|
bsd-3-clause
|
joernhees/scikit-learn
|
sklearn/datasets/tests/test_mldata.py
|
384
|
5221
|
"""Test functionality of mldata fetching utilities."""
import os
import shutil
import tempfile
import scipy as sp
from sklearn import datasets
from sklearn.datasets import mldata_filename, fetch_mldata
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_not_in
from sklearn.utils.testing import mock_mldata_urlopen
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import with_setup
from sklearn.utils.testing import assert_array_equal
tmpdir = None
def setup_tmpdata():
# create temporary dir
global tmpdir
tmpdir = tempfile.mkdtemp()
os.makedirs(os.path.join(tmpdir, 'mldata'))
def teardown_tmpdata():
# remove temporary dir
if tmpdir is not None:
shutil.rmtree(tmpdir)
def test_mldata_filename():
cases = [('datasets-UCI iris', 'datasets-uci-iris'),
('news20.binary', 'news20binary'),
('book-crossing-ratings-1.0', 'book-crossing-ratings-10'),
('Nile Water Level', 'nile-water-level'),
('MNIST (original)', 'mnist-original')]
for name, desired in cases:
assert_equal(mldata_filename(name), desired)
@with_setup(setup_tmpdata, teardown_tmpdata)
def test_download():
"""Test that fetch_mldata is able to download and cache a data set."""
_urlopen_ref = datasets.mldata.urlopen
datasets.mldata.urlopen = mock_mldata_urlopen({
'mock': {
'label': sp.ones((150,)),
'data': sp.ones((150, 4)),
},
})
try:
mock = fetch_mldata('mock', data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data"]:
assert_in(n, mock)
assert_equal(mock.target.shape, (150,))
assert_equal(mock.data.shape, (150, 4))
assert_raises(datasets.mldata.HTTPError,
fetch_mldata, 'not_existing_name')
finally:
datasets.mldata.urlopen = _urlopen_ref
@with_setup(setup_tmpdata, teardown_tmpdata)
def test_fetch_one_column():
_urlopen_ref = datasets.mldata.urlopen
try:
dataname = 'onecol'
# create fake data set in cache
x = sp.arange(6).reshape(2, 3)
datasets.mldata.urlopen = mock_mldata_urlopen({dataname: {'x': x}})
dset = fetch_mldata(dataname, data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "data"]:
assert_in(n, dset)
assert_not_in("target", dset)
assert_equal(dset.data.shape, (2, 3))
assert_array_equal(dset.data, x)
# transposing the data array
dset = fetch_mldata(dataname, transpose_data=False, data_home=tmpdir)
assert_equal(dset.data.shape, (3, 2))
finally:
datasets.mldata.urlopen = _urlopen_ref
@with_setup(setup_tmpdata, teardown_tmpdata)
def test_fetch_multiple_column():
_urlopen_ref = datasets.mldata.urlopen
try:
# create fake data set in cache
x = sp.arange(6).reshape(2, 3)
y = sp.array([1, -1])
z = sp.arange(12).reshape(4, 3)
# by default
dataname = 'threecol-default'
datasets.mldata.urlopen = mock_mldata_urlopen({
dataname: (
{
'label': y,
'data': x,
'z': z,
},
['z', 'data', 'label'],
),
})
dset = fetch_mldata(dataname, data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data", "z"]:
assert_in(n, dset)
assert_not_in("x", dset)
assert_not_in("y", dset)
assert_array_equal(dset.data, x)
assert_array_equal(dset.target, y)
assert_array_equal(dset.z, z.T)
# by order
dataname = 'threecol-order'
datasets.mldata.urlopen = mock_mldata_urlopen({
dataname: ({'y': y, 'x': x, 'z': z},
['y', 'x', 'z']), })
dset = fetch_mldata(dataname, data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data", "z"]:
assert_in(n, dset)
assert_not_in("x", dset)
assert_not_in("y", dset)
assert_array_equal(dset.data, x)
assert_array_equal(dset.target, y)
assert_array_equal(dset.z, z.T)
# by number
dataname = 'threecol-number'
datasets.mldata.urlopen = mock_mldata_urlopen({
dataname: ({'y': y, 'x': x, 'z': z},
['z', 'x', 'y']),
})
dset = fetch_mldata(dataname, target_name=2, data_name=0,
data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data", "x"]:
assert_in(n, dset)
assert_not_in("y", dset)
assert_not_in("z", dset)
assert_array_equal(dset.data, z)
assert_array_equal(dset.target, y)
# by name
dset = fetch_mldata(dataname, target_name='y', data_name='z',
data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data", "x"]:
assert_in(n, dset)
assert_not_in("y", dset)
assert_not_in("z", dset)
finally:
datasets.mldata.urlopen = _urlopen_ref
|
bsd-3-clause
|
eickenberg/scikit-learn
|
benchmarks/bench_lasso.py
|
297
|
3305
|
"""
Benchmarks of Lasso vs LassoLars
First, we fix a training set and increase the number of
samples. Then we plot the computation time as function of
the number of samples.
In the second benchmark, we increase the number of dimensions of the
training set. Then we plot the computation time as function of
the number of dimensions.
In both cases, only 10% of the features are informative.
"""
import gc
from time import time
import numpy as np
from sklearn.datasets.samples_generator import make_regression
def compute_bench(alpha, n_samples, n_features, precompute):
lasso_results = []
lars_lasso_results = []
it = 0
for ns in n_samples:
for nf in n_features:
it += 1
print('==================')
print('Iteration %s of %s' % (it, max(len(n_samples),
len(n_features))))
print('==================')
n_informative = nf // 10
X, Y, coef_ = make_regression(n_samples=ns, n_features=nf,
n_informative=n_informative,
noise=0.1, coef=True)
X /= np.sqrt(np.sum(X ** 2, axis=0)) # Normalize data
gc.collect()
print("- benchmarking Lasso")
clf = Lasso(alpha=alpha, fit_intercept=False,
precompute=precompute)
tstart = time()
clf.fit(X, Y)
lasso_results.append(time() - tstart)
gc.collect()
print("- benchmarking LassoLars")
clf = LassoLars(alpha=alpha, fit_intercept=False,
normalize=False, precompute=precompute)
tstart = time()
clf.fit(X, Y)
lars_lasso_results.append(time() - tstart)
return lasso_results, lars_lasso_results
if __name__ == '__main__':
from sklearn.linear_model import Lasso, LassoLars
import pylab as pl
alpha = 0.01 # regularization parameter
n_features = 10
list_n_samples = np.linspace(100, 1000000, 5).astype(np.int)
lasso_results, lars_lasso_results = compute_bench(alpha, list_n_samples,
[n_features], precompute=True)
pl.figure('scikit-learn LASSO benchmark results')
pl.subplot(211)
pl.plot(list_n_samples, lasso_results, 'b-',
label='Lasso')
pl.plot(list_n_samples, lars_lasso_results, 'r-',
label='LassoLars')
pl.title('precomputed Gram matrix, %d features, alpha=%s' % (n_features, alpha))
pl.legend(loc='upper left')
pl.xlabel('number of samples')
pl.ylabel('Time (s)')
pl.axis('tight')
n_samples = 2000
list_n_features = np.linspace(500, 3000, 5).astype(np.int)
lasso_results, lars_lasso_results = compute_bench(alpha, [n_samples],
list_n_features, precompute=False)
pl.subplot(212)
pl.plot(list_n_features, lasso_results, 'b-', label='Lasso')
pl.plot(list_n_features, lars_lasso_results, 'r-', label='LassoLars')
pl.title('%d samples, alpha=%s' % (n_samples, alpha))
pl.legend(loc='upper left')
pl.xlabel('number of features')
pl.ylabel('Time (s)')
pl.axis('tight')
pl.show()
|
bsd-3-clause
|
aabadie/scikit-learn
|
sklearn/linear_model/tests/test_sag.py
|
45
|
28228
|
# Authors: Danny Sullivan <[email protected]>
# Tom Dupre la Tour <[email protected]>
#
# License: BSD 3 clause
import math
import numpy as np
import scipy.sparse as sp
from sklearn.linear_model.sag import get_auto_step_size
from sklearn.linear_model.sag_fast import _multinomial_grad_loss_all_samples
from sklearn.linear_model import LogisticRegression, Ridge
from sklearn.linear_model.base import make_dataset
from sklearn.linear_model.logistic import _multinomial_loss_grad
from sklearn.utils.extmath import logsumexp
from sklearn.utils.extmath import row_norms
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils import compute_class_weight
from sklearn.utils import check_random_state
from sklearn.preprocessing import LabelEncoder, LabelBinarizer
from sklearn.datasets import make_blobs, load_iris
from sklearn.base import clone
iris = load_iris()
# this is used for sag classification
def log_dloss(p, y):
z = p * y
# approximately equal and saves the computation of the log
if z > 18.0:
return math.exp(-z) * -y
if z < -18.0:
return -y
return -y / (math.exp(z) + 1.0)
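# note: log_dloss is d/dp log(1 + exp(-y * p)) = -y / (exp(y * p) + 1); the
# two branches above are asymptotic approximations that avoid overflowing
# exp() once |y * p| > 18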
def log_loss(p, y):
return np.mean(np.log(1. + np.exp(-y * p)))
# this is used for sag regression
def squared_dloss(p, y):
return p - y
def squared_loss(p, y):
return np.mean(0.5 * (p - y) * (p - y))
# function for computing the regularized primal objective for a given loss
def get_pobj(w, alpha, myX, myy, loss):
w = w.ravel()
pred = np.dot(myX, w)
p = loss(pred, myy)
p += alpha * w.dot(w) / 2.
return p
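# Reference (pure-Python) SAG: remember the most recent gradient of every
# sample in `gradient_memory` and step along the sum of stored gradients
# averaged over the number of samples seen so far.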
def sag(X, y, step_size, alpha, n_iter=1, dloss=None, sparse=False,
sample_weight=None, fit_intercept=True):
n_samples, n_features = X.shape[0], X.shape[1]
weights = np.zeros(X.shape[1])
sum_gradient = np.zeros(X.shape[1])
gradient_memory = np.zeros((n_samples, n_features))
intercept = 0.0
intercept_sum_gradient = 0.0
intercept_gradient_memory = np.zeros(n_samples)
rng = np.random.RandomState(77)
decay = 1.0
seen = set()
# sparse data has a fixed decay of .01
if sparse:
decay = .01
for epoch in range(n_iter):
for k in range(n_samples):
idx = int(rng.rand(1) * n_samples)
# idx = k
entry = X[idx]
seen.add(idx)
p = np.dot(entry, weights) + intercept
gradient = dloss(p, y[idx])
if sample_weight is not None:
gradient *= sample_weight[idx]
update = entry * gradient + alpha * weights
sum_gradient += update - gradient_memory[idx]
gradient_memory[idx] = update
if fit_intercept:
intercept_sum_gradient += (gradient -
intercept_gradient_memory[idx])
intercept_gradient_memory[idx] = gradient
intercept -= (step_size * intercept_sum_gradient
/ len(seen) * decay)
weights -= step_size * sum_gradient / len(seen)
return weights, intercept
def sag_sparse(X, y, step_size, alpha, n_iter=1,
dloss=None, sample_weight=None, sparse=False,
fit_intercept=True):
if step_size * alpha == 1.:
raise ZeroDivisionError("Sparse sag does not handle the case "
"step_size * alpha == 1")
n_samples, n_features = X.shape[0], X.shape[1]
weights = np.zeros(n_features)
sum_gradient = np.zeros(n_features)
last_updated = np.zeros(n_features, dtype=np.int)
gradient_memory = np.zeros(n_samples)
rng = np.random.RandomState(77)
intercept = 0.0
intercept_sum_gradient = 0.0
wscale = 1.0
decay = 1.0
seen = set()
c_sum = np.zeros(n_iter * n_samples)
# sparse data has a fixed decay of .01
if sparse:
decay = .01
counter = 0
for epoch in range(n_iter):
for k in range(n_samples):
# idx = k
idx = int(rng.rand(1) * n_samples)
entry = X[idx]
seen.add(idx)
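            # lazy (just-in-time) update: before using this sample, apply all
            # accumulated average-gradient steps these weights have missed
            # since `last_updated`, via the running step sums in c_sum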
if counter >= 1:
for j in range(n_features):
if last_updated[j] == 0:
weights[j] -= c_sum[counter - 1] * sum_gradient[j]
else:
weights[j] -= ((c_sum[counter - 1] -
c_sum[last_updated[j] - 1]) *
sum_gradient[j])
last_updated[j] = counter
p = (wscale * np.dot(entry, weights)) + intercept
gradient = dloss(p, y[idx])
if sample_weight is not None:
gradient *= sample_weight[idx]
update = entry * gradient
sum_gradient += update - (gradient_memory[idx] * entry)
if fit_intercept:
intercept_sum_gradient += gradient - gradient_memory[idx]
intercept -= (step_size * intercept_sum_gradient
/ len(seen) * decay)
gradient_memory[idx] = gradient
wscale *= (1.0 - alpha * step_size)
if counter == 0:
c_sum[0] = step_size / (wscale * len(seen))
else:
c_sum[counter] = (c_sum[counter - 1] +
step_size / (wscale * len(seen)))
if counter >= 1 and wscale < 1e-9:
for j in range(n_features):
if last_updated[j] == 0:
weights[j] -= c_sum[counter] * sum_gradient[j]
else:
weights[j] -= ((c_sum[counter] -
c_sum[last_updated[j] - 1]) *
sum_gradient[j])
last_updated[j] = counter + 1
c_sum[counter] = 0
weights *= wscale
wscale = 1.0
counter += 1
for j in range(n_features):
if last_updated[j] == 0:
weights[j] -= c_sum[counter - 1] * sum_gradient[j]
else:
weights[j] -= ((c_sum[counter - 1] -
c_sum[last_updated[j] - 1]) *
sum_gradient[j])
weights *= wscale
return weights, intercept
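# The step sizes below follow the usual SAG Lipschitz bounds: for log loss
# the per-sample gradient constant is max_i ||x_i||^2 / 4 (plus the intercept
# column and alpha), hence the factors of 4; for squared loss it is
# max_i ||x_i||^2 (plus intercept and alpha).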
def get_step_size(X, alpha, fit_intercept, classification=True):
if classification:
return (4.0 / (np.max(np.sum(X * X, axis=1))
+ fit_intercept + 4.0 * alpha))
else:
return 1.0 / (np.max(np.sum(X * X, axis=1)) + fit_intercept + alpha)
@ignore_warnings
def test_classifier_matching():
n_samples = 20
X, y = make_blobs(n_samples=n_samples, centers=2, random_state=0,
cluster_std=0.1)
y[y == 0] = -1
alpha = 1.1
n_iter = 80
fit_intercept = True
step_size = get_step_size(X, alpha, fit_intercept)
clf = LogisticRegression(solver="sag", fit_intercept=fit_intercept,
tol=1e-11, C=1. / alpha / n_samples,
max_iter=n_iter, random_state=10)
clf.fit(X, y)
weights, intercept = sag_sparse(X, y, step_size, alpha, n_iter=n_iter,
dloss=log_dloss,
fit_intercept=fit_intercept)
weights2, intercept2 = sag(X, y, step_size, alpha, n_iter=n_iter,
dloss=log_dloss,
fit_intercept=fit_intercept)
weights = np.atleast_2d(weights)
intercept = np.atleast_1d(intercept)
weights2 = np.atleast_2d(weights2)
intercept2 = np.atleast_1d(intercept2)
assert_array_almost_equal(weights, clf.coef_, decimal=10)
assert_array_almost_equal(intercept, clf.intercept_, decimal=10)
assert_array_almost_equal(weights2, clf.coef_, decimal=10)
assert_array_almost_equal(intercept2, clf.intercept_, decimal=10)
@ignore_warnings
def test_regressor_matching():
n_samples = 10
n_features = 5
rng = np.random.RandomState(10)
X = rng.normal(size=(n_samples, n_features))
true_w = rng.normal(size=n_features)
y = X.dot(true_w)
alpha = 1.
n_iter = 100
fit_intercept = True
step_size = get_step_size(X, alpha, fit_intercept, classification=False)
clf = Ridge(fit_intercept=fit_intercept, tol=.00000000001, solver='sag',
alpha=alpha * n_samples, max_iter=n_iter)
clf.fit(X, y)
weights1, intercept1 = sag_sparse(X, y, step_size, alpha, n_iter=n_iter,
dloss=squared_dloss,
fit_intercept=fit_intercept)
weights2, intercept2 = sag(X, y, step_size, alpha, n_iter=n_iter,
dloss=squared_dloss,
fit_intercept=fit_intercept)
assert_array_almost_equal(weights1, clf.coef_, decimal=10)
assert_array_almost_equal(intercept1, clf.intercept_, decimal=10)
assert_array_almost_equal(weights2, clf.coef_, decimal=10)
assert_array_almost_equal(intercept2, clf.intercept_, decimal=10)
@ignore_warnings
def test_sag_pobj_matches_logistic_regression():
"""tests if the sag pobj matches log reg"""
n_samples = 100
alpha = 1.0
max_iter = 20
X, y = make_blobs(n_samples=n_samples, centers=2, random_state=0,
cluster_std=0.1)
clf1 = LogisticRegression(solver='sag', fit_intercept=False, tol=.0000001,
C=1. / alpha / n_samples, max_iter=max_iter,
random_state=10)
clf2 = clone(clf1)
clf3 = LogisticRegression(fit_intercept=False, tol=.0000001,
C=1. / alpha / n_samples, max_iter=max_iter,
random_state=10)
clf1.fit(X, y)
clf2.fit(sp.csr_matrix(X), y)
clf3.fit(X, y)
pobj1 = get_pobj(clf1.coef_, alpha, X, y, log_loss)
pobj2 = get_pobj(clf2.coef_, alpha, X, y, log_loss)
pobj3 = get_pobj(clf3.coef_, alpha, X, y, log_loss)
assert_array_almost_equal(pobj1, pobj2, decimal=4)
assert_array_almost_equal(pobj2, pobj3, decimal=4)
assert_array_almost_equal(pobj3, pobj1, decimal=4)
@ignore_warnings
def test_sag_pobj_matches_ridge_regression():
"""tests if the sag pobj matches ridge reg"""
n_samples = 100
n_features = 10
alpha = 1.0
n_iter = 100
fit_intercept = False
rng = np.random.RandomState(10)
X = rng.normal(size=(n_samples, n_features))
true_w = rng.normal(size=n_features)
y = X.dot(true_w)
clf1 = Ridge(fit_intercept=fit_intercept, tol=.00000000001, solver='sag',
alpha=alpha, max_iter=n_iter, random_state=42)
clf2 = clone(clf1)
clf3 = Ridge(fit_intercept=fit_intercept, tol=.00001, solver='lsqr',
alpha=alpha, max_iter=n_iter, random_state=42)
clf1.fit(X, y)
clf2.fit(sp.csr_matrix(X), y)
clf3.fit(X, y)
pobj1 = get_pobj(clf1.coef_, alpha, X, y, squared_loss)
pobj2 = get_pobj(clf2.coef_, alpha, X, y, squared_loss)
pobj3 = get_pobj(clf3.coef_, alpha, X, y, squared_loss)
assert_array_almost_equal(pobj1, pobj2, decimal=4)
assert_array_almost_equal(pobj1, pobj3, decimal=4)
assert_array_almost_equal(pobj3, pobj2, decimal=4)
@ignore_warnings
def test_sag_regressor_computed_correctly():
"""tests if the sag regressor is computed correctly"""
alpha = .1
n_features = 10
n_samples = 40
max_iter = 50
tol = .000001
fit_intercept = True
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
y = np.dot(X, w) + 2.
step_size = get_step_size(X, alpha, fit_intercept, classification=False)
clf1 = Ridge(fit_intercept=fit_intercept, tol=tol, solver='sag',
alpha=alpha * n_samples, max_iter=max_iter)
clf2 = clone(clf1)
clf1.fit(X, y)
clf2.fit(sp.csr_matrix(X), y)
spweights1, spintercept1 = sag_sparse(X, y, step_size, alpha,
n_iter=max_iter,
dloss=squared_dloss,
fit_intercept=fit_intercept)
spweights2, spintercept2 = sag_sparse(X, y, step_size, alpha,
n_iter=max_iter,
dloss=squared_dloss, sparse=True,
fit_intercept=fit_intercept)
assert_array_almost_equal(clf1.coef_.ravel(),
spweights1.ravel(),
decimal=3)
assert_almost_equal(clf1.intercept_, spintercept1, decimal=1)
# TODO: uncomment when sparse Ridge with intercept will be fixed (#4710)
#assert_array_almost_equal(clf2.coef_.ravel(),
# spweights2.ravel(),
# decimal=3)
    #assert_almost_equal(clf2.intercept_, spintercept2, decimal=1)
@ignore_warnings
def test_get_auto_step_size():
X = np.array([[1, 2, 3], [2, 3, 4], [2, 3, 2]], dtype=np.float64)
alpha = 1.2
fit_intercept = False
# sum the squares of the second sample because that's the largest
max_squared_sum = 4 + 9 + 16
max_squared_sum_ = row_norms(X, squared=True).max()
assert_almost_equal(max_squared_sum, max_squared_sum_, decimal=4)
for fit_intercept in (True, False):
step_size_sqr = 1.0 / (max_squared_sum + alpha + int(fit_intercept))
step_size_log = 4.0 / (max_squared_sum + 4.0 * alpha +
int(fit_intercept))
step_size_sqr_ = get_auto_step_size(max_squared_sum_, alpha, "squared",
fit_intercept)
step_size_log_ = get_auto_step_size(max_squared_sum_, alpha, "log",
fit_intercept)
assert_almost_equal(step_size_sqr, step_size_sqr_, decimal=4)
assert_almost_equal(step_size_log, step_size_log_, decimal=4)
msg = 'Unknown loss function for SAG solver, got wrong instead of'
assert_raise_message(ValueError, msg, get_auto_step_size,
max_squared_sum_, alpha, "wrong", fit_intercept)
@ignore_warnings
def test_sag_regressor():
"""tests if the sag regressor performs well"""
xmin, xmax = -5, 5
n_samples = 20
tol = .001
max_iter = 20
alpha = 0.1
rng = np.random.RandomState(0)
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf1 = Ridge(tol=tol, solver='sag', max_iter=max_iter,
alpha=alpha * n_samples)
clf2 = clone(clf1)
clf1.fit(X, y)
clf2.fit(sp.csr_matrix(X), y)
score1 = clf1.score(X, y)
score2 = clf2.score(X, y)
assert_greater(score1, 0.99)
assert_greater(score2, 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()
clf1 = Ridge(tol=tol, solver='sag', max_iter=max_iter,
alpha=alpha * n_samples)
clf2 = clone(clf1)
clf1.fit(X, y)
clf2.fit(sp.csr_matrix(X), y)
score1 = clf1.score(X, y)
score2 = clf2.score(X, y)
assert_greater(score1, 0.5)
assert_greater(score2, 0.5)
@ignore_warnings
def test_sag_classifier_computed_correctly():
"""tests if the binary classifier is computed correctly"""
alpha = .1
n_samples = 50
n_iter = 50
tol = .00001
fit_intercept = True
X, y = make_blobs(n_samples=n_samples, centers=2, random_state=0,
cluster_std=0.1)
step_size = get_step_size(X, alpha, fit_intercept, classification=True)
classes = np.unique(y)
y_tmp = np.ones(n_samples)
y_tmp[y != classes[1]] = -1
y = y_tmp
clf1 = LogisticRegression(solver='sag', C=1. / alpha / n_samples,
max_iter=n_iter, tol=tol, random_state=77,
fit_intercept=fit_intercept)
clf2 = clone(clf1)
clf1.fit(X, y)
clf2.fit(sp.csr_matrix(X), y)
spweights, spintercept = sag_sparse(X, y, step_size, alpha, n_iter=n_iter,
dloss=log_dloss,
fit_intercept=fit_intercept)
spweights2, spintercept2 = sag_sparse(X, y, step_size, alpha,
n_iter=n_iter,
dloss=log_dloss, sparse=True,
fit_intercept=fit_intercept)
assert_array_almost_equal(clf1.coef_.ravel(),
spweights.ravel(),
decimal=2)
assert_almost_equal(clf1.intercept_, spintercept, decimal=1)
assert_array_almost_equal(clf2.coef_.ravel(),
spweights2.ravel(),
decimal=2)
assert_almost_equal(clf2.intercept_, spintercept2, decimal=1)
@ignore_warnings
def test_sag_multiclass_computed_correctly():
"""tests if the multiclass classifier is computed correctly"""
alpha = .1
n_samples = 20
tol = .00001
max_iter = 40
fit_intercept = True
X, y = make_blobs(n_samples=n_samples, centers=3, random_state=0,
cluster_std=0.1)
step_size = get_step_size(X, alpha, fit_intercept, classification=True)
classes = np.unique(y)
clf1 = LogisticRegression(solver='sag', C=1. / alpha / n_samples,
max_iter=max_iter, tol=tol, random_state=77,
fit_intercept=fit_intercept)
clf2 = clone(clf1)
clf1.fit(X, y)
clf2.fit(sp.csr_matrix(X), y)
coef1 = []
intercept1 = []
coef2 = []
intercept2 = []
for cl in classes:
y_encoded = np.ones(n_samples)
y_encoded[y != cl] = -1
spweights1, spintercept1 = sag_sparse(X, y_encoded, step_size, alpha,
dloss=log_dloss, n_iter=max_iter,
fit_intercept=fit_intercept)
spweights2, spintercept2 = sag_sparse(X, y_encoded, step_size, alpha,
dloss=log_dloss, n_iter=max_iter,
sparse=True,
fit_intercept=fit_intercept)
coef1.append(spweights1)
intercept1.append(spintercept1)
coef2.append(spweights2)
intercept2.append(spintercept2)
coef1 = np.vstack(coef1)
intercept1 = np.array(intercept1)
coef2 = np.vstack(coef2)
intercept2 = np.array(intercept2)
for i, cl in enumerate(classes):
assert_array_almost_equal(clf1.coef_[i].ravel(),
coef1[i].ravel(),
decimal=2)
assert_almost_equal(clf1.intercept_[i], intercept1[i], decimal=1)
assert_array_almost_equal(clf2.coef_[i].ravel(),
coef2[i].ravel(),
decimal=2)
assert_almost_equal(clf2.intercept_[i], intercept2[i], decimal=1)
@ignore_warnings
def test_classifier_results():
"""tests if classifier results match target"""
alpha = .1
n_features = 20
n_samples = 10
tol = .01
max_iter = 200
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
y = np.dot(X, w)
y = np.sign(y)
clf1 = LogisticRegression(solver='sag', C=1. / alpha / n_samples,
max_iter=max_iter, tol=tol, random_state=77)
clf2 = clone(clf1)
clf1.fit(X, y)
clf2.fit(sp.csr_matrix(X), y)
pred1 = clf1.predict(X)
pred2 = clf2.predict(X)
assert_almost_equal(pred1, y, decimal=12)
assert_almost_equal(pred2, y, decimal=12)
@ignore_warnings
def test_binary_classifier_class_weight():
"""tests binary classifier with classweights for each class"""
alpha = .1
n_samples = 50
n_iter = 20
tol = .00001
fit_intercept = True
X, y = make_blobs(n_samples=n_samples, centers=2, random_state=10,
cluster_std=0.1)
step_size = get_step_size(X, alpha, fit_intercept, classification=True)
classes = np.unique(y)
y_tmp = np.ones(n_samples)
y_tmp[y != classes[1]] = -1
y = y_tmp
class_weight = {1: .45, -1: .55}
clf1 = LogisticRegression(solver='sag', C=1. / alpha / n_samples,
max_iter=n_iter, tol=tol, random_state=77,
fit_intercept=fit_intercept,
class_weight=class_weight)
clf2 = clone(clf1)
clf1.fit(X, y)
clf2.fit(sp.csr_matrix(X), y)
le = LabelEncoder()
class_weight_ = compute_class_weight(class_weight, np.unique(y), y)
sample_weight = class_weight_[le.fit_transform(y)]
spweights, spintercept = sag_sparse(X, y, step_size, alpha, n_iter=n_iter,
dloss=log_dloss,
sample_weight=sample_weight,
fit_intercept=fit_intercept)
spweights2, spintercept2 = sag_sparse(X, y, step_size, alpha,
n_iter=n_iter,
dloss=log_dloss, sparse=True,
sample_weight=sample_weight,
fit_intercept=fit_intercept)
assert_array_almost_equal(clf1.coef_.ravel(),
spweights.ravel(),
decimal=2)
assert_almost_equal(clf1.intercept_, spintercept, decimal=1)
assert_array_almost_equal(clf2.coef_.ravel(),
spweights2.ravel(),
decimal=2)
assert_almost_equal(clf2.intercept_, spintercept2, decimal=1)
@ignore_warnings
def test_multiclass_classifier_class_weight():
"""tests multiclass with classweights for each class"""
alpha = .1
n_samples = 20
tol = .00001
max_iter = 50
class_weight = {0: .45, 1: .55, 2: .75}
fit_intercept = True
X, y = make_blobs(n_samples=n_samples, centers=3, random_state=0,
cluster_std=0.1)
step_size = get_step_size(X, alpha, fit_intercept, classification=True)
classes = np.unique(y)
clf1 = LogisticRegression(solver='sag', C=1. / alpha / n_samples,
max_iter=max_iter, tol=tol, random_state=77,
fit_intercept=fit_intercept,
class_weight=class_weight)
clf2 = clone(clf1)
clf1.fit(X, y)
clf2.fit(sp.csr_matrix(X), y)
le = LabelEncoder()
class_weight_ = compute_class_weight(class_weight, np.unique(y), y)
sample_weight = class_weight_[le.fit_transform(y)]
coef1 = []
intercept1 = []
coef2 = []
intercept2 = []
for cl in classes:
y_encoded = np.ones(n_samples)
y_encoded[y != cl] = -1
spweights1, spintercept1 = sag_sparse(X, y_encoded, step_size, alpha,
n_iter=max_iter, dloss=log_dloss,
sample_weight=sample_weight)
spweights2, spintercept2 = sag_sparse(X, y_encoded, step_size, alpha,
n_iter=max_iter, dloss=log_dloss,
sample_weight=sample_weight,
sparse=True)
coef1.append(spweights1)
intercept1.append(spintercept1)
coef2.append(spweights2)
intercept2.append(spintercept2)
coef1 = np.vstack(coef1)
intercept1 = np.array(intercept1)
coef2 = np.vstack(coef2)
intercept2 = np.array(intercept2)
for i, cl in enumerate(classes):
assert_array_almost_equal(clf1.coef_[i].ravel(),
coef1[i].ravel(),
decimal=2)
assert_almost_equal(clf1.intercept_[i], intercept1[i], decimal=1)
assert_array_almost_equal(clf2.coef_[i].ravel(),
coef2[i].ravel(),
decimal=2)
assert_almost_equal(clf2.intercept_[i], intercept2[i], decimal=1)
def test_classifier_single_class():
"""tests if ValueError is thrown with only one class"""
X = [[1, 2], [3, 4]]
y = [1, 1]
assert_raise_message(ValueError,
"This solver needs samples of at least 2 classes "
"in the data",
LogisticRegression(solver='sag').fit,
X, y)
def test_step_size_alpha_error():
X = [[0, 0], [0, 0]]
y = [1, -1]
fit_intercept = False
alpha = 1.
msg = ("Current sag implementation does not handle the case"
" step_size * alpha_scaled == 1")
clf1 = LogisticRegression(solver='sag', C=1. / alpha,
fit_intercept=fit_intercept)
assert_raise_message(ZeroDivisionError, msg, clf1.fit, X, y)
clf2 = Ridge(fit_intercept=fit_intercept, solver='sag', alpha=alpha)
assert_raise_message(ZeroDivisionError, msg, clf2.fit, X, y)
def test_multinomial_loss():
# test if the multinomial loss and gradient computations are consistent
X, y = iris.data, iris.target.astype(np.float64)
n_samples, n_features = X.shape
n_classes = len(np.unique(y))
rng = check_random_state(42)
weights = rng.randn(n_features, n_classes)
intercept = rng.randn(n_classes)
sample_weights = rng.randn(n_samples)
np.abs(sample_weights, sample_weights)
# compute loss and gradient like in multinomial SAG
dataset, _ = make_dataset(X, y, sample_weights, random_state=42)
loss_1, grad_1 = _multinomial_grad_loss_all_samples(dataset, weights,
intercept, n_samples,
n_features, n_classes)
# compute loss and gradient like in multinomial LogisticRegression
lbin = LabelBinarizer()
Y_bin = lbin.fit_transform(y)
weights_intercept = np.vstack((weights, intercept)).T.ravel()
loss_2, grad_2, _ = _multinomial_loss_grad(weights_intercept, X, Y_bin,
0.0, sample_weights)
grad_2 = grad_2.reshape(n_classes, -1)
grad_2 = grad_2[:, :-1].T
# comparison
assert_array_almost_equal(grad_1, grad_2)
assert_almost_equal(loss_1, loss_2)
def test_multinomial_loss_ground_truth():
# n_samples, n_features, n_classes = 4, 2, 3
n_classes = 3
X = np.array([[1.1, 2.2], [2.2, -4.4], [3.3, -2.2], [1.1, 1.1]])
y = np.array([0, 1, 2, 0])
lbin = LabelBinarizer()
Y_bin = lbin.fit_transform(y)
weights = np.array([[0.1, 0.2, 0.3], [1.1, 1.2, -1.3]])
intercept = np.array([1., 0, -.2])
sample_weights = np.array([0.8, 1, 1, 0.8])
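    # hand-computed softmax cross-entropy: with log-probabilities
    # p = pred - logsumexp(pred), the weighted loss is -sum_i sw_i * p_i[y_i]
    # and its gradient w.r.t. the weights is X^T @ (sw * (softmax(pred) - Y))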
prediction = np.dot(X, weights) + intercept
logsumexp_prediction = logsumexp(prediction, axis=1)
p = prediction - logsumexp_prediction[:, np.newaxis]
loss_1 = -(sample_weights[:, np.newaxis] * p * Y_bin).sum()
diff = sample_weights[:, np.newaxis] * (np.exp(p) - Y_bin)
grad_1 = np.dot(X.T, diff)
weights_intercept = np.vstack((weights, intercept)).T.ravel()
loss_2, grad_2, _ = _multinomial_loss_grad(weights_intercept, X, Y_bin,
0.0, sample_weights)
grad_2 = grad_2.reshape(n_classes, -1)
grad_2 = grad_2[:, :-1].T
assert_almost_equal(loss_1, loss_2)
assert_array_almost_equal(grad_1, grad_2)
# ground truth
loss_gt = 11.680360354325961
grad_gt = np.array([[-0.557487, -1.619151, +2.176638],
[-0.903942, +5.258745, -4.354803]])
assert_almost_equal(loss_1, loss_gt)
assert_array_almost_equal(grad_1, grad_gt)
|
bsd-3-clause
|
mplaine/www.laatukiikut.fi
|
2018/data_wrangling/Create Boulders Final.py
|
1
|
15366
|
# coding: utf-8
# # Suomen Parhaat Boulderit 2018: Create Boulders Final
# March 17, 2018
# <br>
# Google Maps JavaScript API key. See https://developers.google.com/maps/documentation/javascript/get-api-key
# In[1]:
GOOGLE_MAPS_JAVASCRIPT_API_KEY = "YOUR_API_KEY"
# <br>
# Import required modules
# In[2]:
import json
import time
import numpy as np
import pandas as pd
from geopy.geocoders import GoogleV3
from geopy.exc import GeocoderQueryError, GeocoderQuotaExceeded
# <br>
# Load the datafile `survey_-_cleaned.csv`, which contains the form responses to the **Suomen Parhaat Boulderit 2018** survey.
# In[3]:
# Load cleaned dataset
spb2018_df = pd.read_csv("data/survey_-_cleaned.csv")
# Drop duplicates (exclude the Timestamp column from comparisons)
spb2018_df = spb2018_df.drop_duplicates(subset=spb2018_df.columns.values.tolist()[1:])
spb2018_df.head()
# <br>
# Load the datafile `boulders_-_prefilled.csv`, which contains manually added details of each voted boulder.
# In[4]:
boulder_details_df = pd.read_csv("data/boulders_-_prefilled.csv", index_col="Name")
boulder_details_df.head()
# <br>
# Add column _VotedBy_
# In[5]:
"""
# Simpler but slower (appr. four times) implementation
# 533 ms ± 95.2 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
def add_column_votedby(column_name="VotedBy"):
# Gender mappings from Finnish to English
gender_dict = {
"Mies": "Male",
"Nainen": "Female"
}
# Iterate over boulders
for index, row in boulder_details_df.iterrows():
boulder_name = index
gender_s = spb2018_df.loc[(spb2018_df["Boulderin nimi"] == boulder_name) | (spb2018_df["Boulderin nimi.1"] == boulder_name) | (spb2018_df["Boulderin nimi.2"] == boulder_name), "Sukupuoli"]
boulder_details_df.loc[boulder_name, column_name] = gender_dict[gender_s.iloc[0]] if gender_s.nunique() == 1 else "Both"
"""
"""
# More complex but faster (appr. four times) implementation
# 136 ms ± 5.42 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)
def add_column_votedby(column_name="VotedBy"):
# Initialize the new column
boulder_details_df[column_name] = ""
# Gender mappings from Finnish to English
gender_dict = {
"Mies": "Male",
"Nainen": "Female"
}
def update_genders(gender, boulder_names):
for boulder_name in boulder_names:
previous_gender = boulder_details_df.loc[boulder_name, column_name]
if previous_gender == "" or previous_gender == gender:
boulder_details_df.loc[boulder_name, column_name] = gender
else:
boulder_details_df.loc[boulder_name, column_name] = "Both"
# Iterate over form responses
for index, row in spb2018_df.iterrows():
gender = gender_dict[row["Sukupuoli"]]
boulder_names = [row["Boulderin nimi"], row["Boulderin nimi.1"], row["Boulderin nimi.2"]]
boulder_names = [boulder_name for boulder_name in boulder_names if pd.notnull(boulder_name)]
update_genders(gender, boulder_names)
"""
# Typical implementation
# 430 ms ± 78.2 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
def add_column_votedby(column_name="VotedBy"):
# Gender mappings from Finnish to English
gender_dict = {
"Mies": "Male",
"Nainen": "Female"
}
def set_voted_by(row):
boulder_name = row.name
gender_s = spb2018_df.loc[(spb2018_df["Boulderin nimi"] == boulder_name) | (spb2018_df["Boulderin nimi.1"] == boulder_name) | (spb2018_df["Boulderin nimi.2"] == boulder_name), "Sukupuoli"]
return gender_dict[gender_s.iloc[0]] if gender_s.nunique() == 1 else "Both"
boulder_details_df[column_name] = boulder_details_df.apply(set_voted_by, axis=1)
add_column_votedby()
boulder_details_df.head()
# <br>
# Add column *Votes*.
# In[6]:
def add_column_votes(column_name="Votes"):
boulder_name_columns = [spb2018_df["Boulderin nimi"], spb2018_df["Boulderin nimi.1"], spb2018_df["Boulderin nimi.2"]]
all_voted_boulders_s = pd.concat(boulder_name_columns, ignore_index=True).dropna()
boulder_votes_s = all_voted_boulders_s.value_counts()
boulder_details_df[column_name] = boulder_votes_s
add_column_votes()
boulder_details_df.sort_values(by=["Votes"], ascending=[False]).loc[boulder_details_df["Votes"] >= 3]
# <br>
# Add columns *Latitude* and *Longitude*.
# In[7]:
def add_columns_latitude_and_longitude(column_names=["Latitude", "Longitude"]):
boulder_details_df[[column_names[0], column_names[1]]] = boulder_details_df["Coordinates"].str.split(",", expand=True).astype(float)
add_columns_latitude_and_longitude()
boulder_details_df.head()
# <br>
# Add column *GradeNumeric*.
# In[8]:
def add_column_gradenumeric(column_name="GradeNumeric"):
# Grade mappings from Font to numeric
grade_dict = {
"?": 0,
"1": 1,
"2": 2,
"3": 3,
"4": 4,
"4+": 5,
"5": 6,
"5+": 7,
"6A": 8,
"6A+": 9,
"6B": 10,
"6B+": 11,
"6C": 12,
"6C+": 13,
"7A": 14,
"7A+": 15,
"7B": 16,
"7B+": 17,
"7C": 18,
"7C+": 19,
"8A": 20,
"8A+": 21,
"8B": 22,
"8B+": 23,
"8C": 24,
"8C+": 25,
"9A": 26
}
boulder_details_df[column_name] = boulder_details_df.apply(lambda row: str(grade_dict[row["Grade"]]) if pd.notnull(row["Grade"]) else np.nan, axis=1)
boulder_details_df[column_name] = boulder_details_df[column_name].astype(int)
add_column_gradenumeric()
boulder_details_df.head()
# <br>
# Add column *Adjectives*
# In[9]:
def add_column_adjectives(column_name="Adjectives"):
def set_adjectives(row):
boulder_name = row.name
adjectives1_s = spb2018_df.loc[(spb2018_df["Boulderin nimi"] == boulder_name), "Kuvaile boulderia kolmella (3) adjektiivilla"]
adjectives2_s = spb2018_df.loc[(spb2018_df["Boulderin nimi.1"] == boulder_name), "Kuvaile boulderia kolmella (3) adjektiivilla.1"]
adjectives3_s = spb2018_df.loc[(spb2018_df["Boulderin nimi.2"] == boulder_name), "Kuvaile boulderia kolmella (3) adjektiivilla.2"]
adjectives_s = adjectives1_s.append(adjectives2_s).append(adjectives3_s)
adjectives = ",".join(adjectives_s)
# Clean adjectives
adjectives = ",".join(sorted(list(set([adjective.strip().lower() for adjective in adjectives.split(",")]))))
return adjectives
boulder_details_df[column_name] = boulder_details_df.apply(set_adjectives, axis=1)
add_column_adjectives()
boulder_details_df.head()
# <br>
# Add column *MainHoldTypes*
# In[10]:
def add_column_main_hold_types(column_name="MainHoldTypes"):
def set_main_hold_types(row):
boulder_name = row.name
main_hold_types1_s = spb2018_df.loc[(spb2018_df["Boulderin nimi"] == boulder_name), "Boulderin pääotetyypit"]
main_hold_types2_s = spb2018_df.loc[(spb2018_df["Boulderin nimi.1"] == boulder_name), "Boulderin pääotetyypit.1"]
main_hold_types3_s = spb2018_df.loc[(spb2018_df["Boulderin nimi.2"] == boulder_name), "Boulderin pääotetyypit.2"]
main_hold_types_s = main_hold_types1_s.append(main_hold_types2_s).append(main_hold_types3_s)
main_hold_types = ",".join(main_hold_types_s)
# Clean main_hold_types
main_hold_types = ",".join(sorted(list(set([main_hold_type.strip().lower() for main_hold_type in main_hold_types.split(",")]))))
return main_hold_types
boulder_details_df[column_name] = boulder_details_df.apply(set_main_hold_types, axis=1)
add_column_main_hold_types()
boulder_details_df.head()
# <br>
# Add column *MainProfiles*
# In[11]:
def add_column_main_profiles(column_name="MainProfiles"):
def set_main_profiles(row):
boulder_name = row.name
main_profiles1_s = spb2018_df.loc[(spb2018_df["Boulderin nimi"] == boulder_name), "Boulderin pääprofiilit"]
main_profiles2_s = spb2018_df.loc[(spb2018_df["Boulderin nimi.1"] == boulder_name), "Boulderin pääprofiilit.1"]
main_profiles3_s = spb2018_df.loc[(spb2018_df["Boulderin nimi.2"] == boulder_name), "Boulderin pääprofiilit.2"]
main_profiles_s = main_profiles1_s.append(main_profiles2_s).append(main_profiles3_s)
main_profiles = ",".join(main_profiles_s)
# Clean main_profiles
main_profiles = ",".join(sorted(list(set([main_profile.strip().lower() for main_profile in main_profiles.split(",")]))))
return main_profiles
boulder_details_df[column_name] = boulder_details_df.apply(set_main_profiles, axis=1)
add_column_main_profiles()
boulder_details_df.head()
# <br>
# Add column *MainSkillsNeeded*
# In[12]:
def add_column_main_skills_needed(column_name="MainSkillsNeeded"):
def set_main_skills_needed(row):
boulder_name = row.name
main_skills_needed1_s = spb2018_df.loc[(spb2018_df["Boulderin nimi"] == boulder_name), "Boulderin kiipeämiseen vaadittavat pääkyvyt"]
main_skills_needed2_s = spb2018_df.loc[(spb2018_df["Boulderin nimi.1"] == boulder_name), "Boulderin kiipeämiseen vaadittavat pääkyvyt.1"]
main_skills_needed3_s = spb2018_df.loc[(spb2018_df["Boulderin nimi.2"] == boulder_name), "Boulderin kiipeämiseen vaadittavat pääkyvyt.2"]
main_skills_needed_s = main_skills_needed1_s.append(main_skills_needed2_s).append(main_skills_needed3_s)
main_skills_needed = ",".join(main_skills_needed_s)
# Clean main_skills_needed
main_skills_needed = ",".join(sorted(list(set([main_skill_needed.strip().lower() for main_skill_needed in main_skills_needed.split(",")]))))
return main_skills_needed
boulder_details_df[column_name] = boulder_details_df.apply(set_main_skills_needed, axis=1)
add_column_main_skills_needed()
boulder_details_df.head()
# <br>
# Add column *Comments*
# In[13]:
def add_column_comments(column_name="Comments"):
def set_comments(row):
boulder_name = row.name
comments1_s = spb2018_df.loc[(spb2018_df["Boulderin nimi"] == boulder_name), "Kuvaile boulderia omin sanoin (vapaaehtoinen)"]
comments2_s = spb2018_df.loc[(spb2018_df["Boulderin nimi.1"] == boulder_name), "Kuvaile boulderia omin sanoin (vapaaehtoinen).1"]
comments3_s = spb2018_df.loc[(spb2018_df["Boulderin nimi.2"] == boulder_name), "Kuvaile boulderia omin sanoin (vapaaehtoinen).2"]
comments_s = comments1_s.append(comments2_s).append(comments3_s)
comments = []
for index, value in comments_s.iteritems():
if pd.notnull(value):
comments.append(value.strip())
return ",".join("\"{}\"".format(comment) for comment in comments)
boulder_details_df[column_name] = boulder_details_df.apply(set_comments, axis=1)
add_column_comments()
boulder_details_df.head()
# <br>
# Add columns *AreaLevel1*, *AreaLevel2*, and *AreaLevel3*
# In[14]:
def add_columns_arealevel1_arealevel2_and_arealevel3(column_names=["AreaLevel1", "AreaLevel2", "AreaLevel3"]):
boulder_details_df.drop(columns=[column_names[0], column_names[1], column_names[2]], inplace=True, errors="ignore")
geolocator = GoogleV3(api_key=GOOGLE_MAPS_JAVASCRIPT_API_KEY)
def extract_administrative_area_levels(location_results, approximateLocation, area_levels_dict):
# List of location result types that we are interested in
location_result_types = ["administrative_area_level_1", "administrative_area_level_2", "administrative_area_level_3"]
# Iterate over location results
for location_result in location_results:
location_result_json = location_result.raw
# Extract data only from those location results that we are interested in
if any(location_result_type in location_result_json["types"] for location_result_type in location_result_types):
# Extract location result type
location_result_type = location_result_json["types"][0]
# Iterate over address components
for address_component in location_result_json["address_components"]:
# Extract data only from the matched location result type
if location_result_type in address_component["types"]:
# Extract the name of the administrative area level 1
if location_result_type == location_result_types[0]:
area_levels_dict["AreaLevel1"] = address_component["long_name"]
# Extract the name of the administrative area level 2
if location_result_type == location_result_types[1] and approximateLocation == "No":
area_levels_dict["AreaLevel2"] = address_component["long_name"]
# Extract the name of the administrative area level 3
if location_result_type == location_result_types[2] and approximateLocation == "No":
area_levels_dict["AreaLevel3"] = address_component["long_name"]
return area_levels_dict
def get_area_levels(row):
# Area levels template
area_levels_dict = {
column_names[0]: "",
column_names[1]: "",
column_names[2]: ""
}
geocoded = False
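        # retry loop: reverse geocode until it succeeds; unrecoverable query
        # errors are logged and skipped, while quota errors trigger a
        # 30-minute back-off before retrying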
while geocoded is not True:
# Reverse geocode coordinates
try:
location_results = geolocator.reverse(row["Coordinates"], language="fi")
area_levels_dict = extract_administrative_area_levels(location_results, row["ApproximateCoordinates"], area_levels_dict)
geocoded = True
except GeocoderQueryError as gqe:
print("Geocoding error with {}: {}".format(row.name, str(gqe)))
print("Skipping {}".format(row.name))
geocoded = True
except GeocoderQuotaExceeded as gqe:
print("Geocoding quota exceeded: {}".format(str(gqe)))
print("Backing off for a bit")
time.sleep(30 * 60) # sleep for 30 minutes
print("Back in action")
return pd.Series(area_levels_dict)
boulder_area_levels_df = boulder_details_df[["Coordinates", "ApproximateCoordinates"]].apply(get_area_levels, axis=1)
return pd.merge(boulder_details_df, boulder_area_levels_df, how="outer", left_index=True, right_index=True)
boulder_details_df = add_columns_arealevel1_arealevel2_and_arealevel3()
boulder_details_df.head()
# <br>
# Create boulders final file `boulders_-_final.csv`.
# In[15]:
def create_boulders_final():
boulder_details_reset_df = boulder_details_df.reset_index()
boulder_details_reset_df = boulder_details_reset_df[["Votes", "VotedBy", "Name", "Grade", "GradeNumeric", "InFinland", "AreaLevel1", "AreaLevel2", "AreaLevel3", "Crag", "ApproximateCoordinates", "Coordinates", "Latitude", "Longitude", "Url27crags", "UrlVideo", "UrlStory", "MainProfiles", "MainHoldTypes", "MainSkillsNeeded", "Adjectives", "Comments"]]
boulder_details_reset_df = boulder_details_reset_df.sort_values(by=["Votes", "GradeNumeric", "Name"], ascending=[False, False, True])
boulder_details_reset_df.to_csv("data/boulders_-_final.csv", index=False)
create_boulders_final()
|
mit
|
lazywei/scikit-learn
|
benchmarks/bench_plot_incremental_pca.py
|
374
|
6430
|
"""
========================
IncrementalPCA benchmark
========================
Benchmarks for IncrementalPCA
"""
import numpy as np
import gc
from time import time
from collections import defaultdict
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_lfw_people
from sklearn.decomposition import IncrementalPCA, RandomizedPCA, PCA
def plot_results(X, y, label):
plt.plot(X, y, label=label, marker='o')
def benchmark(estimator, data):
gc.collect()
print("Benching %s" % estimator)
t0 = time()
estimator.fit(data)
training_time = time() - t0
data_t = estimator.transform(data)
data_r = estimator.inverse_transform(data_t)
reconstruction_error = np.mean(np.abs(data - data_r))
return {'time': training_time, 'error': reconstruction_error}
def plot_feature_times(all_times, batch_size, all_components, data):
plt.figure()
plot_results(all_components, all_times['pca'], label="PCA")
plot_results(all_components, all_times['ipca'],
label="IncrementalPCA, bsize=%i" % batch_size)
plot_results(all_components, all_times['rpca'], label="RandomizedPCA")
plt.legend(loc="upper left")
plt.suptitle("Algorithm runtime vs. n_components\n \
LFW, size %i x %i" % data.shape)
plt.xlabel("Number of components (out of max %i)" % data.shape[1])
plt.ylabel("Time (seconds)")
def plot_feature_errors(all_errors, batch_size, all_components, data):
plt.figure()
plot_results(all_components, all_errors['pca'], label="PCA")
plot_results(all_components, all_errors['ipca'],
label="IncrementalPCA, bsize=%i" % batch_size)
plot_results(all_components, all_errors['rpca'], label="RandomizedPCA")
plt.legend(loc="lower left")
plt.suptitle("Algorithm error vs. n_components\n"
"LFW, size %i x %i" % data.shape)
plt.xlabel("Number of components (out of max %i)" % data.shape[1])
plt.ylabel("Mean absolute error")
def plot_batch_times(all_times, n_features, all_batch_sizes, data):
plt.figure()
plot_results(all_batch_sizes, all_times['pca'], label="PCA")
plot_results(all_batch_sizes, all_times['rpca'], label="RandomizedPCA")
plot_results(all_batch_sizes, all_times['ipca'], label="IncrementalPCA")
plt.legend(loc="lower left")
plt.suptitle("Algorithm runtime vs. batch_size for n_components %i\n \
LFW, size %i x %i" % (
n_features, data.shape[0], data.shape[1]))
plt.xlabel("Batch size")
plt.ylabel("Time (seconds)")
def plot_batch_errors(all_errors, n_features, all_batch_sizes, data):
plt.figure()
plot_results(all_batch_sizes, all_errors['pca'], label="PCA")
plot_results(all_batch_sizes, all_errors['ipca'], label="IncrementalPCA")
plt.legend(loc="lower left")
plt.suptitle("Algorithm error vs. batch_size for n_components %i\n \
LFW, size %i x %i" % (
n_features, data.shape[0], data.shape[1]))
plt.xlabel("Batch size")
plt.ylabel("Mean absolute error")
def fixed_batch_size_comparison(data):
all_features = [i.astype(int) for i in np.linspace(data.shape[1] // 10,
data.shape[1], num=5)]
batch_size = 1000
# Compare runtimes and error for fixed batch size
all_times = defaultdict(list)
all_errors = defaultdict(list)
for n_components in all_features:
pca = PCA(n_components=n_components)
rpca = RandomizedPCA(n_components=n_components, random_state=1999)
ipca = IncrementalPCA(n_components=n_components, batch_size=batch_size)
results_dict = {k: benchmark(est, data) for k, est in [('pca', pca),
('ipca', ipca),
('rpca', rpca)]}
for k in sorted(results_dict.keys()):
all_times[k].append(results_dict[k]['time'])
all_errors[k].append(results_dict[k]['error'])
plot_feature_times(all_times, batch_size, all_features, data)
plot_feature_errors(all_errors, batch_size, all_features, data)
def variable_batch_size_comparison(data):
batch_sizes = [i.astype(int) for i in np.linspace(data.shape[0] // 10,
data.shape[0], num=10)]
for n_components in [i.astype(int) for i in
np.linspace(data.shape[1] // 10,
data.shape[1], num=4)]:
all_times = defaultdict(list)
all_errors = defaultdict(list)
pca = PCA(n_components=n_components)
rpca = RandomizedPCA(n_components=n_components, random_state=1999)
results_dict = {k: benchmark(est, data) for k, est in [('pca', pca),
('rpca', rpca)]}
# Create flat baselines to compare the variation over batch size
all_times['pca'].extend([results_dict['pca']['time']] *
len(batch_sizes))
all_errors['pca'].extend([results_dict['pca']['error']] *
len(batch_sizes))
all_times['rpca'].extend([results_dict['rpca']['time']] *
len(batch_sizes))
all_errors['rpca'].extend([results_dict['rpca']['error']] *
len(batch_sizes))
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=n_components,
batch_size=batch_size)
results_dict = {k: benchmark(est, data) for k, est in [('ipca',
ipca)]}
all_times['ipca'].append(results_dict['ipca']['time'])
all_errors['ipca'].append(results_dict['ipca']['error'])
plot_batch_times(all_times, n_components, batch_sizes, data)
# RandomizedPCA error is always worse (approx 100x) than other PCA
# tests
plot_batch_errors(all_errors, n_components, batch_sizes, data)
faces = fetch_lfw_people(resize=.2, min_faces_per_person=5)
# limit dataset to 5000 people (don't care who they are!)
X = faces.data[:5000]
n_samples, h, w = faces.images.shape
n_features = X.shape[1]
X -= X.mean(axis=0)
X /= X.std(axis=0)
fixed_batch_size_comparison(X)
variable_batch_size_comparison(X)
plt.show()
|
bsd-3-clause
|
blockstack/packaging
|
imported/future/src/future/utils/__init__.py
|
24
|
20523
|
"""
A selection of cross-compatible functions for Python 2 and 3.
This module exports useful functions for 2/3 compatible code:
* bind_method: binds functions to classes
* ``native_str_to_bytes`` and ``bytes_to_native_str``
* ``native_str``: always equal to the native platform string object (because
this may be shadowed by imports from future.builtins)
* lists: lrange(), lmap(), lzip(), lfilter()
* iterable method compatibility:
- iteritems, iterkeys, itervalues
- viewitems, viewkeys, viewvalues
These use the original method if available, otherwise they use items,
keys, values.
* types:
* text_type: unicode in Python 2, str in Python 3
* binary_type: str in Python 2, bytes in Python 3
* string_types: basestring in Python 2, str in Python 3
* bchr(c):
Take an integer and make a 1-character byte string
* bord(c)
Take the result of indexing on a byte string and make an integer
* tobytes(s)
Take a text string, a byte string, or a sequence of characters taken
from a byte string, and make a byte string.
* raise_from()
* raise_with_traceback()
This module also defines these decorators:
* ``python_2_unicode_compatible``
* ``with_metaclass``
* ``implements_iterator``
Some of the functions in this module come from the following sources:
* Jinja2 (BSD licensed: see
https://github.com/mitsuhiko/jinja2/blob/master/LICENSE)
* Pandas compatibility module pandas.compat
* six.py by Benjamin Peterson
* Django
"""
import types
import sys
import numbers
import functools
import copy
import inspect
PY3 = sys.version_info[0] == 3
PY2 = sys.version_info[0] == 2
PY26 = sys.version_info[0:2] == (2, 6)
PY27 = sys.version_info[0:2] == (2, 7)
PYPY = hasattr(sys, 'pypy_translation_info')
def python_2_unicode_compatible(cls):
"""
A decorator that defines __unicode__ and __str__ methods under Python
2. Under Python 3, this decorator is a no-op.
To support Python 2 and 3 with a single code base, define a __str__
method returning unicode text and apply this decorator to the class, like
this::
>>> from future.utils import python_2_unicode_compatible
>>> @python_2_unicode_compatible
... class MyClass(object):
... def __str__(self):
... return u'Unicode string: \u5b54\u5b50'
>>> a = MyClass()
Then, after this import:
>>> from future.builtins import str
the following is ``True`` on both Python 3 and 2::
>>> str(a) == a.encode('utf-8').decode('utf-8')
True
and, on a Unicode-enabled terminal with the right fonts, these both print the
Chinese characters for Confucius::
>>> print(a)
>>> print(str(a))
The implementation comes from django.utils.encoding.
"""
if not PY3:
cls.__unicode__ = cls.__str__
cls.__str__ = lambda self: self.__unicode__().encode('utf-8')
return cls
def with_metaclass(meta, *bases):
"""
Function from jinja2/_compat.py. License: BSD.
Use it like this::
class BaseForm(object):
pass
class FormType(type):
pass
class Form(with_metaclass(FormType, BaseForm)):
pass
This requires a bit of explanation: the basic idea is to make a
dummy metaclass for one level of class instantiation that replaces
itself with the actual metaclass. Because of internal type checks
we also need to make sure that we downgrade the custom metaclass
for one level to something closer to type (that's why __call__ and
__init__ comes back from type etc.).
This has the advantage over six.with_metaclass of not introducing
dummy classes into the final MRO.
"""
class metaclass(meta):
__call__ = type.__call__
__init__ = type.__init__
def __new__(cls, name, this_bases, d):
if this_bases is None:
return type.__new__(cls, name, (), d)
return meta(name, bases, d)
return metaclass('temporary_class', None, {})
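# A minimal doctest-style sketch (illustrative, not part of the original
# module) of the guarantee described above:
#
#     >>> class Meta(type):
#     ...     pass
#     >>> class C(with_metaclass(Meta, object)):
#     ...     pass
#     >>> type(C) is Meta                     # the real metaclass applies
#     True
#     >>> [k.__name__ for k in C.__mro__]     # no 'temporary_class' remains
#     ['C', 'object']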
# Definitions from pandas.compat and six.py follow:
if PY3:
def bchr(s):
return bytes([s])
def bstr(s):
if isinstance(s, str):
return bytes(s, 'latin-1')
else:
return bytes(s)
def bord(s):
return s
string_types = str,
integer_types = int,
class_types = type,
text_type = str
binary_type = bytes
else:
# Python 2
def bchr(s):
return chr(s)
def bstr(s):
return str(s)
def bord(s):
return ord(s)
string_types = basestring,
integer_types = (int, long)
class_types = (type, types.ClassType)
text_type = unicode
binary_type = str
###
if PY3:
def tobytes(s):
if isinstance(s, bytes):
return s
else:
if isinstance(s, str):
return s.encode('latin-1')
else:
return bytes(s)
else:
# Python 2
def tobytes(s):
if isinstance(s, unicode):
return s.encode('latin-1')
else:
return ''.join(s)
tobytes.__doc__ = """
Encodes to latin-1 (whose 256 code points match the first 256 Unicode
code points; only the first 128 coincide with ASCII).
"""
if PY3:
def native_str_to_bytes(s, encoding='utf-8'):
return s.encode(encoding)
def bytes_to_native_str(b, encoding='utf-8'):
return b.decode(encoding)
def text_to_native_str(t, encoding=None):
return t
else:
# Python 2
def native_str_to_bytes(s, encoding=None):
from future.types import newbytes # to avoid a circular import
return newbytes(s)
def bytes_to_native_str(b, encoding=None):
return native(b)
def text_to_native_str(t, encoding='ascii'):
"""
Use this to create a Py2 native string when "from __future__ import
unicode_literals" is in effect.
"""
return unicode(t).encode(encoding)
native_str_to_bytes.__doc__ = """
On Py3, returns an encoded string.
On Py2, returns a newbytes type, ignoring the ``encoding`` argument.
"""
if PY3:
# list-producing versions of the major Python iterating functions
def lrange(*args, **kwargs):
return list(range(*args, **kwargs))
def lzip(*args, **kwargs):
return list(zip(*args, **kwargs))
def lmap(*args, **kwargs):
return list(map(*args, **kwargs))
def lfilter(*args, **kwargs):
return list(filter(*args, **kwargs))
else:
import __builtin__
# Python 2-builtin ranges produce lists
lrange = __builtin__.range
lzip = __builtin__.zip
lmap = __builtin__.map
lfilter = __builtin__.filter
def isidentifier(s, dotted=False):
'''
A function equivalent to the str.isidentifier method on Py3
'''
if dotted:
return all(isidentifier(a) for a in s.split('.'))
if PY3:
return s.isidentifier()
else:
import re
_name_re = re.compile(r"[a-zA-Z_][a-zA-Z0-9_]*$")
return bool(_name_re.match(s))
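# Behaviour sketch (same result on Py2 and Py3):
#
#     >>> isidentifier('spam'), isidentifier('spam-1')
#     (True, False)
#     >>> isidentifier('spam.eggs', dotted=True)
#     True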
def viewitems(obj, **kwargs):
"""
Function for iterating over dictionary items with the same set-like
behaviour on Py2.7 as on Py3.
Passes kwargs to method."""
func = getattr(obj, "viewitems", None)
if not func:
func = obj.items
return func(**kwargs)
def viewkeys(obj, **kwargs):
"""
Function for iterating over dictionary keys with the same set-like
behaviour on Py2.7 as on Py3.
Passes kwargs to method."""
func = getattr(obj, "viewkeys", None)
if not func:
func = obj.keys
return func(**kwargs)
def viewvalues(obj, **kwargs):
"""
Function for iterating over dictionary values with the same set-like
behaviour on Py2.7 as on Py3.
Passes kwargs to method."""
func = getattr(obj, "viewvalues", None)
if not func:
func = obj.values
return func(**kwargs)
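# Set-like behaviour sketch shared by the three view* helpers above
# (illustrative only):
#
#     >>> d1, d2 = {'a': 1, 'b': 2}, {'b': 2, 'c': 3}
#     >>> sorted(viewkeys(d1) & viewkeys(d2))
#     ['b']
#     >>> sorted(viewitems(d1) | viewitems(d2))
#     [('a', 1), ('b', 2), ('c', 3)]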
def iteritems(obj, **kwargs):
"""Use this only if compatibility with Python versions before 2.7 is
required. Otherwise, prefer viewitems().
"""
func = getattr(obj, "iteritems", None)
if not func:
func = obj.items
return func(**kwargs)
def iterkeys(obj, **kwargs):
"""Use this only if compatibility with Python versions before 2.7 is
required. Otherwise, prefer viewkeys().
"""
func = getattr(obj, "iterkeys", None)
if not func:
func = obj.keys
return func(**kwargs)
def itervalues(obj, **kwargs):
"""Use this only if compatibility with Python versions before 2.7 is
required. Otherwise, prefer viewvalues().
"""
func = getattr(obj, "itervalues", None)
if not func:
func = obj.values
return func(**kwargs)
def bind_method(cls, name, func):
"""Bind a method to class, python 2 and python 3 compatible.
Parameters
----------
cls : type
class to receive bound method
name : basestring
name of method on class instance
func : function
function to be bound as method
Returns
-------
None
"""
# only python 2 has an issue with bound/unbound methods
if not PY3:
setattr(cls, name, types.MethodType(func, None, cls))
else:
setattr(cls, name, func)
def getexception():
return sys.exc_info()[1]
def _get_caller_globals_and_locals():
"""
Returns the globals and locals of the calling frame.
Is there an alternative to frame hacking here?
"""
caller_frame = inspect.stack()[2]
myglobals = caller_frame[0].f_globals
mylocals = caller_frame[0].f_locals
return myglobals, mylocals
def _repr_strip(mystring):
"""
Returns the string without any initial or final quotes.
"""
r = repr(mystring)
if r.startswith("'") and r.endswith("'"):
return r[1:-1]
else:
return r
if PY3:
def raise_from(exc, cause):
"""
Equivalent to:
raise EXCEPTION from CAUSE
on Python 3. (See PEP 3134).
"""
# Is either arg an exception class (e.g. IndexError) rather than
# instance (e.g. IndexError('my message here'))? If so, pass the
# name of the class undisturbed through to "raise ... from ...".
if isinstance(exc, type) and issubclass(exc, Exception):
exc = exc.__name__
if isinstance(cause, type) and issubclass(cause, Exception):
cause = cause.__name__
execstr = "raise " + _repr_strip(exc) + " from " + _repr_strip(cause)
myglobals, mylocals = _get_caller_globals_and_locals()
exec(execstr, myglobals, mylocals)
def raise_(tp, value=None, tb=None):
"""
A function that matches the Python 2.x ``raise`` statement. This
allows re-raising exceptions with the cls value and traceback on
Python 2 and 3.
"""
if value is not None and isinstance(tp, Exception):
raise TypeError("instance exception may not have a separate value")
if value is not None:
exc = tp(value)
else:
exc = tp
if exc.__traceback__ is not tb:
raise exc.with_traceback(tb)
raise exc
def raise_with_traceback(exc, traceback=Ellipsis):
if traceback == Ellipsis:
_, _, traceback = sys.exc_info()
raise exc.with_traceback(traceback)
else:
def raise_from(exc, cause):
"""
Equivalent to:
raise EXCEPTION from CAUSE
on Python 3. (See PEP 3134).
"""
# Is either arg an exception class (e.g. IndexError) rather than
# instance (e.g. IndexError('my message here'))? If so, pass the
# name of the class undisturbed through to "raise ... from ...".
if isinstance(exc, type) and issubclass(exc, Exception):
e = exc()
# exc = exc.__name__
# execstr = "e = " + _repr_strip(exc) + "()"
# myglobals, mylocals = _get_caller_globals_and_locals()
# exec(execstr, myglobals, mylocals)
else:
e = exc
e.__suppress_context__ = False
if isinstance(cause, type) and issubclass(cause, Exception):
e.__cause__ = cause()
e.__suppress_context__ = True
elif cause is None:
e.__cause__ = None
e.__suppress_context__ = True
elif isinstance(cause, BaseException):
e.__cause__ = cause
e.__suppress_context__ = True
else:
raise TypeError("exception causes must derive from BaseException")
e.__context__ = sys.exc_info()[1]
raise e
exec('''
def raise_(tp, value=None, tb=None):
raise tp, value, tb
def raise_with_traceback(exc, traceback=Ellipsis):
if traceback == Ellipsis:
_, _, traceback = sys.exc_info()
raise exc, None, traceback
'''.strip())
raise_with_traceback.__doc__ = (
"""Raise exception with existing traceback.
If traceback is not passed, uses sys.exc_info() to get traceback."""
)
# Deprecated alias for backward compatibility with ``future`` versions < 0.11:
reraise = raise_
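# Chaining sketch (illustrative only; on Py3 raise_from compiles down to a
# real ``raise ... from ...`` statement, on Py2 the dunder attributes are
# set by hand as above):
#
#     >>> def lookup():
#     ...     try:
#     ...         {}['missing']
#     ...     except KeyError as e:
#     ...         raise_from(ValueError('bad lookup'), e)
#     >>> try:
#     ...     lookup()
#     ... except ValueError as e:
#     ...     caught = e
#     >>> isinstance(caught.__cause__, KeyError)
#     True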
def implements_iterator(cls):
'''
From jinja2/_compat.py. License: BSD.
Use as a decorator like this::
@implements_iterator
class UppercasingIterator(object):
def __init__(self, iterable):
self._iter = iter(iterable)
def __iter__(self):
return self
def __next__(self):
return next(self._iter).upper()
'''
if PY3:
return cls
else:
cls.next = cls.__next__
del cls.__next__
return cls
if PY3:
get_next = lambda x: x.next
else:
get_next = lambda x: x.__next__
def encode_filename(filename):
if PY3:
return filename
else:
if isinstance(filename, unicode):
return filename.encode('utf-8')
return filename
def is_new_style(cls):
"""
Python 2.7 has both new-style and old-style classes. Old-style classes can
be pesky in some circumstances, such as when using inheritance. Use this
function to test for whether a class is new-style. (Python 3 only has
new-style classes.)
"""
return hasattr(cls, '__class__') and ('__dict__' in dir(cls)
or hasattr(cls, '__slots__'))
# The native platform string and bytes types. Useful because ``str`` and
# ``bytes`` are redefined on Py2 by ``from future.builtins import *``.
native_str = str
native_bytes = bytes
def istext(obj):
"""
Deprecated. Use::
>>> isinstance(obj, str)
after this import:
>>> from future.builtins import str
"""
return isinstance(obj, type(u''))
def isbytes(obj):
"""
Deprecated. Use::
>>> isinstance(obj, bytes)
after this import:
>>> from future.builtins import bytes
"""
return isinstance(obj, type(b''))
def isnewbytes(obj):
"""
Equivalent to the result of ``isinstance(obj, newbytes)`` were
``__instancecheck__`` not overridden on the newbytes subclass. In
other words, is the object REALLY a newbytes instance, rather than a Py2
native str object?
"""
# TODO: generalize this so that it works with subclasses of newbytes
# Import is here to avoid circular imports:
from future.types.newbytes import newbytes
return type(obj) == newbytes
def isint(obj):
"""
Deprecated. Tests whether an object is a Py3 ``int`` or either a Py2 ``int`` or
``long``.
Instead of using this function, you can use:
>>> from future.builtins import int
>>> isinstance(obj, int)
The following idiom is equivalent:
>>> from numbers import Integral
>>> isinstance(obj, Integral)
"""
return isinstance(obj, numbers.Integral)
def native(obj):
"""
On Py3, this is a no-op: native(obj) -> obj
On Py2, returns the corresponding native Py2 types that are
superclasses for backported objects from Py3:
>>> from builtins import str, bytes, int
>>> native(str(u'ABC'))
u'ABC'
>>> type(native(str(u'ABC')))
unicode
>>> native(bytes(b'ABC'))
b'ABC'
>>> type(native(bytes(b'ABC')))
bytes
>>> native(int(10**20))
100000000000000000000L
>>> type(native(int(10**20)))
long
Existing native types on Py2 will be returned unchanged:
>>> type(native(u'ABC'))
unicode
"""
if hasattr(obj, '__native__'):
return obj.__native__()
else:
return obj
# Implementation of exec_ is from ``six``:
if PY3:
import builtins
exec_ = getattr(builtins, "exec")
else:
def exec_(code, globs=None, locs=None):
"""Execute code in a namespace."""
if globs is None:
frame = sys._getframe(1)
globs = frame.f_globals
if locs is None:
locs = frame.f_locals
del frame
elif locs is None:
locs = globs
exec("""exec code in globs, locs""")
# Defined here for backward compatibility:
def old_div(a, b):
"""
DEPRECATED: import ``old_div`` from ``past.utils`` instead.
Equivalent to ``a / b`` on Python 2 without ``from __future__ import
division``.
TODO: generalize this to other objects (like arrays etc.)
"""
if isinstance(a, numbers.Integral) and isinstance(b, numbers.Integral):
return a // b
else:
return a / b
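# Behaviour sketch:
#
#     >>> old_div(7, 2)    # two integers -> floor division, as on Py2
#     3
#     >>> old_div(-7, 2)   # note: floors toward negative infinity
#     -4
#     >>> old_div(7.0, 2)  # any non-integral operand -> true division
#     3.5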
def as_native_str(encoding='utf-8'):
'''
A decorator to turn a function or method call that returns text, i.e.
unicode, into one that returns a native platform str.
Use it as a decorator like this::
from __future__ import unicode_literals
class MyClass(object):
@as_native_str(encoding='ascii')
def __repr__(self):
return next(self._iter).upper()
'''
if PY3:
return lambda f: f
else:
def encoder(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
return f(*args, **kwargs).encode(encoding=encoding)
return wrapper
return encoder
# listvalues and listitems definitions from Nick Coghlan's (withdrawn)
# PEP 496:
try:
dict.iteritems
except AttributeError:
# Python 3
def listvalues(d):
return list(d.values())
def listitems(d):
return list(d.items())
else:
# Python 2
def listvalues(d):
return d.values()
def listitems(d):
return d.items()
if PY3:
def ensure_new_type(obj):
return obj
else:
def ensure_new_type(obj):
from future.types.newbytes import newbytes
from future.types.newstr import newstr
from future.types.newint import newint
from future.types.newdict import newdict
native_type = type(native(obj))
# Upcast only if the type is already a native (non-future) type
if issubclass(native_type, type(obj)):
# Upcast
if native_type == str: # i.e. Py2 8-bit str
return newbytes(obj)
elif native_type == unicode:
return newstr(obj)
elif native_type == int:
return newint(obj)
elif native_type == long:
return newint(obj)
elif native_type == dict:
return newdict(obj)
else:
raise NotImplementedError('type %s not supported' % type(obj))
else:
# Already a new type
assert type(obj) in [newbytes, newstr]
return obj
__all__ = ['PY2', 'PY26', 'PY3', 'PYPY',
'as_native_str', 'bind_method', 'bord', 'bstr',
'bytes_to_native_str', 'encode_filename', 'ensure_new_type',
'exec_', 'get_next', 'getexception', 'implements_iterator',
'is_new_style', 'isbytes', 'isidentifier', 'isint',
'isnewbytes', 'istext', 'iteritems', 'iterkeys', 'itervalues',
'lfilter', 'listitems', 'listvalues', 'lmap', 'lrange',
'lzip', 'native', 'native_bytes', 'native_str',
'native_str_to_bytes', 'old_div',
'python_2_unicode_compatible', 'raise_',
'raise_with_traceback', 'reraise', 'text_to_native_str',
'tobytes', 'viewitems', 'viewkeys', 'viewvalues',
'with_metaclass'
]
|
gpl-3.0
|
idbedead/RNA-sequence-tools
|
picard_sort_insertmetrics.py
|
2
|
1747
|
import fnmatch
import os
import pandas as pd
import subprocess
'''This script belongs to a pipeline that takes accepted_hits.bam files from
tophat and turns them into counts and a matrix file of cells/conditions and
counts using htseq: http://www-huber.embl.de/HTSeq/doc/overview.html
The files are sorted using samtools: http://www.htslib.org/
Paired end mates are fixed and RNA metrics collected using Picard tools:
http://broadinstitute.github.io/picard/
This particular script collects the Picard insert size metrics of each sample
into a single matrix file for easy plotting.
'''
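#The parsing loop below assumes the standard Picard metrics layout (an
#assumption, shown with illustrative values only): a header row starting
#with MEDIAN_INSERT_SIZE, immediately followed by one tab-separated row of
#values, e.g.
#  MEDIAN_INSERT_SIZE<TAB>MEDIAN_ABSOLUTE_DEVIATION<TAB>...
#  215<TAB>38<TAB>...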
#list of file paths with mapped hits
pats = ['rsem_m38_Cfms-d7-Sham_insert_metrics']
#output path
#initialize dictionaries for collected output
fpkm_matrix_dict_g = {}
count_dict = {}
norm_read_dict = {}
picard_stats_dict = {}
#collect gene_list once since it is the same across all samples
st = 1
gene_list = []
#loop through all files and sort, fix, count, collect metrics on each
for p in pats:
for root, dirnames, filenames in os.walk(p):
for filename in fnmatch.filter(filenames, 'insert_size_metrics.txt'):
with open(os.path.join(root,'insert_size_metrics.txt'), mode='r') as f:
next_line = -1
cname = os.path.basename(root)
for i, l in enumerate(f):
if l[0:18] == 'MEDIAN_INSERT_SIZE':
titles = l.split('\t')
next_line= i+1
if i == next_line:
metrics = l.split('\t')
a = dict(zip(titles, metrics))
picard_stats_dict[cname] = a
final_df = pd.DataFrame(picard_stats_dict)
final_df_t = final_df.transpose()
final_df_t.to_csv(os.path.basename(pats[0])+"_picard_insert_metrics.txt", sep='\t')
|
mit
|
tdhopper/scikit-learn
|
sklearn/manifold/t_sne.py
|
48
|
20644
|
# Author: Alexander Fabisch -- <[email protected]>
# License: BSD 3 clause (C) 2014
# This is the standard t-SNE implementation. There are faster modifications of
# the algorithm:
# * Barnes-Hut-SNE: reduces the complexity of the gradient computation from
# N^2 to N log N (http://arxiv.org/abs/1301.3342)
# * Fast Optimization for t-SNE:
# http://cseweb.ucsd.edu/~lvdmaaten/workshops/nips2010/papers/vandermaaten.pdf
import numpy as np
from scipy import linalg
from scipy.spatial.distance import pdist
from scipy.spatial.distance import squareform
from ..base import BaseEstimator
from ..utils import check_array
from ..utils import check_random_state
from ..utils.extmath import _ravel
from ..decomposition import RandomizedPCA
from ..metrics.pairwise import pairwise_distances
from . import _utils
MACHINE_EPSILON = np.finfo(np.double).eps
def _joint_probabilities(distances, desired_perplexity, verbose):
"""Compute joint probabilities p_ij from distances.
Parameters
----------
distances : array, shape (n_samples * (n_samples-1) / 2,)
Distances of samples are stored as condensed matrices, i.e.
we omit the diagonal and duplicate entries and store everything
in a one-dimensional array.
desired_perplexity : float
Desired perplexity of the joint probability distributions.
verbose : int
Verbosity level.
Returns
-------
P : array, shape (n_samples * (n_samples-1) / 2,)
Condensed joint probability matrix.
"""
# Compute conditional probabilities such that they approximately match
# the desired perplexity
conditional_P = _utils._binary_search_perplexity(
distances, desired_perplexity, verbose)
P = conditional_P + conditional_P.T
sum_P = np.maximum(np.sum(P), MACHINE_EPSILON)
P = np.maximum(squareform(P) / sum_P, MACHINE_EPSILON)
return P
def _kl_divergence(params, P, alpha, n_samples, n_components):
"""t-SNE objective function: KL divergence of p_ijs and q_ijs.
Parameters
----------
params : array, shape (n_params,)
Unraveled embedding.
P : array, shape (n_samples * (n_samples-1) / 2,)
Condensed joint probability matrix.
alpha : float
Degrees of freedom of the Student's-t distribution.
n_samples : int
Number of samples.
n_components : int
Dimension of the embedded space.
Returns
-------
kl_divergence : float
Kullback-Leibler divergence of p_ij and q_ij.
grad : array, shape (n_params,)
Unraveled gradient of the Kullback-Leibler divergence with respect to
the embedding.
"""
X_embedded = params.reshape(n_samples, n_components)
# Q is a heavy-tailed distribution: Student's t-distribution
n = pdist(X_embedded, "sqeuclidean")
n += 1.
n /= alpha
n **= (alpha + 1.0) / -2.0
Q = np.maximum(n / (2.0 * np.sum(n)), MACHINE_EPSILON)
# Optimization trick below: np.dot(x, y) is faster than
# np.sum(x * y) because it calls BLAS
# Objective: C (Kullback-Leibler divergence of P and Q)
kl_divergence = 2.0 * np.dot(P, np.log(P / Q))
# Gradient: dC/dY
grad = np.ndarray((n_samples, n_components))
PQd = squareform((P - Q) * n)
for i in range(n_samples):
np.dot(_ravel(PQd[i]), X_embedded[i] - X_embedded, out=grad[i])
grad = grad.ravel()
c = 2.0 * (alpha + 1.0) / alpha
grad *= c
return kl_divergence, grad
def _gradient_descent(objective, p0, it, n_iter, n_iter_without_progress=30,
momentum=0.5, learning_rate=1000.0, min_gain=0.01,
min_grad_norm=1e-7, min_error_diff=1e-7, verbose=0,
args=None):
"""Batch gradient descent with momentum and individual gains.
Parameters
----------
objective : function or callable
Should return a tuple of cost and gradient for a given parameter
vector.
p0 : array-like, shape (n_params,)
Initial parameter vector.
it : int
Current number of iterations (this function will be called more than
once during the optimization).
n_iter : int
Maximum number of gradient descent iterations.
n_iter_without_progress : int, optional (default: 30)
Maximum number of iterations without progress before we abort the
optimization.
momentum : float, within (0.0, 1.0), optional (default: 0.5)
The momentum generates a weight for previous gradients that decays
exponentially.
learning_rate : float, optional (default: 1000.0)
The learning rate should be extremely high for t-SNE! Values in the
range [100.0, 1000.0] are common.
min_gain : float, optional (default: 0.01)
Minimum individual gain for each parameter.
min_grad_norm : float, optional (default: 1e-7)
If the gradient norm is below this threshold, the optimization will
be aborted.
min_error_diff : float, optional (default: 1e-7)
If the absolute difference of two successive cost function values
is below this threshold, the optimization will be aborted.
verbose : int, optional (default: 0)
Verbosity level.
args : sequence
Arguments to pass to objective function.
Returns
-------
p : array, shape (n_params,)
Optimum parameters.
error : float
Optimum.
i : int
Last iteration.
"""
if args is None:
args = []
p = p0.copy().ravel()
update = np.zeros_like(p)
gains = np.ones_like(p)
error = np.finfo(np.float).max
best_error = np.finfo(np.float).max
best_iter = 0
for i in range(it, n_iter):
new_error, grad = objective(p, *args)
error_diff = np.abs(new_error - error)
error = new_error
grad_norm = linalg.norm(grad)
if error < best_error:
best_error = error
best_iter = i
elif i - best_iter > n_iter_without_progress:
if verbose >= 2:
print("[t-SNE] Iteration %d: did not make any progress "
"during the last %d episodes. Finished."
% (i + 1, n_iter_without_progress))
break
if min_grad_norm >= grad_norm:
if verbose >= 2:
print("[t-SNE] Iteration %d: gradient norm %f. Finished."
% (i + 1, grad_norm))
break
if min_error_diff >= error_diff:
if verbose >= 2:
print("[t-SNE] Iteration %d: error difference %f. Finished."
% (i + 1, error_diff))
break
inc = update * grad >= 0.0
dec = np.invert(inc)
gains[inc] += 0.05
gains[dec] *= 0.95
np.clip(gains, min_gain, np.inf, out=gains)  # clip in place; without out= the result would be discarded
grad *= gains
update = momentum * update - learning_rate * grad
p += update
if verbose >= 2 and (i + 1) % 10 == 0:
print("[t-SNE] Iteration %d: error = %.7f, gradient norm = %.7f"
% (i + 1, error, grad_norm))
return p, error, i
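# Toy usage sketch (illustrative only; ``objective`` is a hypothetical
# example): minimize ||p||^2, whose gradient is 2p.
#
#     >>> import numpy as np
#     >>> objective = lambda p: (np.sum(p ** 2), 2.0 * p)
#     >>> p, error, i = _gradient_descent(objective, np.ones(3), it=0,
#     ...                                 n_iter=100, learning_rate=0.1)
#     >>> error < 1e-3
#     True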
def trustworthiness(X, X_embedded, n_neighbors=5, precomputed=False):
"""Expresses to what extent the local structure is retained.
The trustworthiness is within [0, 1]. It is defined as
.. math::
T(k) = 1 - \frac{2}{nk (2n - 3k - 1)} \sum^n_{i=1}
\sum_{j \in U^{(k)}_i} (r(i, j) - k)
where :math:`r(i, j)` is the rank of the embedded datapoint j
according to the pairwise distances between the embedded datapoints,
:math:`U^{(k)}_i` is the set of points that are in the k nearest
neighbors in the embedded space but not in the original space.
* "Neighborhood Preservation in Nonlinear Projection Methods: An
Experimental Study"
J. Venna, S. Kaski
* "Learning a Parametric Embedding by Preserving Local Structure"
L.J.P. van der Maaten
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row.
X_embedded : array, shape (n_samples, n_components)
Embedding of the training data in low-dimensional space.
n_neighbors : int, optional (default: 5)
Number of neighbors k that will be considered.
precomputed : bool, optional (default: False)
Set this flag if X is a precomputed square distance matrix.
Returns
-------
trustworthiness : float
Trustworthiness of the low-dimensional embedding.
"""
if precomputed:
dist_X = X
else:
dist_X = pairwise_distances(X, squared=True)
dist_X_embedded = pairwise_distances(X_embedded, squared=True)
ind_X = np.argsort(dist_X, axis=1)
ind_X_embedded = np.argsort(dist_X_embedded, axis=1)[:, 1:n_neighbors + 1]
n_samples = X.shape[0]
t = 0.0
ranks = np.zeros(n_neighbors)
for i in range(n_samples):
for j in range(n_neighbors):
ranks[j] = np.where(ind_X[i] == ind_X_embedded[i, j])[0][0]
ranks -= n_neighbors
t += np.sum(ranks[ranks > 0])
t = 1.0 - t * (2.0 / (n_samples * n_neighbors *
(2.0 * n_samples - 3.0 * n_neighbors - 1.0)))
return t
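# Usage sketch (illustrative only): an identity "embedding" preserves every
# neighbourhood ranking, so the trustworthiness is exactly 1.
#
#     >>> import numpy as np
#     >>> X = np.random.RandomState(0).randn(20, 5)
#     >>> trustworthiness(X, X.copy(), n_neighbors=3)
#     1.0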
class TSNE(BaseEstimator):
"""t-distributed Stochastic Neighbor Embedding.
t-SNE [1] is a tool to visualize high-dimensional data. It converts
similarities between data points to joint probabilities and tries
to minimize the Kullback-Leibler divergence between the joint
probabilities of the low-dimensional embedding and the
high-dimensional data. t-SNE has a cost function that is not convex,
i.e. with different initializations we can get different results.
It is highly recommended to use another dimensionality reduction
method (e.g. PCA for dense data or TruncatedSVD for sparse data)
to reduce the number of dimensions to a reasonable amount (e.g. 50)
if the number of features is very high. This will suppress some
noise and speed up the computation of pairwise distances between
samples. For more tips see Laurens van der Maaten's FAQ [2].
Read more in the :ref:`User Guide <t_sne>`.
Parameters
----------
n_components : int, optional (default: 2)
Dimension of the embedded space.
perplexity : float, optional (default: 30)
The perplexity is related to the number of nearest neighbors that
is used in other manifold learning algorithms. Larger datasets
usually require a larger perplexity. Consider selecting a value
between 5 and 50. The choice is not extremely critical since t-SNE
is quite insensitive to this parameter.
early_exaggeration : float, optional (default: 4.0)
Controls how tight natural clusters in the original space are in
the embedded space and how much space will be between them. For
larger values, the space between natural clusters will be larger
in the embedded space. Again, the choice of this parameter is not
very critical. If the cost function increases during initial
optimization, the early exaggeration factor or the learning rate
might be too high.
learning_rate : float, optional (default: 1000)
The learning rate can be a critical parameter. It should be
between 100 and 1000. If the cost function increases during initial
optimization, the early exaggeration factor or the learning rate
might be too high. If the cost function gets stuck in a bad local
minimum, increasing the learning rate sometimes helps.
n_iter : int, optional (default: 1000)
Maximum number of iterations for the optimization. Should be at
least 200.
n_iter_without_progress : int, optional (default: 30)
Maximum number of iterations without progress before we abort the
optimization.
min_grad_norm : float, optional (default: 1E-7)
If the gradient norm is below this threshold, the optimization will
be aborted.
metric : string or callable, optional
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by scipy.spatial.distance.pdist for its metric parameter, or
a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
If metric is "precomputed", X is assumed to be a distance matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them. The default is "euclidean" which is
interpreted as squared euclidean distance.
init : string, optional (default: "random")
Initialization of embedding. Possible options are 'random' and 'pca'.
PCA initialization cannot be used with precomputed distances and is
usually more globally stable than random initialization.
verbose : int, optional (default: 0)
Verbosity level.
random_state : int or RandomState instance or None (default)
Pseudo Random Number generator seed control. If None, use the
numpy.random singleton. Note that different initializations
might result in different local minima of the cost function.
Attributes
----------
embedding_ : array-like, shape (n_samples, n_components)
Stores the embedding vectors.
training_data_ : array-like, shape (n_samples, n_features)
Stores the training data.
Examples
--------
>>> import numpy as np
>>> from sklearn.manifold import TSNE
>>> X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
>>> model = TSNE(n_components=2, random_state=0)
>>> model.fit_transform(X) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
array([[ 887.28..., 238.61...],
[ -714.79..., 3243.34...],
[ 957.30..., -2505.78...],
[-1130.28..., -974.78...]])
References
----------
[1] van der Maaten, L.J.P.; Hinton, G.E. Visualizing High-Dimensional Data
Using t-SNE. Journal of Machine Learning Research 9:2579-2605, 2008.
[2] van der Maaten, L.J.P. t-Distributed Stochastic Neighbor Embedding
http://homepage.tudelft.nl/19j49/t-SNE.html
"""
def __init__(self, n_components=2, perplexity=30.0,
early_exaggeration=4.0, learning_rate=1000.0, n_iter=1000,
n_iter_without_progress=30, min_grad_norm=1e-7,
metric="euclidean", init="random", verbose=0,
random_state=None):
if init not in ["pca", "random"]:
raise ValueError("'init' must be either 'pca' or 'random'")
self.n_components = n_components
self.perplexity = perplexity
self.early_exaggeration = early_exaggeration
self.learning_rate = learning_rate
self.n_iter = n_iter
self.n_iter_without_progress = n_iter_without_progress
self.min_grad_norm = min_grad_norm
self.metric = metric
self.init = init
self.verbose = verbose
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model using X as training data.
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row.
"""
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'], dtype=np.float64)
random_state = check_random_state(self.random_state)
if self.early_exaggeration < 1.0:
raise ValueError("early_exaggeration must be at least 1, but is "
"%f" % self.early_exaggeration)
if self.n_iter < 200:
raise ValueError("n_iter should be at least 200")
if self.metric == "precomputed":
if self.init == 'pca':
raise ValueError("The parameter init=\"pca\" cannot be used "
"with metric=\"precomputed\".")
if X.shape[0] != X.shape[1]:
raise ValueError("X should be a square distance matrix")
distances = X
else:
if self.verbose:
print("[t-SNE] Computing pairwise distances...")
if self.metric == "euclidean":
distances = pairwise_distances(X, metric=self.metric, squared=True)
else:
distances = pairwise_distances(X, metric=self.metric)
# Degrees of freedom of the Student's t-distribution. The suggestion
# alpha = n_components - 1 comes from "Learning a Parametric Embedding
# by Preserving Local Structure" Laurens van der Maaten, 2009.
alpha = max(self.n_components - 1.0, 1)
n_samples = X.shape[0]
self.training_data_ = X
P = _joint_probabilities(distances, self.perplexity, self.verbose)
if self.init == 'pca':
pca = RandomizedPCA(n_components=self.n_components,
random_state=random_state)
X_embedded = pca.fit_transform(X)
elif self.init == 'random':
X_embedded = None
else:
raise ValueError("Unsupported initialization scheme: %s"
% self.init)
self.embedding_ = self._tsne(P, alpha, n_samples, random_state,
X_embedded=X_embedded)
return self
def _tsne(self, P, alpha, n_samples, random_state, X_embedded=None):
"""Runs t-SNE."""
# t-SNE minimizes the Kullback-Leibler divergence of the Gaussians P
# and the Student's t-distributions Q. The optimization algorithm that
# we use is batch gradient descent with three stages:
# * early exaggeration with momentum 0.5
# * early exaggeration with momentum 0.8
# * final optimization with momentum 0.8
# The embedding is initialized with iid samples from Gaussians with
# standard deviation 1e-4.
if X_embedded is None:
# Initialize embedding randomly
X_embedded = 1e-4 * random_state.randn(n_samples,
self.n_components)
params = X_embedded.ravel()
# Early exaggeration
P *= self.early_exaggeration
params, error, it = _gradient_descent(
_kl_divergence, params, it=0, n_iter=50, momentum=0.5,
min_grad_norm=0.0, min_error_diff=0.0,
learning_rate=self.learning_rate, verbose=self.verbose,
args=[P, alpha, n_samples, self.n_components])
params, error, it = _gradient_descent(
_kl_divergence, params, it=it + 1, n_iter=100, momentum=0.8,
min_grad_norm=0.0, min_error_diff=0.0,
learning_rate=self.learning_rate, verbose=self.verbose,
args=[P, alpha, n_samples, self.n_components])
if self.verbose:
print("[t-SNE] Error after %d iterations with early "
"exaggeration: %f" % (it + 1, error))
# Final optimization
P /= self.early_exaggeration
params, error, it = _gradient_descent(
_kl_divergence, params, it=it + 1, n_iter=self.n_iter,
min_grad_norm=self.min_grad_norm,
n_iter_without_progress=self.n_iter_without_progress,
momentum=0.8, learning_rate=self.learning_rate,
verbose=self.verbose, args=[P, alpha, n_samples,
self.n_components])
if self.verbose:
print("[t-SNE] Error after %d iterations: %f" % (it + 1, error))
X_embedded = params.reshape(n_samples, self.n_components)
return X_embedded
def fit_transform(self, X, y=None):
"""Transform X to the embedded space.
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row.
Returns
-------
X_new : array, shape (n_samples, n_components)
Embedding of the training data in low-dimensional space.
"""
self.fit(X)
return self.embedding_
|
bsd-3-clause
|
asnorkin/sentiment_analysis
|
site/lib/python2.7/site-packages/numpy/linalg/linalg.py
|
24
|
77839
|
"""Lite version of scipy.linalg.
Notes
-----
This module is a lite version of the linalg.py module in SciPy which
contains high-level Python interface to the LAPACK library. The lite
version only accesses the following LAPACK functions: dgesv, zgesv,
dgeev, zgeev, dgesdd, zgesdd, dgelsd, zgelsd, dsyevd, zheevd, dgetrf,
zgetrf, dpotrf, zpotrf, dgeqrf, zgeqrf, zungqr, dorgqr.
"""
from __future__ import division, absolute_import, print_function
__all__ = ['matrix_power', 'solve', 'tensorsolve', 'tensorinv', 'inv',
'cholesky', 'eigvals', 'eigvalsh', 'pinv', 'slogdet', 'det',
'svd', 'eig', 'eigh', 'lstsq', 'norm', 'qr', 'cond', 'matrix_rank',
'LinAlgError', 'multi_dot']
import warnings
from numpy.core import (
array, asarray, zeros, empty, empty_like, transpose, intc, single, double,
csingle, cdouble, inexact, complexfloating, newaxis, ravel, all, Inf, dot,
add, multiply, sqrt, maximum, fastCopyAndTranspose, sum, isfinite, size,
finfo, errstate, geterrobj, longdouble, rollaxis, amin, amax, product, abs,
broadcast, atleast_2d, intp, asanyarray, isscalar, object_
)
from numpy.lib import triu, asfarray
from numpy.linalg import lapack_lite, _umath_linalg
from numpy.matrixlib.defmatrix import matrix_power
from numpy.compat import asbytes
# For Python2/3 compatibility
_N = asbytes('N')
_V = asbytes('V')
_A = asbytes('A')
_S = asbytes('S')
_L = asbytes('L')
fortran_int = intc
# Error object
class LinAlgError(Exception):
"""
Generic Python-exception-derived object raised by linalg functions.
General purpose exception class, derived from Python's exception.Exception
class, programmatically raised in linalg functions when a Linear
Algebra-related condition would prevent further correct execution of the
function.
Parameters
----------
None
Examples
--------
>>> from numpy import linalg as LA
>>> LA.inv(np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "...linalg.py", line 350,
in inv return wrap(solve(a, identity(a.shape[0], dtype=a.dtype)))
File "...linalg.py", line 249,
in solve
raise LinAlgError('Singular matrix')
numpy.linalg.LinAlgError: Singular matrix
"""
pass
# Dealing with errors in _umath_linalg
_linalg_error_extobj = None
def _determine_error_states():
global _linalg_error_extobj
errobj = geterrobj()
bufsize = errobj[0]
with errstate(invalid='call', over='ignore',
divide='ignore', under='ignore'):
invalid_call_errmask = geterrobj()[1]
_linalg_error_extobj = [bufsize, invalid_call_errmask, None]
_determine_error_states()
def _raise_linalgerror_singular(err, flag):
raise LinAlgError("Singular matrix")
def _raise_linalgerror_nonposdef(err, flag):
raise LinAlgError("Matrix is not positive definite")
def _raise_linalgerror_eigenvalues_nonconvergence(err, flag):
raise LinAlgError("Eigenvalues did not converge")
def _raise_linalgerror_svd_nonconvergence(err, flag):
raise LinAlgError("SVD did not converge")
def get_linalg_error_extobj(callback):
extobj = list(_linalg_error_extobj)
extobj[2] = callback
return extobj
def _makearray(a):
new = asarray(a)
wrap = getattr(a, "__array_prepare__", new.__array_wrap__)
return new, wrap
def isComplexType(t):
return issubclass(t, complexfloating)
_real_types_map = {single : single,
double : double,
csingle : single,
cdouble : double}
_complex_types_map = {single : csingle,
double : cdouble,
csingle : csingle,
cdouble : cdouble}
def _realType(t, default=double):
return _real_types_map.get(t, default)
def _complexType(t, default=cdouble):
return _complex_types_map.get(t, default)
def _linalgRealType(t):
"""Cast the type t to either double or cdouble."""
return double
def _commonType(*arrays):
# in lite version, use higher precision (always double or cdouble)
result_type = single
is_complex = False
for a in arrays:
if issubclass(a.dtype.type, inexact):
if isComplexType(a.dtype.type):
is_complex = True
rt = _realType(a.dtype.type, default=None)
if rt is None:
# unsupported inexact scalar
raise TypeError("array type %s is unsupported in linalg" %
(a.dtype.name,))
else:
rt = double
if rt is double:
result_type = double
if is_complex:
t = cdouble
result_type = _complex_types_map[result_type]
else:
t = double
return t, result_type
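# Promotion sketch (illustrative only): the computation type ``t`` is always
# double or cdouble, while ``result_type`` keeps the input precision.
#
#     >>> t, rt = _commonType(asarray([1.0], dtype=single))
#     >>> t is double and rt is single
#     True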
# _fastCopyAndTranspose assumes the input is 2D (as all the calls in here are).
_fastCT = fastCopyAndTranspose
def _to_native_byte_order(*arrays):
ret = []
for arr in arrays:
if arr.dtype.byteorder not in ('=', '|'):
ret.append(asarray(arr, dtype=arr.dtype.newbyteorder('=')))
else:
ret.append(arr)
if len(ret) == 1:
return ret[0]
else:
return ret
def _fastCopyAndTranspose(type, *arrays):
cast_arrays = ()
for a in arrays:
if a.dtype.type is type:
cast_arrays = cast_arrays + (_fastCT(a),)
else:
cast_arrays = cast_arrays + (_fastCT(a.astype(type)),)
if len(cast_arrays) == 1:
return cast_arrays[0]
else:
return cast_arrays
def _assertRank2(*arrays):
for a in arrays:
if len(a.shape) != 2:
raise LinAlgError('%d-dimensional array given. Array must be '
'two-dimensional' % len(a.shape))
def _assertRankAtLeast2(*arrays):
for a in arrays:
if len(a.shape) < 2:
raise LinAlgError('%d-dimensional array given. Array must be '
'at least two-dimensional' % len(a.shape))
def _assertSquareness(*arrays):
for a in arrays:
if max(a.shape) != min(a.shape):
raise LinAlgError('Array must be square')
def _assertNdSquareness(*arrays):
for a in arrays:
if max(a.shape[-2:]) != min(a.shape[-2:]):
raise LinAlgError('Last 2 dimensions of the array must be square')
def _assertFinite(*arrays):
for a in arrays:
if not (isfinite(a).all()):
raise LinAlgError("Array must not contain infs or NaNs")
def _assertNoEmpty2d(*arrays):
for a in arrays:
if a.size == 0 and product(a.shape[-2:]) == 0:
raise LinAlgError("Arrays cannot be empty")
# Linear equations
def tensorsolve(a, b, axes=None):
"""
Solve the tensor equation ``a x = b`` for x.
It is assumed that all indices of `x` are summed over in the product,
together with the rightmost indices of `a`, as is done in, for example,
``tensordot(a, x, axes=len(b.shape))``.
Parameters
----------
a : array_like
Coefficient tensor, of shape ``b.shape + Q``. `Q`, a tuple, equals
the shape of that sub-tensor of `a` consisting of the appropriate
number of its rightmost indices, and must be such that
``prod(Q) == prod(b.shape)`` (in which sense `a` is said to be
'square').
b : array_like
Right-hand tensor, which can be of any shape.
axes : tuple of ints, optional
Axes in `a` to reorder to the right, before inversion.
If None (default), no reordering is done.
Returns
-------
x : ndarray, shape Q
Raises
------
LinAlgError
If `a` is singular or not 'square' (in the above sense).
See Also
--------
numpy.tensordot, tensorinv, numpy.einsum
Examples
--------
>>> a = np.eye(2*3*4)
>>> a.shape = (2*3, 4, 2, 3, 4)
>>> b = np.random.randn(2*3, 4)
>>> x = np.linalg.tensorsolve(a, b)
>>> x.shape
(2, 3, 4)
>>> np.allclose(np.tensordot(a, x, axes=3), b)
True
"""
a, wrap = _makearray(a)
b = asarray(b)
an = a.ndim
if axes is not None:
allaxes = list(range(0, an))
for k in axes:
allaxes.remove(k)
allaxes.insert(an, k)
a = a.transpose(allaxes)
oldshape = a.shape[-(an-b.ndim):]
prod = 1
for k in oldshape:
prod *= k
a = a.reshape(-1, prod)
b = b.ravel()
res = wrap(solve(a, b))
res.shape = oldshape
return res
def solve(a, b):
"""
Solve a linear matrix equation, or system of linear scalar equations.
Computes the "exact" solution, `x`, of the well-determined, i.e., full
rank, linear matrix equation `ax = b`.
Parameters
----------
a : (..., M, M) array_like
Coefficient matrix.
b : {(..., M,), (..., M, K)}, array_like
Ordinate or "dependent variable" values.
Returns
-------
x : {(..., M,), (..., M, K)} ndarray
Solution to the system a x = b. Returned shape is identical to `b`.
Raises
------
LinAlgError
If `a` is singular or not square.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The solutions are computed using LAPACK routine _gesv
`a` must be square and of full-rank, i.e., all rows (or, equivalently,
columns) must be linearly independent; if either is not true, use
`lstsq` for the least-squares best "solution" of the
system/equation.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pg. 22.
Examples
--------
Solve the system of equations ``3 * x0 + x1 = 9`` and ``x0 + 2 * x1 = 8``:
>>> a = np.array([[3,1], [1,2]])
>>> b = np.array([9,8])
>>> x = np.linalg.solve(a, b)
>>> x
array([ 2., 3.])
Check that the solution is correct:
>>> np.allclose(np.dot(a, x), b)
True
"""
a, _ = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
b, wrap = _makearray(b)
t, result_t = _commonType(a, b)
# We use the b = (..., M,) logic, only if the number of extra dimensions
# match exactly
if b.ndim == a.ndim - 1:
if a.shape[-1] == 0 and b.shape[-1] == 0:
# Legal, but the ufunc cannot handle the 0-sized inner dims
# let the ufunc handle all wrong cases.
a = a.reshape(a.shape[:-1])
bc = broadcast(a, b)
return wrap(empty(bc.shape, dtype=result_t))
gufunc = _umath_linalg.solve1
else:
if b.size == 0:
if (a.shape[-1] == 0 and b.shape[-2] == 0) or b.shape[-1] == 0:
a = a[:,:1].reshape(a.shape[:-1] + (1,))
bc = broadcast(a, b)
return wrap(empty(bc.shape, dtype=result_t))
gufunc = _umath_linalg.solve
signature = 'DD->D' if isComplexType(t) else 'dd->d'
extobj = get_linalg_error_extobj(_raise_linalgerror_singular)
r = gufunc(a, b, signature=signature, extobj=extobj)
return wrap(r.astype(result_t, copy=False))
def tensorinv(a, ind=2):
"""
Compute the 'inverse' of an N-dimensional array.
The result is an inverse for `a` relative to the tensordot operation
``tensordot(a, b, ind)``, i. e., up to floating-point accuracy,
``tensordot(tensorinv(a), a, ind)`` is the "identity" tensor for the
tensordot operation.
Parameters
----------
a : array_like
Tensor to 'invert'. Its shape must be 'square', i. e.,
``prod(a.shape[:ind]) == prod(a.shape[ind:])``.
ind : int, optional
Number of first indices that are involved in the inverse sum.
Must be a positive integer, default is 2.
Returns
-------
b : ndarray
`a`'s tensordot inverse, shape ``a.shape[ind:] + a.shape[:ind]``.
Raises
------
LinAlgError
If `a` is singular or not 'square' (in the above sense).
See Also
--------
numpy.tensordot, tensorsolve
Examples
--------
>>> a = np.eye(4*6)
>>> a.shape = (4, 6, 8, 3)
>>> ainv = np.linalg.tensorinv(a, ind=2)
>>> ainv.shape
(8, 3, 4, 6)
>>> b = np.random.randn(4, 6)
>>> np.allclose(np.tensordot(ainv, b), np.linalg.tensorsolve(a, b))
True
>>> a = np.eye(4*6)
>>> a.shape = (24, 8, 3)
>>> ainv = np.linalg.tensorinv(a, ind=1)
>>> ainv.shape
(8, 3, 24)
>>> b = np.random.randn(24)
>>> np.allclose(np.tensordot(ainv, b, 1), np.linalg.tensorsolve(a, b))
True
"""
a = asarray(a)
oldshape = a.shape
prod = 1
if ind > 0:
invshape = oldshape[ind:] + oldshape[:ind]
for k in oldshape[ind:]:
prod *= k
else:
raise ValueError("Invalid ind argument.")
a = a.reshape(prod, -1)
ia = inv(a)
return ia.reshape(*invshape)
# Matrix inversion
def inv(a):
"""
Compute the (multiplicative) inverse of a matrix.
Given a square matrix `a`, return the matrix `ainv` satisfying
``dot(a, ainv) = dot(ainv, a) = eye(a.shape[0])``.
Parameters
----------
a : (..., M, M) array_like
Matrix to be inverted.
Returns
-------
ainv : (..., M, M) ndarray or matrix
(Multiplicative) inverse of the matrix `a`.
Raises
------
LinAlgError
If `a` is not square or inversion fails.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
Examples
--------
>>> from numpy.linalg import inv
>>> a = np.array([[1., 2.], [3., 4.]])
>>> ainv = inv(a)
>>> np.allclose(np.dot(a, ainv), np.eye(2))
True
>>> np.allclose(np.dot(ainv, a), np.eye(2))
True
If a is a matrix object, then the return value is a matrix as well:
>>> ainv = inv(np.matrix(a))
>>> ainv
matrix([[-2. , 1. ],
[ 1.5, -0.5]])
Inverses of several matrices can be computed at once:
>>> a = np.array([[[1., 2.], [3., 4.]], [[1, 3], [3, 5]]])
>>> inv(a)
array([[[-2. , 1. ],
[ 1.5, -0.5]],
[[-5. , 2. ],
[ 3. , -1. ]]])
"""
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
if a.shape[-1] == 0:
# The inner array is 0x0, the ufunc cannot handle this case
return wrap(empty_like(a, dtype=result_t))
signature = 'D->D' if isComplexType(t) else 'd->d'
extobj = get_linalg_error_extobj(_raise_linalgerror_singular)
ainv = _umath_linalg.inv(a, signature=signature, extobj=extobj)
return wrap(ainv.astype(result_t, copy=False))
# Cholesky decomposition
def cholesky(a):
"""
Cholesky decomposition.
Return the Cholesky decomposition, `L * L.H`, of the square matrix `a`,
where `L` is lower-triangular and .H is the conjugate transpose operator
(which is the ordinary transpose if `a` is real-valued). `a` must be
Hermitian (symmetric if real-valued) and positive-definite. Only `L` is
actually returned.
Parameters
----------
a : (..., M, M) array_like
Hermitian (symmetric if all elements are real), positive-definite
input matrix.
Returns
-------
L : (..., M, M) array_like
Lower-triangular Cholesky factor of `a`. Returns a
matrix object if `a` is a matrix object.
Raises
------
LinAlgError
If the decomposition fails, for example, if `a` is not
positive-definite.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The Cholesky decomposition is often used as a fast way of solving
.. math:: A \\mathbf{x} = \\mathbf{b}
(when `A` is both Hermitian/symmetric and positive-definite).
First, we solve for :math:`\\mathbf{y}` in
.. math:: L \\mathbf{y} = \\mathbf{b},
and then for :math:`\\mathbf{x}` in
.. math:: L.H \\mathbf{x} = \\mathbf{y}.
Examples
--------
>>> A = np.array([[1,-2j],[2j,5]])
>>> A
array([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> L = np.linalg.cholesky(A)
>>> L
array([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
>>> np.dot(L, L.T.conj()) # verify that L * L.H = A
array([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> A = [[1,-2j],[2j,5]] # what happens if A is only array_like?
>>> np.linalg.cholesky(A) # an ndarray object is returned
array([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
>>> # But a matrix object is returned if A is a matrix object
>>> LA.cholesky(np.matrix(A))
matrix([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
"""
extobj = get_linalg_error_extobj(_raise_linalgerror_nonposdef)
gufunc = _umath_linalg.cholesky_lo
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
signature = 'D->D' if isComplexType(t) else 'd->d'
r = gufunc(a, signature=signature, extobj=extobj)
return wrap(r.astype(result_t, copy=False))
# QR decomposition
def qr(a, mode='reduced'):
"""
Compute the qr factorization of a matrix.
Factor the matrix `a` as *qr*, where `q` is orthonormal and `r` is
upper-triangular.
Parameters
----------
a : array_like, shape (M, N)
Matrix to be factored.
mode : {'reduced', 'complete', 'r', 'raw', 'full', 'economic'}, optional
If K = min(M, N), then
'reduced' : returns q, r with dimensions (M, K), (K, N) (default)
'complete' : returns q, r with dimensions (M, M), (M, N)
'r' : returns r only with dimensions (K, N)
'raw' : returns h, tau with dimensions (N, M), (K,)
'full' : alias of 'reduced', deprecated
'economic' : returns h from 'raw', deprecated.
The options 'reduced', 'complete', and 'raw' are new in numpy 1.8,
see the notes for more information. The default is 'reduced' and to
maintain backward compatibility with earlier versions of numpy both
it and the old default 'full' can be omitted. Note that array h
returned in 'raw' mode is transposed for calling Fortran. The
'economic' mode is deprecated. The modes 'full' and 'economic' may
be passed using only the first letter for backwards compatibility,
but all others must be spelled out. See the Notes for more
explanation.
Returns
-------
q : ndarray of float or complex, optional
A matrix with orthonormal columns. When mode = 'complete' the
result is an orthogonal/unitary matrix depending on whether or not
a is real/complex. The determinant may be either +/- 1 in that
case.
r : ndarray of float or complex, optional
The upper-triangular matrix.
(h, tau) : ndarrays of np.double or np.cdouble, optional
The array h contains the Householder reflectors that generate q
along with r. The tau array contains scaling factors for the
reflectors. In the deprecated 'economic' mode only h is returned.
Raises
------
LinAlgError
If factoring fails.
Notes
-----
This is an interface to the LAPACK routines dgeqrf, zgeqrf,
dorgqr, and zungqr.
For more information on the qr factorization, see for example:
http://en.wikipedia.org/wiki/QR_factorization
Subclasses of `ndarray` are preserved except for the 'raw' mode. So if
`a` is of type `matrix`, all the return values will be matrices too.
New 'reduced', 'complete', and 'raw' options for mode were added in
NumPy 1.8.0 and the old option 'full' was made an alias of 'reduced'. In
addition the options 'full' and 'economic' were deprecated. Because
'full' was the previous default and 'reduced' is the new default,
backward compatibility can be maintained by letting `mode` default.
The 'raw' option was added so that LAPACK routines that can multiply
arrays by q using the Householder reflectors can be used. Note that in
this case the returned arrays are of type np.double or np.cdouble and
the h array is transposed to be FORTRAN compatible. No routines using
the 'raw' return are currently exposed by numpy, but some are available
in lapack_lite and just await the necessary work.
Examples
--------
>>> a = np.random.randn(9, 6)
>>> q, r = np.linalg.qr(a)
>>> np.allclose(a, np.dot(q, r)) # a does equal qr
True
>>> r2 = np.linalg.qr(a, mode='r')
>>> r3 = np.linalg.qr(a, mode='economic')
>>> np.allclose(r, r2) # mode='r' returns the same r as mode='full'
True
>>> # But only triu parts are guaranteed equal when mode='economic'
>>> np.allclose(r, np.triu(r3[:6,:6], k=0))
True
Example illustrating a common use of `qr`: solving of least squares
problems
What are the least-squares-best `m` and `y0` in ``y = y0 + mx`` for
the following data: {(0,1), (1,0), (1,2), (2,1)}. (Graph the points
and you'll see that it should be y0 = 0, m = 1.) The answer is provided
by solving the over-determined matrix equation ``Ax = b``, where::
A = array([[0, 1], [1, 1], [1, 1], [2, 1]])
x = array([[y0], [m]])
b = array([[1], [0], [2], [1]])
If A = qr such that q is orthonormal (which is always possible via
Gram-Schmidt), then ``x = inv(r) * (q.T) * b``. (In numpy practice,
however, we simply use `lstsq`.)
>>> A = np.array([[0, 1], [1, 1], [1, 1], [2, 1]])
>>> A
array([[0, 1],
[1, 1],
[1, 1],
[2, 1]])
>>> b = np.array([1, 0, 2, 1])
>>> q, r = LA.qr(A)
>>> p = np.dot(q.T, b)
>>> np.dot(LA.inv(r), p)
array([ 1.1e-16, 1.0e+00])
"""
if mode not in ('reduced', 'complete', 'r', 'raw'):
if mode in ('f', 'full'):
# 2013-04-01, 1.8
msg = "".join((
"The 'full' option is deprecated in favor of 'reduced'.\n",
"For backward compatibility let mode default."))
warnings.warn(msg, DeprecationWarning, stacklevel=2)
mode = 'reduced'
elif mode in ('e', 'economic'):
# 2013-04-01, 1.8
msg = "The 'economic' option is deprecated."
warnings.warn(msg, DeprecationWarning, stacklevel=2)
mode = 'economic'
else:
raise ValueError("Unrecognized mode '%s'" % mode)
a, wrap = _makearray(a)
_assertRank2(a)
_assertNoEmpty2d(a)
m, n = a.shape
t, result_t = _commonType(a)
a = _fastCopyAndTranspose(t, a)
a = _to_native_byte_order(a)
mn = min(m, n)
tau = zeros((mn,), t)
if isComplexType(t):
lapack_routine = lapack_lite.zgeqrf
routine_name = 'zgeqrf'
else:
lapack_routine = lapack_lite.dgeqrf
routine_name = 'dgeqrf'
# calculate optimal size of work data 'work'
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(m, n, a, m, tau, work, -1, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
# do qr decomposition
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
results = lapack_routine(m, n, a, m, tau, work, lwork, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
# handle modes that don't return q
if mode == 'r':
r = _fastCopyAndTranspose(result_t, a[:, :mn])
return wrap(triu(r))
if mode == 'raw':
return a, tau
if mode == 'economic':
if t != result_t :
a = a.astype(result_t, copy=False)
return wrap(a.T)
# generate q from a
if mode == 'complete' and m > n:
mc = m
q = empty((m, m), t)
else:
mc = mn
q = empty((n, m), t)
q[:n] = a
if isComplexType(t):
lapack_routine = lapack_lite.zungqr
routine_name = 'zungqr'
else:
lapack_routine = lapack_lite.dorgqr
routine_name = 'dorgqr'
# determine optimal lwork
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(m, mc, mn, q, m, tau, work, -1, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
# compute q
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
results = lapack_routine(m, mc, mn, q, m, tau, work, lwork, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
q = _fastCopyAndTranspose(result_t, q[:mc])
r = _fastCopyAndTranspose(result_t, a[:, :mc])
return wrap(q), wrap(triu(r))
# Eigenvalues
def eigvals(a):
"""
Compute the eigenvalues of a general matrix.
Main difference between `eigvals` and `eig`: the eigenvectors aren't
returned.
Parameters
----------
a : (..., M, M) array_like
A complex- or real-valued matrix whose eigenvalues will be computed.
Returns
-------
w : (..., M,) ndarray
The eigenvalues, each repeated according to its multiplicity.
They are not necessarily ordered, nor are they necessarily
real for real matrices.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eig : eigenvalues and right eigenvectors of general arrays
eigvalsh : eigenvalues of symmetric or Hermitian arrays.
eigh : eigenvalues and eigenvectors of symmetric/Hermitian arrays.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
This is implemented using the _geev LAPACK routines which compute
the eigenvalues and eigenvectors of general square arrays.
Examples
--------
Illustration, using the fact that the eigenvalues of a diagonal matrix
are its diagonal elements, that multiplying a matrix on the left
by an orthogonal matrix, `Q`, and on the right by `Q.T` (the transpose
of `Q`), preserves the eigenvalues of the "middle" matrix. In other words,
if `Q` is orthogonal, then ``Q * A * Q.T`` has the same eigenvalues as
``A``:
>>> from numpy import linalg as LA
>>> x = np.random.random()
>>> Q = np.array([[np.cos(x), -np.sin(x)], [np.sin(x), np.cos(x)]])
>>> LA.norm(Q[0, :]), LA.norm(Q[1, :]), np.dot(Q[0, :],Q[1, :])
(1.0, 1.0, 0.0)
Now multiply a diagonal matrix by Q on one side and by Q.T on the other:
>>> D = np.diag((-1,1))
>>> LA.eigvals(D)
array([-1., 1.])
>>> A = np.dot(Q, D)
>>> A = np.dot(A, Q.T)
>>> LA.eigvals(A)
array([ 1., -1.])
"""
a, wrap = _makearray(a)
_assertNoEmpty2d(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
_assertFinite(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
signature = 'D->D' if isComplexType(t) else 'd->D'
w = _umath_linalg.eigvals(a, signature=signature, extobj=extobj)
if not isComplexType(t):
if all(w.imag == 0):
w = w.real
result_t = _realType(result_t)
else:
result_t = _complexType(result_t)
return w.astype(result_t, copy=False)
def eigvalsh(a, UPLO='L'):
"""
Compute the eigenvalues of a Hermitian or real symmetric matrix.
Main difference from eigh: the eigenvectors are not computed.
Parameters
----------
a : (..., M, M) array_like
A complex- or real-valued matrix whose eigenvalues are to be
computed.
UPLO : {'L', 'U'}, optional
Specifies whether the calculation is done with the lower triangular
part of `a` ('L', default) or the upper triangular part ('U').
Irrespective of this value only the real parts of the diagonal will
be considered in the computation to preserve the notion of a Hermitian
matrix. It therefore follows that the imaginary part of the diagonal
will always be treated as zero.
Returns
-------
w : (..., M,) ndarray
The eigenvalues in ascending order, each repeated according to
its multiplicity.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigh : eigenvalues and eigenvectors of symmetric/Hermitian arrays.
eigvals : eigenvalues of general real or complex arrays.
eig : eigenvalues and right eigenvectors of general real or complex
arrays.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The eigenvalues are computed using LAPACK routines _syevd, _heevd.
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, -2j], [2j, 5]])
>>> LA.eigvalsh(a)
array([ 0.17157288, 5.82842712])
>>> # demonstrate the treatment of the imaginary part of the diagonal
>>> a = np.array([[5+2j, 9-2j], [0+2j, 2-1j]])
>>> a
array([[ 5.+2.j, 9.-2.j],
[ 0.+2.j, 2.-1.j]])
>>> # with UPLO='L' this is numerically equivalent to using LA.eigvals()
>>> # with:
>>> b = np.array([[5.+0.j, 0.-2.j], [0.+2.j, 2.-0.j]])
>>> b
array([[ 5.+0.j, 0.-2.j],
[ 0.+2.j, 2.+0.j]])
>>> wa = LA.eigvalsh(a)
>>> wb = LA.eigvals(b)
>>> wa; wb
array([ 1., 6.])
array([ 6.+0.j, 1.+0.j])
"""
UPLO = UPLO.upper()
if UPLO not in ('L', 'U'):
raise ValueError("UPLO argument must be 'L' or 'U'")
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
if UPLO == 'L':
gufunc = _umath_linalg.eigvalsh_lo
else:
gufunc = _umath_linalg.eigvalsh_up
a, wrap = _makearray(a)
_assertNoEmpty2d(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
signature = 'D->d' if isComplexType(t) else 'd->d'
w = gufunc(a, signature=signature, extobj=extobj)
return w.astype(_realType(result_t), copy=False)
def _convertarray(a):
t, result_t = _commonType(a)
a = _fastCT(a.astype(t))
return a, t, result_t
# Eigenvectors
def eig(a):
"""
Compute the eigenvalues and right eigenvectors of a square array.
Parameters
----------
a : (..., M, M) array
Matrices for which the eigenvalues and right eigenvectors will
be computed
Returns
-------
w : (..., M) array
The eigenvalues, each repeated according to its multiplicity.
The eigenvalues are not necessarily ordered. The resulting
array will be of complex type, unless the imaginary part is
zero in which case it will be cast to a real type. When `a`
is real the resulting eigenvalues will be real (0 imaginary
part) or occur in conjugate pairs
v : (..., M, M) array
The normalized (unit "length") eigenvectors, such that the
column ``v[:,i]`` is the eigenvector corresponding to the
eigenvalue ``w[i]``.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigvals : eigenvalues of a non-symmetric array.
eigh : eigenvalues and eigenvectors of a symmetric or Hermitian
(conjugate symmetric) array.
eigvalsh : eigenvalues of a symmetric or Hermitian (conjugate symmetric)
array.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
This is implemented using the _geev LAPACK routines which compute
the eigenvalues and eigenvectors of general square arrays.
The number `w` is an eigenvalue of `a` if there exists a vector
`v` such that ``dot(a,v) = w * v``. Thus, the arrays `a`, `w`, and
`v` satisfy the equations ``dot(a[:,:], v[:,i]) = w[i] * v[:,i]``
for :math:`i \\in \\{0,...,M-1\\}`.
The array `v` of eigenvectors may not be of maximum rank, that is, some
of the columns may be linearly dependent, although round-off error may
obscure that fact. If the eigenvalues are all different, then theoretically
the eigenvectors are linearly independent. Likewise, the (complex-valued)
matrix of eigenvectors `v` is unitary if the matrix `a` is normal, i.e.,
if ``dot(a, a.H) = dot(a.H, a)``, where `a.H` denotes the conjugate
transpose of `a`.
Finally, it is emphasized that `v` consists of the *right* (as in
right-hand side) eigenvectors of `a`. A vector `y` satisfying
``dot(y.T, a) = z * y.T`` for some number `z` is called a *left*
eigenvector of `a`, and, in general, the left and right eigenvectors
of a matrix are not necessarily the (perhaps conjugate) transposes
of each other.
References
----------
G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando, FL,
Academic Press, Inc., 1980, Various pp.
Examples
--------
>>> from numpy import linalg as LA
(Almost) trivial example with real e-values and e-vectors.
>>> w, v = LA.eig(np.diag((1, 2, 3)))
>>> w; v
array([ 1., 2., 3.])
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
Real matrix possessing complex e-values and e-vectors; note that the
e-values are complex conjugates of each other.
>>> w, v = LA.eig(np.array([[1, -1], [1, 1]]))
>>> w; v
array([ 1. + 1.j, 1. - 1.j])
array([[ 0.70710678+0.j , 0.70710678+0.j ],
[ 0.00000000-0.70710678j, 0.00000000+0.70710678j]])
Complex-valued matrix with real e-values (but complex-valued e-vectors);
note that a.conj().T = a, i.e., a is Hermitian.
>>> a = np.array([[1, 1j], [-1j, 1]])
>>> w, v = LA.eig(a)
>>> w; v
array([ 2.00000000e+00+0.j, 5.98651912e-36+0.j]) # i.e., {2, 0}
array([[ 0.00000000+0.70710678j, 0.70710678+0.j ],
[ 0.70710678+0.j , 0.00000000+0.70710678j]])
Be careful about round-off error!
>>> a = np.array([[1 + 1e-9, 0], [0, 1 - 1e-9]])
>>> # Theor. e-values are 1 +/- 1e-9
>>> w, v = LA.eig(a)
>>> w; v
array([ 1., 1.])
array([[ 1., 0.],
[ 0., 1.]])
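As an illustrative sanity check, the returned pairs satisfy the defining
relation ``dot(a, v[:,i]) = w[i] * v[:,i]`` up to round-off:
>>> a = np.diag((1., 2., 3.))
>>> w, v = LA.eig(a)
>>> all(np.allclose(np.dot(a, v[:, i]), w[i] * v[:, i]) for i in range(3))
True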
"""
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
_assertFinite(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
signature = 'D->DD' if isComplexType(t) else 'd->DD'
w, vt = _umath_linalg.eig(a, signature=signature, extobj=extobj)
if not isComplexType(t) and all(w.imag == 0.0):
w = w.real
vt = vt.real
result_t = _realType(result_t)
else:
result_t = _complexType(result_t)
vt = vt.astype(result_t, copy=False)
return w.astype(result_t, copy=False), wrap(vt)
def eigh(a, UPLO='L'):
"""
Return the eigenvalues and eigenvectors of a Hermitian or symmetric matrix.
Returns two objects, a 1-D array containing the eigenvalues of `a`, and
a 2-D square array or matrix (depending on the input type) of the
corresponding eigenvectors (in columns).
Parameters
----------
a : (..., M, M) array
Hermitian/Symmetric matrices whose eigenvalues and
eigenvectors are to be computed.
UPLO : {'L', 'U'}, optional
Specifies whether the calculation is done with the lower triangular
part of `a` ('L', default) or the upper triangular part ('U').
Irrespective of this value only the real parts of the diagonal will
be considered in the computation to preserve the notion of a Hermitian
matrix. It therefore follows that the imaginary part of the diagonal
will always be treated as zero.
Returns
-------
w : (..., M) ndarray
The eigenvalues in ascending order, each repeated according to
its multiplicity.
v : {(..., M, M) ndarray, (..., M, M) matrix}
The column ``v[:, i]`` is the normalized eigenvector corresponding
to the eigenvalue ``w[i]``. Will return a matrix object if `a` is
a matrix object.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigvalsh : eigenvalues of symmetric or Hermitian arrays.
eig : eigenvalues and right eigenvectors for non-symmetric arrays.
eigvals : eigenvalues of non-symmetric arrays.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The eigenvalues/eigenvectors are computed using LAPACK routines _syevd,
_heevd.
The eigenvalues of real symmetric or complex Hermitian matrices are
always real. [1]_ The array `v` of (column) eigenvectors is unitary
and `a`, `w`, and `v` satisfy the equations
``dot(a, v[:, i]) = w[i] * v[:, i]``.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pg. 222.
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, -2j], [2j, 5]])
>>> a
array([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> w, v = LA.eigh(a)
>>> w; v
array([ 0.17157288, 5.82842712])
array([[-0.92387953+0.j , -0.38268343+0.j ],
[ 0.00000000+0.38268343j, 0.00000000-0.92387953j]])
>>> np.dot(a, v[:, 0]) - w[0] * v[:, 0] # verify 1st e-val/vec pair
array([2.77555756e-17 + 0.j, 0. + 1.38777878e-16j])
>>> np.dot(a, v[:, 1]) - w[1] * v[:, 1] # verify 2nd e-val/vec pair
array([ 0.+0.j, 0.+0.j])
>>> A = np.matrix(a) # what happens if input is a matrix object
>>> A
matrix([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> w, v = LA.eigh(A)
>>> w; v
array([ 0.17157288, 5.82842712])
matrix([[-0.92387953+0.j , -0.38268343+0.j ],
[ 0.00000000+0.38268343j, 0.00000000-0.92387953j]])
>>> # demonstrate the treatment of the imaginary part of the diagonal
>>> a = np.array([[5+2j, 9-2j], [0+2j, 2-1j]])
>>> a
array([[ 5.+2.j, 9.-2.j],
[ 0.+2.j, 2.-1.j]])
>>> # with UPLO='L' this is numerically equivalent to using LA.eig() with:
>>> b = np.array([[5.+0.j, 0.-2.j], [0.+2.j, 2.-0.j]])
>>> b
array([[ 5.+0.j, 0.-2.j],
[ 0.+2.j, 2.+0.j]])
>>> wa, va = LA.eigh(a)
>>> wb, vb = LA.eig(b)
>>> wa; wb
array([ 1., 6.])
array([ 6.+0.j, 1.+0.j])
>>> va; vb
array([[-0.44721360-0.j , -0.89442719+0.j ],
[ 0.00000000+0.89442719j, 0.00000000-0.4472136j ]])
array([[ 0.89442719+0.j , 0.00000000-0.4472136j],
[ 0.00000000-0.4472136j, 0.89442719+0.j ]])
"""
UPLO = UPLO.upper()
if UPLO not in ('L', 'U'):
raise ValueError("UPLO argument must be 'L' or 'U'")
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
if UPLO == 'L':
gufunc = _umath_linalg.eigh_lo
else:
gufunc = _umath_linalg.eigh_up
signature = 'D->dD' if isComplexType(t) else 'd->dd'
w, vt = gufunc(a, signature=signature, extobj=extobj)
w = w.astype(_realType(result_t), copy=False)
vt = vt.astype(result_t, copy=False)
return w, wrap(vt)
# Singular value decomposition
def svd(a, full_matrices=True, compute_uv=True):
"""
Singular Value Decomposition.
Factors the matrix `a` as ``u * np.diag(s) * v``, where `u` and `v`
are unitary and `s` is a 1-d array of `a`'s singular values.
Parameters
----------
a : (..., M, N) array_like
A real or complex matrix of shape (`M`, `N`).
full_matrices : bool, optional
If True (default), `u` and `v` have the shapes (`M`, `M`) and
(`N`, `N`), respectively. Otherwise, the shapes are (`M`, `K`)
and (`K`, `N`), respectively, where `K` = min(`M`, `N`).
compute_uv : bool, optional
Whether or not to compute `u` and `v` in addition to `s`. True
by default.
Returns
-------
u : { (..., M, M), (..., M, K) } array
Unitary matrices. The actual shape depends on the value of
``full_matrices``. Only returned when ``compute_uv`` is True.
s : (..., K) array
The singular values for every matrix, sorted in descending order.
v : { (..., N, N), (..., K, N) } array
Unitary matrices. The actual shape depends on the value of
``full_matrices``. Only returned when ``compute_uv`` is True.
Raises
------
LinAlgError
If SVD computation does not converge.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The decomposition is performed using LAPACK routine _gesdd.
The SVD is commonly written as ``a = U S V.H``. The `v` returned
by this function is ``V.H`` and ``u = U``.
If ``U`` is a unitary matrix, it means that it
satisfies ``U.H = inv(U)``.
The rows of `v` are the eigenvectors of ``a.H a``. The columns
of `u` are the eigenvectors of ``a a.H``. For row ``i`` in
`v` and column ``i`` in `u`, the corresponding eigenvalue is
``s[i]**2``.
If `a` is a `matrix` object (as opposed to an `ndarray`), then so
are all the return values.
Examples
--------
>>> a = np.random.randn(9, 6) + 1j*np.random.randn(9, 6)
Reconstruction based on full SVD:
>>> U, s, V = np.linalg.svd(a, full_matrices=True)
>>> U.shape, V.shape, s.shape
((9, 9), (6, 6), (6,))
>>> S = np.zeros((9, 6), dtype=complex)
>>> S[:6, :6] = np.diag(s)
>>> np.allclose(a, np.dot(U, np.dot(S, V)))
True
Reconstruction based on reduced SVD:
>>> U, s, V = np.linalg.svd(a, full_matrices=False)
>>> U.shape, V.shape, s.shape
((9, 6), (6, 6), (6,))
>>> S = np.diag(s)
>>> np.allclose(a, np.dot(U, np.dot(S, V)))
True
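As noted above, the squared singular values are the eigenvalues of
``a.H a`` (an illustrative check, reusing `a` from the examples):
>>> s = np.linalg.svd(a, compute_uv=False)
>>> w = np.linalg.eigvalsh(np.dot(a.conj().T, a))
>>> np.allclose(np.sort(s**2), w)
True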
"""
a, wrap = _makearray(a)
_assertNoEmpty2d(a)
_assertRankAtLeast2(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(_raise_linalgerror_svd_nonconvergence)
m = a.shape[-2]
n = a.shape[-1]
if compute_uv:
if full_matrices:
if m < n:
gufunc = _umath_linalg.svd_m_f
else:
gufunc = _umath_linalg.svd_n_f
else:
if m < n:
gufunc = _umath_linalg.svd_m_s
else:
gufunc = _umath_linalg.svd_n_s
signature = 'D->DdD' if isComplexType(t) else 'd->ddd'
u, s, vt = gufunc(a, signature=signature, extobj=extobj)
u = u.astype(result_t, copy=False)
s = s.astype(_realType(result_t), copy=False)
vt = vt.astype(result_t, copy=False)
return wrap(u), s, wrap(vt)
else:
if m < n:
gufunc = _umath_linalg.svd_m
else:
gufunc = _umath_linalg.svd_n
signature = 'D->d' if isComplexType(t) else 'd->d'
s = gufunc(a, signature=signature, extobj=extobj)
s = s.astype(_realType(result_t), copy=False)
return s
def cond(x, p=None):
"""
Compute the condition number of a matrix.
This function is capable of returning the condition number using
one of seven different norms, depending on the value of `p` (see
Parameters below).
Parameters
----------
x : (..., M, N) array_like
The matrix whose condition number is sought.
p : {None, 1, -1, 2, -2, inf, -inf, 'fro'}, optional
Order of the norm:
===== ============================
p norm for matrices
===== ============================
None 2-norm, computed directly using the ``SVD``
'fro' Frobenius norm
inf max(sum(abs(x), axis=1))
-inf min(sum(abs(x), axis=1))
1 max(sum(abs(x), axis=0))
-1 min(sum(abs(x), axis=0))
2 2-norm (largest sing. value)
-2 smallest singular value
===== ============================
inf means the numpy.inf object, and the Frobenius norm is
the root-of-sum-of-squares norm.
Returns
-------
c : {float, inf}
The condition number of the matrix. May be infinite.
See Also
--------
numpy.linalg.norm
Notes
-----
The condition number of `x` is defined as the norm of `x` times the
norm of the inverse of `x` [1]_; the norm can be the usual L2-norm
(root-of-sum-of-squares) or one of a number of other matrix norms.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, Orlando, FL,
Academic Press, Inc., 1980, pg. 285.
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, 0, -1], [0, 1, 0], [1, 0, 1]])
>>> a
array([[ 1, 0, -1],
[ 0, 1, 0],
[ 1, 0, 1]])
>>> LA.cond(a)
1.4142135623730951
>>> LA.cond(a, 'fro')
3.1622776601683795
>>> LA.cond(a, np.inf)
2.0
>>> LA.cond(a, -np.inf)
1.0
>>> LA.cond(a, 1)
2.0
>>> LA.cond(a, -1)
1.0
>>> LA.cond(a, 2)
1.4142135623730951
>>> LA.cond(a, -2)
0.70710678118654746
>>> min(LA.svd(a, compute_uv=0))*min(LA.svd(LA.inv(a), compute_uv=0))
0.70710678118654746
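For a finite `p` the definition in the Notes can be checked directly
(an illustrative identity, reusing `a` from above):
>>> np.allclose(LA.cond(a, 1), LA.norm(a, 1) * LA.norm(LA.inv(a), 1))
True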
"""
x = asarray(x) # in case we have a matrix
if p is None:
s = svd(x, compute_uv=False)
return s[..., 0]/s[..., -1]
else:
return norm(x, p, axis=(-2, -1)) * norm(inv(x), p, axis=(-2, -1))
def matrix_rank(M, tol=None):
"""
Return matrix rank of array using SVD method
Rank of the array is the number of singular values of the array that
are greater than `tol`.
Parameters
----------
M : {(M,), (M, N)} array_like
array of <=2 dimensions
tol : {None, float}, optional
threshold below which SVD values are considered zero. If `tol` is
None, and ``S`` is an array with singular values for `M`, and
``eps`` is the epsilon value for datatype of ``S``, then `tol` is
set to ``S.max() * max(M.shape) * eps``.
Notes
-----
The default threshold to detect rank deficiency is a test on the magnitude
of the singular values of `M`. By default, we identify singular values less
than ``S.max() * max(M.shape) * eps`` as indicating rank deficiency (with
the symbols defined above). This is the algorithm MATLAB uses [1]. It also
appears in *Numerical recipes* in the discussion of SVD solutions for linear
least squares [2].
This default threshold is designed to detect rank deficiency accounting for
the numerical errors of the SVD computation. Imagine that there is a column
in `M` that is an exact (in floating point) linear combination of other
columns in `M`. Computing the SVD on `M` will not produce a singular value
exactly equal to 0 in general: any difference of the smallest SVD value from
0 will be caused by numerical imprecision in the calculation of the SVD.
Our threshold for small SVD values takes this numerical imprecision into
account, and the default threshold will detect such numerical rank
deficiency. The threshold may declare a matrix `M` rank deficient even if
the linear combination of some columns of `M` is not exactly equal to
another column of `M` but only numerically very close to another column of
`M`.
We chose our default threshold because it is in wide use. Other thresholds
are possible. For example, elsewhere in the 2007 edition of *Numerical
recipes* there is an alternative threshold of ``S.max() *
np.finfo(M.dtype).eps / 2. * np.sqrt(m + n + 1.)``. The authors describe
this threshold as being based on "expected roundoff error" (p 71).
The thresholds above deal with floating point roundoff error in the
calculation of the SVD. However, you may have more information about the
sources of error in `M` that would make you consider other tolerance values
to detect *effective* rank deficiency. The most useful measure of the
tolerance depends on the operations you intend to use on your matrix. For
example, if your data come from uncertain measurements with uncertainties
greater than floating point epsilon, choosing a tolerance near that
uncertainty may be preferable. The tolerance may be absolute if the
uncertainties are absolute rather than relative.
References
----------
.. [1] MATLAB reference documentation, "Rank"
http://www.mathworks.com/help/techdoc/ref/rank.html
.. [2] W. H. Press, S. A. Teukolsky, W. T. Vetterling and B. P. Flannery,
"Numerical Recipes (3rd edition)", Cambridge University Press, 2007,
page 795.
Examples
--------
>>> from numpy.linalg import matrix_rank
>>> matrix_rank(np.eye(4)) # Full rank matrix
4
>>> I=np.eye(4); I[-1,-1] = 0. # rank deficient matrix
>>> matrix_rank(I)
3
>>> matrix_rank(np.ones((4,))) # 1 dimension - rank 1 unless all 0
1
>>> matrix_rank(np.zeros((4,)))
0
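An explicit `tol` overrides the default threshold described in the Notes
(illustrative values; the singular values of the identity are all 1):
>>> matrix_rank(np.eye(4), tol=0.5)
4
>>> matrix_rank(np.eye(4), tol=1.5)
0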
"""
M = asarray(M)
if M.ndim > 2:
raise TypeError('array should have 2 or fewer dimensions')
if M.ndim < 2:
return int(not all(M==0))
S = svd(M, compute_uv=False)
if tol is None:
tol = S.max() * max(M.shape) * finfo(S.dtype).eps
return sum(S > tol)
# Generalized inverse
def pinv(a, rcond=1e-15):
"""
Compute the (Moore-Penrose) pseudo-inverse of a matrix.
Calculate the generalized inverse of a matrix using its
singular-value decomposition (SVD) and including all
*large* singular values.
Parameters
----------
a : (M, N) array_like
Matrix to be pseudo-inverted.
rcond : float
Cutoff for small singular values.
Singular values smaller (in modulus) than
`rcond` * largest_singular_value (again, in modulus)
are set to zero.
Returns
-------
B : (N, M) ndarray
The pseudo-inverse of `a`. If `a` is a `matrix` instance, then so
is `B`.
Raises
------
LinAlgError
If the SVD computation does not converge.
Notes
-----
The pseudo-inverse of a matrix A, denoted :math:`A^+`, is
defined as: "the matrix that 'solves' [the least-squares problem]
:math:`Ax = b`," i.e., if :math:`\\bar{x}` is said solution, then
:math:`A^+` is that matrix such that :math:`\\bar{x} = A^+b`.
It can be shown that if :math:`Q_1 \\Sigma Q_2^T = A` is the singular
value decomposition of A, then
:math:`A^+ = Q_2 \\Sigma^+ Q_1^T`, where :math:`Q_{1,2}` are
orthogonal matrices, :math:`\\Sigma` is a diagonal matrix consisting
of A's so-called singular values, (followed, typically, by
zeros), and then :math:`\\Sigma^+` is simply the diagonal matrix
consisting of the reciprocals of A's singular values
(again, followed by zeros). [1]_
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pp. 139-142.
Examples
--------
The following example checks that ``a * a+ * a == a`` and
``a+ * a * a+ == a+``:
>>> a = np.random.randn(9, 6)
>>> B = np.linalg.pinv(a)
>>> np.allclose(a, np.dot(a, np.dot(B, a)))
True
>>> np.allclose(B, np.dot(B, np.dot(a, B)))
True
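The SVD construction described in the Notes can be reproduced directly
(an illustrative sketch using the reduced SVD; the random `a` above is
almost surely of full rank, so no singular values are cut off):
>>> u, s, vt = np.linalg.svd(a, full_matrices=False)
>>> np.allclose(B, np.dot(vt.T, np.dot(np.diag(1. / s), u.T)))
True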
"""
a, wrap = _makearray(a)
_assertNoEmpty2d(a)
a = a.conjugate()
    u, s, vt = svd(a, full_matrices=False)
m = u.shape[0]
n = vt.shape[1]
cutoff = rcond*maximum.reduce(s)
for i in range(min(n, m)):
if s[i] > cutoff:
s[i] = 1./s[i]
else:
s[i] = 0.
res = dot(transpose(vt), multiply(s[:, newaxis], transpose(u)))
return wrap(res)
# Determinant
def slogdet(a):
"""
Compute the sign and (natural) logarithm of the determinant of an array.
If an array has a very small or very large determinant, then a call to
`det` may overflow or underflow. This routine is more robust against such
issues, because it computes the logarithm of the determinant rather than
the determinant itself.
Parameters
----------
a : (..., M, M) array_like
Input array, has to be a square 2-D array.
Returns
-------
sign : (...) array_like
A number representing the sign of the determinant. For a real matrix,
this is 1, 0, or -1. For a complex matrix, this is a complex number
with absolute value 1 (i.e., it is on the unit circle), or else 0.
logdet : (...) array_like
The natural log of the absolute value of the determinant.
If the determinant is zero, then `sign` will be 0 and `logdet` will be
-Inf. In all cases, the determinant is equal to ``sign * np.exp(logdet)``.
See Also
--------
det
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
.. versionadded:: 1.6.0
The determinant is computed via LU factorization using the LAPACK
routine z/dgetrf.
Examples
--------
The determinant of a 2-D array ``[[a, b], [c, d]]`` is ``ad - bc``:
>>> a = np.array([[1, 2], [3, 4]])
>>> (sign, logdet) = np.linalg.slogdet(a)
>>> (sign, logdet)
(-1, 0.69314718055994529)
>>> sign * np.exp(logdet)
-2.0
Computing log-determinants for a stack of matrices:
>>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ])
>>> a.shape
(3, 2, 2)
>>> sign, logdet = np.linalg.slogdet(a)
>>> (sign, logdet)
(array([-1., -1., -1.]), array([ 0.69314718, 1.09861229, 2.07944154]))
>>> sign * np.exp(logdet)
array([-2., -3., -8.])
This routine succeeds where ordinary `det` does not:
>>> np.linalg.det(np.eye(500) * 0.1)
0.0
>>> np.linalg.slogdet(np.eye(500) * 0.1)
(1, -1151.2925464970228)
"""
a = asarray(a)
_assertNoEmpty2d(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
real_t = _realType(result_t)
signature = 'D->Dd' if isComplexType(t) else 'd->dd'
sign, logdet = _umath_linalg.slogdet(a, signature=signature)
if isscalar(sign):
sign = sign.astype(result_t)
else:
sign = sign.astype(result_t, copy=False)
if isscalar(logdet):
logdet = logdet.astype(real_t)
else:
logdet = logdet.astype(real_t, copy=False)
return sign, logdet
def det(a):
"""
Compute the determinant of an array.
Parameters
----------
a : (..., M, M) array_like
Input array to compute determinants for.
Returns
-------
det : (...) array_like
Determinant of `a`.
See Also
--------
slogdet : Another way of representing the determinant, more suitable
for large matrices where underflow/overflow may occur.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The determinant is computed via LU factorization using the LAPACK
routine z/dgetrf.
Examples
--------
The determinant of a 2-D array ``[[a, b], [c, d]]`` is ``ad - bc``:
>>> a = np.array([[1, 2], [3, 4]])
>>> np.linalg.det(a)
-2.0
Computing determinants for a stack of matrices:
>>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ])
>>> a.shape
(3, 2, 2)
>>> np.linalg.det(a)
array([-2., -3., -8.])
"""
a = asarray(a)
_assertNoEmpty2d(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
signature = 'D->D' if isComplexType(t) else 'd->d'
r = _umath_linalg.det(a, signature=signature)
if isscalar(r):
r = r.astype(result_t)
else:
r = r.astype(result_t, copy=False)
return r
# Linear Least Squares
def lstsq(a, b, rcond=-1):
"""
Return the least-squares solution to a linear matrix equation.
Solves the equation `a x = b` by computing a vector `x` that
minimizes the Euclidean 2-norm `|| b - a x ||^2`. The equation may
be under-, well-, or over- determined (i.e., the number of
linearly independent rows of `a` can be less than, equal to, or
greater than its number of linearly independent columns). If `a`
is square and of full rank, then `x` (but for round-off error) is
the "exact" solution of the equation.
Parameters
----------
a : (M, N) array_like
"Coefficient" matrix.
b : {(M,), (M, K)} array_like
Ordinate or "dependent variable" values. If `b` is two-dimensional,
the least-squares solution is calculated for each of the `K` columns
of `b`.
rcond : float, optional
Cut-off ratio for small singular values of `a`.
For the purposes of rank determination, singular values are treated
as zero if they are smaller than `rcond` times the largest singular
value of `a`.
Returns
-------
x : {(N,), (N, K)} ndarray
Least-squares solution. If `b` is two-dimensional,
the solutions are in the `K` columns of `x`.
residuals : {(), (1,), (K,)} ndarray
Sums of residuals; squared Euclidean 2-norm for each column in
``b - a*x``.
If the rank of `a` is < N or M <= N, this is an empty array.
If `b` is 1-dimensional, this is a (1,) shape array.
Otherwise the shape is (K,).
rank : int
Rank of matrix `a`.
s : (min(M, N),) ndarray
Singular values of `a`.
Raises
------
LinAlgError
If computation does not converge.
Notes
-----
If `b` is a matrix, then all array results are returned as matrices.
Examples
--------
Fit a line, ``y = mx + c``, through some noisy data-points:
>>> x = np.array([0, 1, 2, 3])
>>> y = np.array([-1, 0.2, 0.9, 2.1])
By examining the coefficients, we see that the line should have a
gradient of roughly 1 and cut the y-axis at, more or less, -1.
We can rewrite the line equation as ``y = Ap``, where ``A = [[x 1]]``
and ``p = [[m], [c]]``. Now use `lstsq` to solve for `p`:
>>> A = np.vstack([x, np.ones(len(x))]).T
>>> A
array([[ 0., 1.],
[ 1., 1.],
[ 2., 1.],
[ 3., 1.]])
>>> m, c = np.linalg.lstsq(A, y)[0]
>>> print(m, c)
1.0 -0.95
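As an illustrative check (reusing ``A`` and ``y`` from above), the
returned residuals equal the squared 2-norm of ``y - A p``:
>>> p, res, rnk, sv = np.linalg.lstsq(A, y)
>>> np.allclose(res, np.sum((y - A.dot(p))**2))
True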
Plot the data along with the fitted line:
>>> import matplotlib.pyplot as plt
>>> plt.plot(x, y, 'o', label='Original data', markersize=10)
>>> plt.plot(x, m*x + c, 'r', label='Fitted line')
>>> plt.legend()
>>> plt.show()
"""
import math
a, _ = _makearray(a)
b, wrap = _makearray(b)
is_1d = len(b.shape) == 1
if is_1d:
b = b[:, newaxis]
_assertRank2(a, b)
m = a.shape[0]
n = a.shape[1]
n_rhs = b.shape[1]
ldb = max(n, m)
if m != b.shape[0]:
raise LinAlgError('Incompatible dimensions')
t, result_t = _commonType(a, b)
result_real_t = _realType(result_t)
real_t = _linalgRealType(t)
bstar = zeros((ldb, n_rhs), t)
bstar[:b.shape[0], :n_rhs] = b.copy()
a, bstar = _fastCopyAndTranspose(t, a, bstar)
a, bstar = _to_native_byte_order(a, bstar)
s = zeros((min(m, n),), real_t)
    nlvl = max(0, int(math.log(float(min(m, n)) / 2.)) + 1)
iwork = zeros((3*min(m, n)*nlvl+11*min(m, n),), fortran_int)
if isComplexType(t):
lapack_routine = lapack_lite.zgelsd
lwork = 1
rwork = zeros((lwork,), real_t)
work = zeros((lwork,), t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, -1, rwork, iwork, 0)
lwork = int(abs(work[0]))
rwork = zeros((lwork,), real_t)
a_real = zeros((m, n), real_t)
bstar_real = zeros((ldb, n_rhs,), real_t)
results = lapack_lite.dgelsd(m, n, n_rhs, a_real, m,
bstar_real, ldb, s, rcond,
0, rwork, -1, iwork, 0)
lrwork = int(rwork[0])
work = zeros((lwork,), t)
rwork = zeros((lrwork,), real_t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, lwork, rwork, iwork, 0)
else:
lapack_routine = lapack_lite.dgelsd
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, -1, iwork, 0)
lwork = int(work[0])
work = zeros((lwork,), t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, lwork, iwork, 0)
if results['info'] > 0:
raise LinAlgError('SVD did not converge in Linear Least Squares')
resids = array([], result_real_t)
if is_1d:
x = array(ravel(bstar)[:n], dtype=result_t, copy=True)
if results['rank'] == n and m > n:
if isComplexType(t):
resids = array([sum(abs(ravel(bstar)[n:])**2)],
dtype=result_real_t)
else:
resids = array([sum((ravel(bstar)[n:])**2)],
dtype=result_real_t)
else:
x = array(transpose(bstar)[:n,:], dtype=result_t, copy=True)
if results['rank'] == n and m > n:
if isComplexType(t):
resids = sum(abs(transpose(bstar)[n:,:])**2, axis=0).astype(
result_real_t, copy=False)
else:
resids = sum((transpose(bstar)[n:,:])**2, axis=0).astype(
result_real_t, copy=False)
st = s[:min(n, m)].astype(result_real_t, copy=True)
return wrap(x), wrap(resids), results['rank'], st
def _multi_svd_norm(x, row_axis, col_axis, op):
"""Compute a function of the singular values of the 2-D matrices in `x`.
This is a private utility function used by numpy.linalg.norm().
Parameters
----------
x : ndarray
row_axis, col_axis : int
The axes of `x` that hold the 2-D matrices.
op : callable
This should be either numpy.amin or numpy.amax or numpy.sum.
Returns
-------
result : float or ndarray
If `x` is 2-D, the return value is a float.
Otherwise, it is an array with ``x.ndim - 2`` dimensions.
The return values are either the minimum or maximum or sum of the
singular values of the matrices, depending on whether `op`
is `numpy.amin` or `numpy.amax` or `numpy.sum`.
"""
if row_axis > col_axis:
row_axis -= 1
y = rollaxis(rollaxis(x, col_axis, x.ndim), row_axis, -1)
    result = op(svd(y, compute_uv=False), axis=-1)
return result
def norm(x, ord=None, axis=None, keepdims=False):
"""
Matrix or vector norm.
This function is able to return one of eight different matrix norms,
or one of an infinite number of vector norms (described below), depending
on the value of the ``ord`` parameter.
Parameters
----------
x : array_like
Input array. If `axis` is None, `x` must be 1-D or 2-D.
ord : {non-zero int, inf, -inf, 'fro', 'nuc'}, optional
Order of the norm (see table under ``Notes``). inf means numpy's
`inf` object.
axis : {int, 2-tuple of ints, None}, optional
If `axis` is an integer, it specifies the axis of `x` along which to
compute the vector norms. If `axis` is a 2-tuple, it specifies the
axes that hold 2-D matrices, and the matrix norms of these matrices
are computed. If `axis` is None then either a vector norm (when `x`
is 1-D) or a matrix norm (when `x` is 2-D) is returned.
keepdims : bool, optional
If this is set to True, the axes which are normed over are left in the
result as dimensions with size one. With this option the result will
broadcast correctly against the original `x`.
.. versionadded:: 1.10.0
Returns
-------
n : float or ndarray
Norm of the matrix or vector(s).
Notes
-----
For values of ``ord <= 0``, the result is, strictly speaking, not a
mathematical 'norm', but it may still be useful for various numerical
purposes.
The following norms can be calculated:
===== ============================ ==========================
ord norm for matrices norm for vectors
===== ============================ ==========================
None Frobenius norm 2-norm
'fro' Frobenius norm --
'nuc' nuclear norm --
inf max(sum(abs(x), axis=1)) max(abs(x))
-inf min(sum(abs(x), axis=1)) min(abs(x))
0 -- sum(x != 0)
1 max(sum(abs(x), axis=0)) as below
-1 min(sum(abs(x), axis=0)) as below
2 2-norm (largest sing. value) as below
-2 smallest singular value as below
other -- sum(abs(x)**ord)**(1./ord)
===== ============================ ==========================
The Frobenius norm is given by [1]_:
:math:`||A||_F = [\\sum_{i,j} abs(a_{i,j})^2]^{1/2}`
The nuclear norm is the sum of the singular values.
References
----------
.. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*,
Baltimore, MD, Johns Hopkins University Press, 1985, pg. 15
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.arange(9) - 4
>>> a
array([-4, -3, -2, -1, 0, 1, 2, 3, 4])
>>> b = a.reshape((3, 3))
>>> b
array([[-4, -3, -2],
[-1, 0, 1],
[ 2, 3, 4]])
>>> LA.norm(a)
7.745966692414834
>>> LA.norm(b)
7.745966692414834
>>> LA.norm(b, 'fro')
7.745966692414834
>>> LA.norm(a, np.inf)
4.0
>>> LA.norm(b, np.inf)
9.0
>>> LA.norm(a, -np.inf)
0.0
>>> LA.norm(b, -np.inf)
2.0
>>> LA.norm(a, 1)
20.0
>>> LA.norm(b, 1)
7.0
>>> LA.norm(a, -1)
-4.6566128774142013e-010
>>> LA.norm(b, -1)
6.0
>>> LA.norm(a, 2)
7.745966692414834
>>> LA.norm(b, 2)
7.3484692283495345
>>> LA.norm(a, -2)
nan
>>> LA.norm(b, -2)
1.8570331885190563e-016
>>> LA.norm(a, 3)
5.8480354764257312
>>> LA.norm(a, -3)
nan
Using the `axis` argument to compute vector norms:
>>> c = np.array([[ 1, 2, 3],
... [-1, 1, 4]])
>>> LA.norm(c, axis=0)
array([ 1.41421356, 2.23606798, 5. ])
>>> LA.norm(c, axis=1)
array([ 3.74165739, 4.24264069])
>>> LA.norm(c, ord=1, axis=1)
array([ 6., 6.])
Using the `axis` argument to compute matrix norms:
>>> m = np.arange(8).reshape(2,2,2)
>>> LA.norm(m, axis=(1,2))
array([ 3.74165739, 11.22497216])
>>> LA.norm(m[0, :, :]), LA.norm(m[1, :, :])
(3.7416573867739413, 11.224972160321824)
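The nuclear norm is the sum of the singular values (an illustrative
check, reusing `b` from above):
>>> np.allclose(LA.norm(b, 'nuc'), LA.svd(b, compute_uv=False).sum())
True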
"""
x = asarray(x)
if not issubclass(x.dtype.type, (inexact, object_)):
x = x.astype(float)
# Immediately handle some default, simple, fast, and common cases.
if axis is None:
ndim = x.ndim
if ((ord is None) or
(ord in ('f', 'fro') and ndim == 2) or
(ord == 2 and ndim == 1)):
x = x.ravel(order='K')
if isComplexType(x.dtype.type):
sqnorm = dot(x.real, x.real) + dot(x.imag, x.imag)
else:
sqnorm = dot(x, x)
ret = sqrt(sqnorm)
if keepdims:
ret = ret.reshape(ndim*[1])
return ret
# Normalize the `axis` argument to a tuple.
nd = x.ndim
if axis is None:
axis = tuple(range(nd))
elif not isinstance(axis, tuple):
try:
axis = int(axis)
        except (TypeError, ValueError):
            raise TypeError("'axis' must be None, an integer or "
                            "a tuple of integers")
axis = (axis,)
if len(axis) == 1:
if ord == Inf:
return abs(x).max(axis=axis, keepdims=keepdims)
elif ord == -Inf:
return abs(x).min(axis=axis, keepdims=keepdims)
elif ord == 0:
# Zero norm
return (x != 0).astype(float).sum(axis=axis, keepdims=keepdims)
elif ord == 1:
# special case for speedup
return add.reduce(abs(x), axis=axis, keepdims=keepdims)
elif ord is None or ord == 2:
# special case for speedup
s = (x.conj() * x).real
return sqrt(add.reduce(s, axis=axis, keepdims=keepdims))
else:
            try:
                # non-numeric `ord` values raise TypeError here and are
                # reported as an invalid vector norm order below
                ord + 1
except TypeError:
raise ValueError("Invalid norm order for vectors.")
if x.dtype.type is longdouble:
# Convert to a float type, so integer arrays give
# float results. Don't apply asfarray to longdouble arrays,
# because it will downcast to float64.
absx = abs(x)
else:
absx = x if isComplexType(x.dtype.type) else asfarray(x)
if absx.dtype is x.dtype:
absx = abs(absx)
else:
# if the type changed, we can safely overwrite absx
abs(absx, out=absx)
absx **= ord
return add.reduce(absx, axis=axis, keepdims=keepdims) ** (1.0 / ord)
elif len(axis) == 2:
row_axis, col_axis = axis
if row_axis < 0:
row_axis += nd
if col_axis < 0:
col_axis += nd
if not (0 <= row_axis < nd and 0 <= col_axis < nd):
raise ValueError('Invalid axis %r for an array with shape %r' %
(axis, x.shape))
if row_axis == col_axis:
raise ValueError('Duplicate axes given.')
if ord == 2:
ret = _multi_svd_norm(x, row_axis, col_axis, amax)
elif ord == -2:
ret = _multi_svd_norm(x, row_axis, col_axis, amin)
elif ord == 1:
if col_axis > row_axis:
col_axis -= 1
ret = add.reduce(abs(x), axis=row_axis).max(axis=col_axis)
elif ord == Inf:
if row_axis > col_axis:
row_axis -= 1
ret = add.reduce(abs(x), axis=col_axis).max(axis=row_axis)
elif ord == -1:
if col_axis > row_axis:
col_axis -= 1
ret = add.reduce(abs(x), axis=row_axis).min(axis=col_axis)
elif ord == -Inf:
if row_axis > col_axis:
row_axis -= 1
ret = add.reduce(abs(x), axis=col_axis).min(axis=row_axis)
elif ord in [None, 'fro', 'f']:
ret = sqrt(add.reduce((x.conj() * x).real, axis=axis))
elif ord == 'nuc':
ret = _multi_svd_norm(x, row_axis, col_axis, sum)
else:
raise ValueError("Invalid norm order for matrices.")
if keepdims:
ret_shape = list(x.shape)
ret_shape[axis[0]] = 1
ret_shape[axis[1]] = 1
ret = ret.reshape(ret_shape)
return ret
else:
raise ValueError("Improper number of dimensions to norm.")
# multi_dot
def multi_dot(arrays):
"""
Compute the dot product of two or more arrays in a single function call,
while automatically selecting the fastest evaluation order.
`multi_dot` chains `numpy.dot` and uses optimal parenthesization
of the matrices [1]_ [2]_. Depending on the shapes of the matrices,
this can speed up the multiplication a lot.
If the first argument is 1-D it is treated as a row vector.
If the last argument is 1-D it is treated as a column vector.
The other arguments must be 2-D.
Think of `multi_dot` as::
def multi_dot(arrays): return functools.reduce(np.dot, arrays)
Parameters
----------
arrays : sequence of array_like
If the first argument is 1-D it is treated as row vector.
If the last argument is 1-D it is treated as column vector.
The other arguments must be 2-D.
Returns
-------
output : ndarray
Returns the dot product of the supplied arrays.
See Also
--------
dot : dot multiplication with two arguments.
References
----------
.. [1] Cormen, "Introduction to Algorithms", Chapter 15.2, p. 370-378
.. [2] http://en.wikipedia.org/wiki/Matrix_chain_multiplication
Examples
--------
`multi_dot` allows you to write::
>>> from numpy.linalg import multi_dot
>>> # Prepare some data
>>> A = np.random.random((10000, 100))
>>> B = np.random.random((100, 1000))
>>> C = np.random.random((1000, 5))
>>> D = np.random.random((5, 333))
>>> # the actual dot multiplication
>>> multi_dot([A, B, C, D])
instead of::
>>> np.dot(np.dot(np.dot(A, B), C), D)
>>> # or
>>> A.dot(B).dot(C).dot(D)
Example: multiplication costs of different parenthesizations
------------------------------------------------------------
The cost for a matrix multiplication can be calculated with the
following function::
def cost(A, B): return A.shape[0] * A.shape[1] * B.shape[1]
Let's assume we have three matrices
:math:`A_{10x100}, B_{100x5}, C_{5x50}`.
The costs for the two different parenthesizations are as follows::
cost((AB)C) = 10*100*5 + 10*5*50 = 5000 + 2500 = 7500
cost(A(BC)) = 10*100*50 + 100*5*50 = 50000 + 25000 = 75000
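A quick, illustrative check that the optimized order returns the same
product as the naive chain (small shapes chosen arbitrarily):
>>> A = np.random.random((10, 100))
>>> B = np.random.random((100, 5))
>>> C = np.random.random((5, 50))
>>> np.allclose(multi_dot([A, B, C]), A.dot(B).dot(C))
True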
"""
n = len(arrays)
# optimization only makes sense for len(arrays) > 2
if n < 2:
raise ValueError("Expecting at least two arrays.")
elif n == 2:
return dot(arrays[0], arrays[1])
arrays = [asanyarray(a) for a in arrays]
# save original ndim to reshape the result array into the proper form later
ndim_first, ndim_last = arrays[0].ndim, arrays[-1].ndim
# Explicitly convert vectors to 2D arrays to keep the logic of the internal
# _multi_dot_* functions as simple as possible.
if arrays[0].ndim == 1:
arrays[0] = atleast_2d(arrays[0])
if arrays[-1].ndim == 1:
arrays[-1] = atleast_2d(arrays[-1]).T
_assertRank2(*arrays)
# _multi_dot_three is much faster than _multi_dot_matrix_chain_order
if n == 3:
result = _multi_dot_three(arrays[0], arrays[1], arrays[2])
else:
order = _multi_dot_matrix_chain_order(arrays)
result = _multi_dot(arrays, order, 0, n - 1)
# return proper shape
if ndim_first == 1 and ndim_last == 1:
return result[0, 0] # scalar
elif ndim_first == 1 or ndim_last == 1:
return result.ravel() # 1-D
else:
return result
def _multi_dot_three(A, B, C):
"""
Find the best order for three arrays and do the multiplication.
For three arguments `_multi_dot_three` is approximately 15 times faster
than `_multi_dot_matrix_chain_order`.
"""
a0, a1b0 = A.shape
b1c0, c1 = C.shape
# cost1 = cost((AB)C) = a0*a1b0*b1c0 + a0*b1c0*c1
cost1 = a0 * b1c0 * (a1b0 + c1)
# cost2 = cost(A(BC)) = a1b0*b1c0*c1 + a0*a1b0*c1
cost2 = a1b0 * c1 * (a0 + b1c0)
if cost1 < cost2:
return dot(dot(A, B), C)
else:
return dot(A, dot(B, C))
def _multi_dot_matrix_chain_order(arrays, return_costs=False):
"""
Return a np.array that encodes the optimal order of multiplications.
The optimal order array is then used by `_multi_dot()` to do the
multiplication.
Also return the cost matrix if `return_costs` is `True`.
The implementation CLOSELY follows Cormen, "Introduction to Algorithms",
Chapter 15.2, p. 370-378. Note that Cormen uses 1-based indices.
cost[i, j] = min([
cost[prefix] + cost[suffix] + cost_mult(prefix, suffix)
for k in range(i, j)])
"""
n = len(arrays)
# p stores the dimensions of the matrices
# Example for p: A_{10x100}, B_{100x5}, C_{5x50} --> p = [10, 100, 5, 50]
p = [a.shape[0] for a in arrays] + [arrays[-1].shape[1]]
# m is a matrix of costs of the subproblems
# m[i,j]: min number of scalar multiplications needed to compute A_{i..j}
m = zeros((n, n), dtype=double)
# s is the actual ordering
# s[i, j] is the value of k at which we split the product A_i..A_j
s = empty((n, n), dtype=intp)
for l in range(1, n):
for i in range(n - l):
j = i + l
m[i, j] = Inf
for k in range(i, j):
q = m[i, k] + m[k+1, j] + p[i]*p[k+1]*p[j+1]
if q < m[i, j]:
m[i, j] = q
s[i, j] = k # Note that Cormen uses 1-based index
return (s, m) if return_costs else s
def _multi_dot(arrays, order, i, j):
"""Actually do the multiplication with the given order."""
if i == j:
return arrays[i]
else:
return dot(_multi_dot(arrays, order, i, order[i, j]),
_multi_dot(arrays, order, order[i, j] + 1, j))
|
mit
|
Winand/pandas
|
pandas/tests/io/json/test_pandas.py
|
11
|
44634
|
# -*- coding: utf-8 -*-
# pylint: disable-msg=W0612,E1101
import pytest
from pandas.compat import (range, lrange, StringIO,
OrderedDict, is_platform_32bit)
import os
import numpy as np
from pandas import (Series, DataFrame, DatetimeIndex, Timestamp,
read_json, compat)
from datetime import timedelta
import pandas as pd
from pandas.util.testing import (assert_almost_equal, assert_frame_equal,
assert_series_equal, network,
ensure_clean, assert_index_equal)
import pandas.util.testing as tm
_seriesd = tm.getSeriesData()
_tsd = tm.getTimeSeriesData()
_frame = DataFrame(_seriesd)
_frame2 = DataFrame(_seriesd, columns=['D', 'C', 'B', 'A'])
_intframe = DataFrame(dict((k, v.astype(np.int64))
for k, v in compat.iteritems(_seriesd)))
_tsframe = DataFrame(_tsd)
_cat_frame = _frame.copy()
cat = ['bah'] * 5 + ['bar'] * 5 + ['baz'] * \
5 + ['foo'] * (len(_cat_frame) - 15)
_cat_frame.index = pd.CategoricalIndex(cat, name='E')
_cat_frame['E'] = list(reversed(cat))
_cat_frame['sort'] = np.arange(len(_cat_frame), dtype='int64')
_mixed_frame = _frame.copy()
class TestPandasContainer(object):
def setup_method(self, method):
self.dirpath = tm.get_data_path()
self.ts = tm.makeTimeSeries()
self.ts.name = 'ts'
self.series = tm.makeStringSeries()
self.series.name = 'series'
self.objSeries = tm.makeObjectSeries()
self.objSeries.name = 'objects'
self.empty_series = Series([], index=[])
self.empty_frame = DataFrame({})
self.frame = _frame.copy()
self.frame2 = _frame2.copy()
self.intframe = _intframe.copy()
self.tsframe = _tsframe.copy()
self.mixed_frame = _mixed_frame.copy()
self.categorical = _cat_frame.copy()
def teardown_method(self, method):
del self.dirpath
del self.ts
del self.series
del self.objSeries
del self.empty_series
del self.empty_frame
del self.frame
del self.frame2
del self.intframe
del self.tsframe
del self.mixed_frame
def test_frame_double_encoded_labels(self):
df = DataFrame([['a', 'b'], ['c', 'd']],
index=['index " 1', 'index / 2'],
columns=['a \\ b', 'y / z'])
assert_frame_equal(df, read_json(df.to_json(orient='split'),
orient='split'))
assert_frame_equal(df, read_json(df.to_json(orient='columns'),
orient='columns'))
assert_frame_equal(df, read_json(df.to_json(orient='index'),
orient='index'))
df_unser = read_json(df.to_json(orient='records'), orient='records')
assert_index_equal(df.columns, df_unser.columns)
tm.assert_numpy_array_equal(df.values, df_unser.values)
def test_frame_non_unique_index(self):
df = DataFrame([['a', 'b'], ['c', 'd']], index=[1, 1],
columns=['x', 'y'])
pytest.raises(ValueError, df.to_json, orient='index')
pytest.raises(ValueError, df.to_json, orient='columns')
assert_frame_equal(df, read_json(df.to_json(orient='split'),
orient='split'))
unser = read_json(df.to_json(orient='records'), orient='records')
tm.assert_index_equal(df.columns, unser.columns)
tm.assert_almost_equal(df.values, unser.values)
unser = read_json(df.to_json(orient='values'), orient='values')
tm.assert_numpy_array_equal(df.values, unser.values)
def test_frame_non_unique_columns(self):
df = DataFrame([['a', 'b'], ['c', 'd']], index=[1, 2],
columns=['x', 'x'])
pytest.raises(ValueError, df.to_json, orient='index')
pytest.raises(ValueError, df.to_json, orient='columns')
pytest.raises(ValueError, df.to_json, orient='records')
assert_frame_equal(df, read_json(df.to_json(orient='split'),
orient='split', dtype=False))
unser = read_json(df.to_json(orient='values'), orient='values')
tm.assert_numpy_array_equal(df.values, unser.values)
# GH4377; duplicate columns not processing correctly
df = DataFrame([['a', 'b'], ['c', 'd']], index=[
1, 2], columns=['x', 'y'])
result = read_json(df.to_json(orient='split'), orient='split')
assert_frame_equal(result, df)
def _check(df):
result = read_json(df.to_json(orient='split'), orient='split',
convert_dates=['x'])
assert_frame_equal(result, df)
for o in [[['a', 'b'], ['c', 'd']],
[[1.5, 2.5], [3.5, 4.5]],
[[1, 2.5], [3, 4.5]],
[[Timestamp('20130101'), 3.5],
[Timestamp('20130102'), 4.5]]]:
_check(DataFrame(o, index=[1, 2], columns=['x', 'x']))
def test_frame_from_json_to_json(self):
def _check_orient(df, orient, dtype=None, numpy=False,
convert_axes=True, check_dtype=True, raise_ok=None,
sort=None, check_index_type=True,
check_column_type=True, check_numpy_dtype=False):
if sort is not None:
df = df.sort_values(sort)
else:
df = df.sort_index()
# if we are not unique, then check that we are raising ValueError
# for the appropriate orients
if not df.index.is_unique and orient in ['index', 'columns']:
pytest.raises(
ValueError, lambda: df.to_json(orient=orient))
return
if (not df.columns.is_unique and
orient in ['index', 'columns', 'records']):
pytest.raises(
ValueError, lambda: df.to_json(orient=orient))
return
dfjson = df.to_json(orient=orient)
try:
unser = read_json(dfjson, orient=orient, dtype=dtype,
numpy=numpy, convert_axes=convert_axes)
except Exception as detail:
if raise_ok is not None:
if isinstance(detail, raise_ok):
return
raise
if sort is not None and sort in unser.columns:
unser = unser.sort_values(sort)
else:
unser = unser.sort_index()
if dtype is False:
check_dtype = False
if not convert_axes and df.index.dtype.type == np.datetime64:
unser.index = DatetimeIndex(
unser.index.values.astype('i8') * 1e6)
if orient == "records":
# index is not captured in this orientation
tm.assert_almost_equal(df.values, unser.values,
check_dtype=check_numpy_dtype)
tm.assert_index_equal(df.columns, unser.columns,
exact=check_column_type)
elif orient == "values":
# index and cols are not captured in this orientation
if numpy is True and df.shape == (0, 0):
assert unser.shape[0] == 0
else:
tm.assert_almost_equal(df.values, unser.values,
check_dtype=check_numpy_dtype)
elif orient == "split":
# index and col labels might not be strings
unser.index = [str(i) for i in unser.index]
unser.columns = [str(i) for i in unser.columns]
if sort is None:
unser = unser.sort_index()
tm.assert_almost_equal(df.values, unser.values,
check_dtype=check_numpy_dtype)
else:
if convert_axes:
tm.assert_frame_equal(df, unser, check_dtype=check_dtype,
check_index_type=check_index_type,
check_column_type=check_column_type)
else:
tm.assert_frame_equal(df, unser, check_less_precise=False,
check_dtype=check_dtype)
def _check_all_orients(df, dtype=None, convert_axes=True,
raise_ok=None, sort=None, check_index_type=True,
check_column_type=True):
# numpy=False
if convert_axes:
_check_orient(df, "columns", dtype=dtype, sort=sort,
check_index_type=False, check_column_type=False)
_check_orient(df, "records", dtype=dtype, sort=sort,
check_index_type=False, check_column_type=False)
_check_orient(df, "split", dtype=dtype, sort=sort,
check_index_type=False, check_column_type=False)
_check_orient(df, "index", dtype=dtype, sort=sort,
check_index_type=False, check_column_type=False)
_check_orient(df, "values", dtype=dtype, sort=sort,
check_index_type=False, check_column_type=False)
_check_orient(df, "columns", dtype=dtype,
convert_axes=False, sort=sort)
_check_orient(df, "records", dtype=dtype,
convert_axes=False, sort=sort)
_check_orient(df, "split", dtype=dtype,
convert_axes=False, sort=sort)
_check_orient(df, "index", dtype=dtype,
convert_axes=False, sort=sort)
_check_orient(df, "values", dtype=dtype,
convert_axes=False, sort=sort)
# numpy=True and raise_ok might be not None, so ignore the error
if convert_axes:
_check_orient(df, "columns", dtype=dtype, numpy=True,
raise_ok=raise_ok, sort=sort,
check_index_type=False, check_column_type=False)
_check_orient(df, "records", dtype=dtype, numpy=True,
raise_ok=raise_ok, sort=sort,
check_index_type=False, check_column_type=False)
_check_orient(df, "split", dtype=dtype, numpy=True,
raise_ok=raise_ok, sort=sort,
check_index_type=False, check_column_type=False)
_check_orient(df, "index", dtype=dtype, numpy=True,
raise_ok=raise_ok, sort=sort,
check_index_type=False, check_column_type=False)
_check_orient(df, "values", dtype=dtype, numpy=True,
raise_ok=raise_ok, sort=sort,
check_index_type=False, check_column_type=False)
_check_orient(df, "columns", dtype=dtype, numpy=True,
convert_axes=False, raise_ok=raise_ok, sort=sort)
_check_orient(df, "records", dtype=dtype, numpy=True,
convert_axes=False, raise_ok=raise_ok, sort=sort)
_check_orient(df, "split", dtype=dtype, numpy=True,
convert_axes=False, raise_ok=raise_ok, sort=sort)
_check_orient(df, "index", dtype=dtype, numpy=True,
convert_axes=False, raise_ok=raise_ok, sort=sort)
_check_orient(df, "values", dtype=dtype, numpy=True,
convert_axes=False, raise_ok=raise_ok, sort=sort)
# basic
_check_all_orients(self.frame)
assert self.frame.to_json() == self.frame.to_json(orient="columns")
_check_all_orients(self.intframe, dtype=self.intframe.values.dtype)
_check_all_orients(self.intframe, dtype=False)
# big one
# index and columns are strings as all unserialised JSON object keys
# are assumed to be strings
biggie = DataFrame(np.zeros((200, 4)),
columns=[str(i) for i in range(4)],
index=[str(i) for i in range(200)])
_check_all_orients(biggie, dtype=False, convert_axes=False)
# dtypes
_check_all_orients(DataFrame(biggie, dtype=np.float64),
dtype=np.float64, convert_axes=False)
_check_all_orients(DataFrame(biggie, dtype=np.int), dtype=np.int,
convert_axes=False)
_check_all_orients(DataFrame(biggie, dtype='U3'), dtype='U3',
convert_axes=False, raise_ok=ValueError)
# categorical
_check_all_orients(self.categorical, sort='sort', raise_ok=ValueError)
# empty
_check_all_orients(self.empty_frame, check_index_type=False,
check_column_type=False)
# time series data
_check_all_orients(self.tsframe)
# mixed data
index = pd.Index(['a', 'b', 'c', 'd', 'e'])
data = {'A': [0., 1., 2., 3., 4.],
'B': [0., 1., 0., 1., 0.],
'C': ['foo1', 'foo2', 'foo3', 'foo4', 'foo5'],
'D': [True, False, True, False, True]}
df = DataFrame(data=data, index=index)
_check_orient(df, "split", check_dtype=False)
_check_orient(df, "records", check_dtype=False)
_check_orient(df, "values", check_dtype=False)
_check_orient(df, "columns", check_dtype=False)
# index oriented is problematic as it is read back in in a transposed
# state, so the columns are interpreted as having mixed data and
# given object dtypes.
# force everything to have object dtype beforehand
_check_orient(df.transpose().transpose(), "index", dtype=False)
def test_frame_from_json_bad_data(self):
pytest.raises(ValueError, read_json, StringIO('{"key":b:a:d}'))
# too few indices
json = StringIO('{"columns":["A","B"],'
'"index":["2","3"],'
'"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}')
pytest.raises(ValueError, read_json, json,
orient="split")
# too many columns
json = StringIO('{"columns":["A","B","C"],'
'"index":["1","2","3"],'
'"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}')
pytest.raises(AssertionError, read_json, json,
orient="split")
# bad key
json = StringIO('{"badkey":["A","B"],'
'"index":["2","3"],'
'"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}')
with tm.assert_raises_regex(ValueError,
r"unexpected key\(s\): badkey"):
read_json(json, orient="split")
def test_frame_from_json_nones(self):
df = DataFrame([[1, 2], [4, 5, 6]])
unser = read_json(df.to_json())
assert np.isnan(unser[2][0])
df = DataFrame([['1', '2'], ['4', '5', '6']])
unser = read_json(df.to_json())
assert np.isnan(unser[2][0])
unser = read_json(df.to_json(), dtype=False)
assert unser[2][0] is None
unser = read_json(df.to_json(), convert_axes=False, dtype=False)
assert unser['2']['0'] is None
unser = read_json(df.to_json(), numpy=False)
assert np.isnan(unser[2][0])
unser = read_json(df.to_json(), numpy=False, dtype=False)
assert unser[2][0] is None
unser = read_json(df.to_json(), numpy=False,
convert_axes=False, dtype=False)
assert unser['2']['0'] is None
# infinities get mapped to nulls which get mapped to NaNs during
# deserialisation
df = DataFrame([[1, 2], [4, 5, 6]])
df.loc[0, 2] = np.inf
unser = read_json(df.to_json())
assert np.isnan(unser[2][0])
unser = read_json(df.to_json(), dtype=False)
assert np.isnan(unser[2][0])
df.loc[0, 2] = np.NINF
unser = read_json(df.to_json())
assert np.isnan(unser[2][0])
unser = read_json(df.to_json(), dtype=False)
assert np.isnan(unser[2][0])
@pytest.mark.skipif(is_platform_32bit(),
reason="not compliant on 32-bit, xref #15865")
def test_frame_to_json_float_precision(self):
df = pd.DataFrame([dict(a_float=0.95)])
encoded = df.to_json(double_precision=1)
assert encoded == '{"a_float":{"0":1.0}}'
df = pd.DataFrame([dict(a_float=1.95)])
encoded = df.to_json(double_precision=1)
assert encoded == '{"a_float":{"0":2.0}}'
df = pd.DataFrame([dict(a_float=-1.95)])
encoded = df.to_json(double_precision=1)
assert encoded == '{"a_float":{"0":-2.0}}'
df = pd.DataFrame([dict(a_float=0.995)])
encoded = df.to_json(double_precision=2)
assert encoded == '{"a_float":{"0":1.0}}'
df = pd.DataFrame([dict(a_float=0.9995)])
encoded = df.to_json(double_precision=3)
assert encoded == '{"a_float":{"0":1.0}}'
df = pd.DataFrame([dict(a_float=0.99999999999999944)])
encoded = df.to_json(double_precision=15)
assert encoded == '{"a_float":{"0":1.0}}'
def test_frame_to_json_except(self):
df = DataFrame([1, 2, 3])
pytest.raises(ValueError, df.to_json, orient="garbage")
def test_frame_empty(self):
df = DataFrame(columns=['jim', 'joe'])
assert not df._is_mixed_type
assert_frame_equal(read_json(df.to_json(), dtype=dict(df.dtypes)), df,
check_index_type=False)
# GH 7445
result = pd.DataFrame({'test': []}, index=[]).to_json(orient='columns')
expected = '{"test":{}}'
assert result == expected
def test_frame_empty_mixedtype(self):
# mixed type
df = DataFrame(columns=['jim', 'joe'])
df['joe'] = df['joe'].astype('i8')
assert df._is_mixed_type
assert_frame_equal(read_json(df.to_json(), dtype=dict(df.dtypes)), df,
check_index_type=False)
def test_frame_mixedtype_orient(self): # GH10289
vals = [[10, 1, 'foo', .1, .01],
[20, 2, 'bar', .2, .02],
[30, 3, 'baz', .3, .03],
[40, 4, 'qux', .4, .04]]
df = DataFrame(vals, index=list('abcd'),
columns=['1st', '2nd', '3rd', '4th', '5th'])
assert df._is_mixed_type
right = df.copy()
for orient in ['split', 'index', 'columns']:
inp = df.to_json(orient=orient)
left = read_json(inp, orient=orient, convert_axes=False)
assert_frame_equal(left, right)
right.index = np.arange(len(df))
inp = df.to_json(orient='records')
left = read_json(inp, orient='records', convert_axes=False)
assert_frame_equal(left, right)
right.columns = np.arange(df.shape[1])
inp = df.to_json(orient='values')
left = read_json(inp, orient='values', convert_axes=False)
assert_frame_equal(left, right)
def test_v12_compat(self):
df = DataFrame(
[[1.56808523, 0.65727391, 1.81021139, -0.17251653],
[-0.2550111, -0.08072427, -0.03202878, -0.17581665],
[1.51493992, 0.11805825, 1.629455, -1.31506612],
[-0.02765498, 0.44679743, 0.33192641, -0.27885413],
[0.05951614, -2.69652057, 1.28163262, 0.34703478]],
columns=['A', 'B', 'C', 'D'],
index=pd.date_range('2000-01-03', '2000-01-07'))
df['date'] = pd.Timestamp('19920106 18:21:32.12')
df.iloc[3, df.columns.get_loc('date')] = pd.Timestamp('20130101')
df['modified'] = df['date']
df.iloc[1, df.columns.get_loc('modified')] = pd.NaT
v12_json = os.path.join(self.dirpath, 'tsframe_v012.json')
df_unser = pd.read_json(v12_json)
assert_frame_equal(df, df_unser)
df_iso = df.drop(['modified'], axis=1)
v12_iso_json = os.path.join(self.dirpath, 'tsframe_iso_v012.json')
df_unser_iso = pd.read_json(v12_iso_json)
assert_frame_equal(df_iso, df_unser_iso)
def test_blocks_compat_GH9037(self):
index = pd.date_range('20000101', periods=10, freq='H')
df_mixed = DataFrame(OrderedDict(
float_1=[-0.92077639, 0.77434435, 1.25234727, 0.61485564,
-0.60316077, 0.24653374, 0.28668979, -2.51969012,
0.95748401, -1.02970536],
int_1=[19680418, 75337055, 99973684, 65103179, 79373900,
40314334, 21290235, 4991321, 41903419, 16008365],
str_1=['78c608f1', '64a99743', '13d2ff52', 'ca7f4af2', '97236474',
'bde7e214', '1a6bde47', 'b1190be5', '7a669144', '8d64d068'],
float_2=[-0.0428278, -1.80872357, 3.36042349, -0.7573685,
-0.48217572, 0.86229683, 1.08935819, 0.93898739,
-0.03030452, 1.43366348],
str_2=['14f04af9', 'd085da90', '4bcfac83', '81504caf', '2ffef4a9',
'08e2f5c4', '07e1af03', 'addbd4a7', '1f6a09ba', '4bfc4d87'],
int_2=[86967717, 98098830, 51927505, 20372254, 12601730, 20884027,
34193846, 10561746, 24867120, 76131025]
), index=index)
# JSON deserialisation always creates unicode strings
df_mixed.columns = df_mixed.columns.astype('unicode')
df_roundtrip = pd.read_json(df_mixed.to_json(orient='split'),
orient='split')
assert_frame_equal(df_mixed, df_roundtrip,
check_index_type=True,
check_column_type=True,
check_frame_type=True,
by_blocks=True,
check_exact=True)
def test_series_non_unique_index(self):
s = Series(['a', 'b'], index=[1, 1])
pytest.raises(ValueError, s.to_json, orient='index')
assert_series_equal(s, read_json(s.to_json(orient='split'),
orient='split', typ='series'))
unser = read_json(s.to_json(orient='records'),
orient='records', typ='series')
tm.assert_numpy_array_equal(s.values, unser.values)
def test_series_from_json_to_json(self):
def _check_orient(series, orient, dtype=None, numpy=False,
check_index_type=True):
series = series.sort_index()
unser = read_json(series.to_json(orient=orient),
typ='series', orient=orient, numpy=numpy,
dtype=dtype)
unser = unser.sort_index()
if orient == "records" or orient == "values":
assert_almost_equal(series.values, unser.values)
else:
if orient == "split":
assert_series_equal(series, unser,
check_index_type=check_index_type)
else:
assert_series_equal(series, unser, check_names=False,
check_index_type=check_index_type)
def _check_all_orients(series, dtype=None, check_index_type=True):
_check_orient(series, "columns", dtype=dtype,
check_index_type=check_index_type)
_check_orient(series, "records", dtype=dtype,
check_index_type=check_index_type)
_check_orient(series, "split", dtype=dtype,
check_index_type=check_index_type)
_check_orient(series, "index", dtype=dtype,
check_index_type=check_index_type)
_check_orient(series, "values", dtype=dtype)
_check_orient(series, "columns", dtype=dtype, numpy=True,
check_index_type=check_index_type)
_check_orient(series, "records", dtype=dtype, numpy=True,
check_index_type=check_index_type)
_check_orient(series, "split", dtype=dtype, numpy=True,
check_index_type=check_index_type)
_check_orient(series, "index", dtype=dtype, numpy=True,
check_index_type=check_index_type)
_check_orient(series, "values", dtype=dtype, numpy=True,
check_index_type=check_index_type)
# basic
_check_all_orients(self.series)
assert self.series.to_json() == self.series.to_json(orient="index")
objSeries = Series([str(d) for d in self.objSeries],
index=self.objSeries.index,
name=self.objSeries.name)
_check_all_orients(objSeries, dtype=False)
        # empty_series has an empty index with object dtype,
        # which cannot be reverted
assert self.empty_series.index.dtype == np.object_
_check_all_orients(self.empty_series, check_index_type=False)
_check_all_orients(self.ts)
# dtype
s = Series(lrange(6), index=['a', 'b', 'c', 'd', 'e', 'f'])
_check_all_orients(Series(s, dtype=np.float64), dtype=np.float64)
_check_all_orients(Series(s, dtype=np.int), dtype=np.int)
def test_series_to_json_except(self):
s = Series([1, 2, 3])
pytest.raises(ValueError, s.to_json, orient="garbage")
def test_series_from_json_precise_float(self):
s = Series([4.56, 4.56, 4.56])
result = read_json(s.to_json(), typ='series', precise_float=True)
assert_series_equal(result, s, check_index_type=False)
def test_frame_from_json_precise_float(self):
df = DataFrame([[4.56, 4.56, 4.56], [4.56, 4.56, 4.56]])
result = read_json(df.to_json(), precise_float=True)
assert_frame_equal(result, df, check_index_type=False,
check_column_type=False)
def test_typ(self):
s = Series(lrange(6), index=['a', 'b', 'c',
'd', 'e', 'f'], dtype='int64')
result = read_json(s.to_json(), typ=None)
assert_series_equal(result, s)
def test_reconstruction_index(self):
df = DataFrame([[1, 2, 3], [4, 5, 6]])
result = read_json(df.to_json())
assert_frame_equal(result, df)
df = DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]}, index=['A', 'B', 'C'])
result = read_json(df.to_json())
assert_frame_equal(result, df)
def test_path(self):
with ensure_clean('test.json') as path:
for df in [self.frame, self.frame2, self.intframe, self.tsframe,
self.mixed_frame]:
df.to_json(path)
read_json(path)
def test_axis_dates(self):
# frame
json = self.tsframe.to_json()
result = read_json(json)
assert_frame_equal(result, self.tsframe)
# series
json = self.ts.to_json()
result = read_json(json, typ='series')
assert_series_equal(result, self.ts, check_names=False)
assert result.name is None
def test_convert_dates(self):
# frame
df = self.tsframe.copy()
df['date'] = Timestamp('20130101')
json = df.to_json()
result = read_json(json)
assert_frame_equal(result, df)
df['foo'] = 1.
json = df.to_json(date_unit='ns')
result = read_json(json, convert_dates=False)
expected = df.copy()
expected['date'] = expected['date'].values.view('i8')
expected['foo'] = expected['foo'].astype('int64')
assert_frame_equal(result, expected)
# series
ts = Series(Timestamp('20130101'), index=self.ts.index)
json = ts.to_json()
result = read_json(json, typ='series')
assert_series_equal(result, ts)
def test_convert_dates_infer(self):
# GH10747
from pandas.io.json import dumps
infer_words = ['trade_time', 'date', 'datetime', 'sold_at',
'modified', 'timestamp', 'timestamps']
for infer_word in infer_words:
data = [{'id': 1, infer_word: 1036713600000}, {'id': 2}]
expected = DataFrame([[1, Timestamp('2002-11-08')], [2, pd.NaT]],
columns=['id', infer_word])
result = read_json(dumps(data))[['id', infer_word]]
assert_frame_equal(result, expected)
def test_date_format_frame(self):
df = self.tsframe.copy()
def test_w_date(date, date_unit=None):
df['date'] = Timestamp(date)
df.iloc[1, df.columns.get_loc('date')] = pd.NaT
df.iloc[5, df.columns.get_loc('date')] = pd.NaT
if date_unit:
json = df.to_json(date_format='iso', date_unit=date_unit)
else:
json = df.to_json(date_format='iso')
result = read_json(json)
assert_frame_equal(result, df)
test_w_date('20130101 20:43:42.123')
test_w_date('20130101 20:43:42', date_unit='s')
test_w_date('20130101 20:43:42.123', date_unit='ms')
test_w_date('20130101 20:43:42.123456', date_unit='us')
test_w_date('20130101 20:43:42.123456789', date_unit='ns')
pytest.raises(ValueError, df.to_json, date_format='iso',
date_unit='foo')
def test_date_format_series(self):
def test_w_date(date, date_unit=None):
ts = Series(Timestamp(date), index=self.ts.index)
ts.iloc[1] = pd.NaT
ts.iloc[5] = pd.NaT
if date_unit:
json = ts.to_json(date_format='iso', date_unit=date_unit)
else:
json = ts.to_json(date_format='iso')
result = read_json(json, typ='series')
assert_series_equal(result, ts)
test_w_date('20130101 20:43:42.123')
test_w_date('20130101 20:43:42', date_unit='s')
test_w_date('20130101 20:43:42.123', date_unit='ms')
test_w_date('20130101 20:43:42.123456', date_unit='us')
test_w_date('20130101 20:43:42.123456789', date_unit='ns')
ts = Series(Timestamp('20130101 20:43:42.123'), index=self.ts.index)
pytest.raises(ValueError, ts.to_json, date_format='iso',
date_unit='foo')
def test_date_unit(self):
df = self.tsframe.copy()
df['date'] = Timestamp('20130101 20:43:42')
dl = df.columns.get_loc('date')
df.iloc[1, dl] = Timestamp('19710101 20:43:42')
df.iloc[2, dl] = Timestamp('21460101 20:43:42')
df.iloc[4, dl] = pd.NaT
for unit in ('s', 'ms', 'us', 'ns'):
json = df.to_json(date_format='epoch', date_unit=unit)
# force date unit
result = read_json(json, date_unit=unit)
assert_frame_equal(result, df)
# detect date unit
result = read_json(json, date_unit=None)
assert_frame_equal(result, df)
def test_weird_nested_json(self):
# this used to core dump the parser
s = r'''{
"status": "success",
"data": {
"posts": [
{
"id": 1,
"title": "A blog post",
"body": "Some useful content"
},
{
"id": 2,
"title": "Another blog post",
"body": "More content"
}
]
}
}'''
read_json(s)
def test_doc_example(self):
dfj2 = DataFrame(np.random.randn(5, 2), columns=list('AB'))
dfj2['date'] = Timestamp('20130101')
dfj2['ints'] = lrange(5)
dfj2['bools'] = True
dfj2.index = pd.date_range('20130101', periods=5)
json = dfj2.to_json()
result = read_json(json, dtype={'ints': np.int64, 'bools': np.bool_})
assert_frame_equal(result, result)
def test_misc_example(self):
# parsing unordered input fails
result = read_json('[{"a": 1, "b": 2}, {"b":2, "a" :1}]', numpy=True)
expected = DataFrame([[1, 2], [1, 2]], columns=['a', 'b'])
error_msg = """DataFrame\\.index are different
DataFrame\\.index values are different \\(100\\.0 %\\)
\\[left\\]: Index\\(\\[u?'a', u?'b'\\], dtype='object'\\)
\\[right\\]: RangeIndex\\(start=0, stop=2, step=1\\)"""
with tm.assert_raises_regex(AssertionError, error_msg):
assert_frame_equal(result, expected, check_index_type=False)
result = read_json('[{"a": 1, "b": 2}, {"b":2, "a" :1}]')
expected = DataFrame([[1, 2], [1, 2]], columns=['a', 'b'])
assert_frame_equal(result, expected)
@network
def test_round_trip_exception_(self):
# GH 3867
csv = 'https://raw.github.com/hayd/lahman2012/master/csvs/Teams.csv'
df = pd.read_csv(csv)
s = df.to_json()
result = pd.read_json(s)
assert_frame_equal(result.reindex(
index=df.index, columns=df.columns), df)
@network
def test_url(self):
url = 'https://api.github.com/repos/pandas-dev/pandas/issues?per_page=5' # noqa
result = read_json(url, convert_dates=True)
for c in ['created_at', 'closed_at', 'updated_at']:
assert result[c].dtype == 'datetime64[ns]'
def test_timedelta(self):
converter = lambda x: pd.to_timedelta(x, unit='ms')
s = Series([timedelta(23), timedelta(seconds=5)])
assert s.dtype == 'timedelta64[ns]'
result = pd.read_json(s.to_json(), typ='series').apply(converter)
assert_series_equal(result, s)
s = Series([timedelta(23), timedelta(seconds=5)],
index=pd.Index([0, 1]))
assert s.dtype == 'timedelta64[ns]'
result = pd.read_json(s.to_json(), typ='series').apply(converter)
assert_series_equal(result, s)
frame = DataFrame([timedelta(23), timedelta(seconds=5)])
assert frame[0].dtype == 'timedelta64[ns]'
assert_frame_equal(frame, pd.read_json(frame.to_json())
.apply(converter))
frame = DataFrame({'a': [timedelta(days=23), timedelta(seconds=5)],
'b': [1, 2],
'c': pd.date_range(start='20130101', periods=2)})
result = pd.read_json(frame.to_json(date_unit='ns'))
result['a'] = pd.to_timedelta(result.a, unit='ns')
result['c'] = pd.to_datetime(result.c)
assert_frame_equal(frame, result)
def test_mixed_timedelta_datetime(self):
frame = DataFrame({'a': [timedelta(23), pd.Timestamp('20130101')]},
dtype=object)
expected = DataFrame({'a': [pd.Timedelta(frame.a[0]).value,
pd.Timestamp(frame.a[1]).value]})
result = pd.read_json(frame.to_json(date_unit='ns'),
dtype={'a': 'int64'})
assert_frame_equal(result, expected, check_index_type=False)
def test_default_handler(self):
value = object()
frame = DataFrame({'a': [7, value]})
expected = DataFrame({'a': [7, str(value)]})
result = pd.read_json(frame.to_json(default_handler=str))
assert_frame_equal(expected, result, check_index_type=False)
def test_default_handler_indirect(self):
from pandas.io.json import dumps
def default(obj):
if isinstance(obj, complex):
return [('mathjs', 'Complex'),
('re', obj.real),
('im', obj.imag)]
return str(obj)
df_list = [9, DataFrame({'a': [1, 'STR', complex(4, -5)],
'b': [float('nan'), None, 'N/A']},
columns=['a', 'b'])]
expected = ('[9,[[1,null],["STR",null],[[["mathjs","Complex"],'
'["re",4.0],["im",-5.0]],"N\\/A"]]]')
assert dumps(df_list, default_handler=default,
orient="values") == expected
def test_default_handler_numpy_unsupported_dtype(self):
# GH12554 to_json raises 'Unhandled numpy dtype 15'
df = DataFrame({'a': [1, 2.3, complex(4, -5)],
'b': [float('nan'), None, complex(1.2, 0)]},
columns=['a', 'b'])
expected = ('[["(1+0j)","(nan+0j)"],'
'["(2.3+0j)","(nan+0j)"],'
'["(4-5j)","(1.2+0j)"]]')
assert df.to_json(default_handler=str, orient="values") == expected
def test_default_handler_raises(self):
def my_handler_raises(obj):
raise TypeError("raisin")
pytest.raises(TypeError,
DataFrame({'a': [1, 2, object()]}).to_json,
default_handler=my_handler_raises)
pytest.raises(TypeError,
DataFrame({'a': [1, 2, complex(4, -5)]}).to_json,
default_handler=my_handler_raises)
def test_categorical(self):
# GH4377 df.to_json segfaults with non-ndarray blocks
df = DataFrame({"A": ["a", "b", "c", "a", "b", "b", "a"]})
df["B"] = df["A"]
expected = df.to_json()
df["B"] = df["A"].astype('category')
assert expected == df.to_json()
s = df["A"]
sc = df["B"]
assert s.to_json() == sc.to_json()
def test_datetime_tz(self):
# GH4377 df.to_json segfaults with non-ndarray blocks
tz_range = pd.date_range('20130101', periods=3, tz='US/Eastern')
tz_naive = tz_range.tz_convert('utc').tz_localize(None)
df = DataFrame({
'A': tz_range,
'B': pd.date_range('20130101', periods=3)})
df_naive = df.copy()
df_naive['A'] = tz_naive
expected = df_naive.to_json()
assert expected == df.to_json()
stz = Series(tz_range)
s_naive = Series(tz_naive)
assert stz.to_json() == s_naive.to_json()
def test_sparse(self):
# GH4377 df.to_json segfaults with non-ndarray blocks
df = pd.DataFrame(np.random.randn(10, 4))
df.loc[:8] = np.nan
sdf = df.to_sparse()
expected = df.to_json()
assert expected == sdf.to_json()
s = pd.Series(np.random.randn(10))
s.loc[:8] = np.nan
ss = s.to_sparse()
expected = s.to_json()
assert expected == ss.to_json()
def test_tz_is_utc(self):
from pandas.io.json import dumps
exp = '"2013-01-10T05:00:00.000Z"'
ts = Timestamp('2013-01-10 05:00:00Z')
assert dumps(ts, iso_dates=True) == exp
dt = ts.to_pydatetime()
assert dumps(dt, iso_dates=True) == exp
ts = Timestamp('2013-01-10 00:00:00', tz='US/Eastern')
assert dumps(ts, iso_dates=True) == exp
dt = ts.to_pydatetime()
assert dumps(dt, iso_dates=True) == exp
ts = Timestamp('2013-01-10 00:00:00-0500')
assert dumps(ts, iso_dates=True) == exp
dt = ts.to_pydatetime()
assert dumps(dt, iso_dates=True) == exp
def test_tz_range_is_utc(self):
from pandas.io.json import dumps
exp = '["2013-01-01T05:00:00.000Z","2013-01-02T05:00:00.000Z"]'
dfexp = ('{"DT":{'
'"0":"2013-01-01T05:00:00.000Z",'
'"1":"2013-01-02T05:00:00.000Z"}}')
tz_range = pd.date_range('2013-01-01 05:00:00Z', periods=2)
assert dumps(tz_range, iso_dates=True) == exp
dti = pd.DatetimeIndex(tz_range)
assert dumps(dti, iso_dates=True) == exp
df = DataFrame({'DT': dti})
assert dumps(df, iso_dates=True) == dfexp
tz_range = pd.date_range('2013-01-01 00:00:00', periods=2,
tz='US/Eastern')
assert dumps(tz_range, iso_dates=True) == exp
dti = pd.DatetimeIndex(tz_range)
assert dumps(dti, iso_dates=True) == exp
df = DataFrame({'DT': dti})
assert dumps(df, iso_dates=True) == dfexp
tz_range = pd.date_range('2013-01-01 00:00:00-0500', periods=2)
assert dumps(tz_range, iso_dates=True) == exp
dti = pd.DatetimeIndex(tz_range)
assert dumps(dti, iso_dates=True) == exp
df = DataFrame({'DT': dti})
assert dumps(df, iso_dates=True) == dfexp
def test_read_jsonl(self):
# GH9180
result = read_json('{"a": 1, "b": 2}\n{"b":2, "a" :1}\n', lines=True)
expected = DataFrame([[1, 2], [1, 2]], columns=['a', 'b'])
assert_frame_equal(result, expected)
def test_read_jsonl_unicode_chars(self):
# GH15132: non-ascii unicode characters
# \u201d == RIGHT DOUBLE QUOTATION MARK
# simulate file handle
json = '{"a": "foo”", "b": "bar"}\n{"a": "foo", "b": "bar"}\n'
json = StringIO(json)
result = read_json(json, lines=True)
expected = DataFrame([[u"foo\u201d", "bar"], ["foo", "bar"]],
columns=['a', 'b'])
assert_frame_equal(result, expected)
# simulate string
json = '{"a": "foo”", "b": "bar"}\n{"a": "foo", "b": "bar"}\n'
result = read_json(json, lines=True)
expected = DataFrame([[u"foo\u201d", "bar"], ["foo", "bar"]],
columns=['a', 'b'])
assert_frame_equal(result, expected)
def test_to_jsonl(self):
# GH9180
df = DataFrame([[1, 2], [1, 2]], columns=['a', 'b'])
result = df.to_json(orient="records", lines=True)
expected = '{"a":1,"b":2}\n{"a":1,"b":2}'
assert result == expected
df = DataFrame([["foo}", "bar"], ['foo"', "bar"]], columns=['a', 'b'])
result = df.to_json(orient="records", lines=True)
expected = '{"a":"foo}","b":"bar"}\n{"a":"foo\\"","b":"bar"}'
assert result == expected
assert_frame_equal(pd.read_json(result, lines=True), df)
# GH15096: escaped characters in columns and data
df = DataFrame([["foo\\", "bar"], ['foo"', "bar"]],
columns=["a\\", 'b'])
result = df.to_json(orient="records", lines=True)
expected = ('{"a\\\\":"foo\\\\","b":"bar"}\n'
'{"a\\\\":"foo\\"","b":"bar"}')
assert result == expected
assert_frame_equal(pd.read_json(result, lines=True), df)
def test_latin_encoding(self):
if compat.PY2:
tm.assert_raises_regex(
TypeError, r'\[unicode\] is not implemented as a table column')
return
# GH 13774
pytest.skip("encoding not implemented in .to_json(), "
"xref #13774")
values = [[b'E\xc9, 17', b'', b'a', b'b', b'c'],
[b'E\xc9, 17', b'a', b'b', b'c'],
[b'EE, 17', b'', b'a', b'b', b'c'],
[b'E\xc9, 17', b'\xf8\xfc', b'a', b'b', b'c'],
[b'', b'a', b'b', b'c'],
[b'\xf8\xfc', b'a', b'b', b'c'],
[b'A\xf8\xfc', b'', b'a', b'b', b'c'],
[np.nan, b'', b'b', b'c'],
[b'A\xf8\xfc', np.nan, b'', b'b', b'c']]
def _try_decode(x, encoding='latin-1'):
try:
return x.decode(encoding)
except AttributeError:
return x
# not sure how to remove latin-1 from code in python 2 and 3
values = [[_try_decode(x) for x in y] for y in values]
examples = []
for dtype in ['category', object]:
for val in values:
examples.append(Series(val, dtype=dtype))
def roundtrip(s, encoding='latin-1'):
with ensure_clean('test.json') as path:
s.to_json(path, encoding=encoding)
retr = read_json(path, encoding=encoding)
assert_series_equal(s, retr, check_categorical=False)
for s in examples:
roundtrip(s)
def test_data_frame_size_after_to_json(self):
# GH15344
df = DataFrame({'a': [str(1)]})
size_before = df.memory_usage(index=True, deep=True).sum()
df.to_json()
size_after = df.memory_usage(index=True, deep=True).sum()
assert size_before == size_after
|
bsd-3-clause
|
reynoldsnlp/bayes-morph-soc-net
|
artificial-data/generate_tables.py
|
1
|
10494
|
"""Generate artificial input morphological tables."""
from collections import Counter
from pathlib import Path
from pprint import pprint
from random import choice
from random import choices
from statistics import mean
from string import ascii_lowercase
from string import ascii_uppercase
# import sys
from matplotlib import pyplot as plt
from networkx import Graph
from morph_nx import MorphGraph
Path('autogen').mkdir(exist_ok=True)
class MorphTable():
"""Tab-separated morphological table of the following format:
typeFreq A B C D E F
NA a x jj uu ww ttt
NA a m jj vv ww iii
NA b m y vv xx iii
NA b n y kk xx jjj
NA c n z kk yy jjj
NA c o z ll yy kkk
...
"""
def __init__(self, source=None, e_min=2, e_max=24, num_cells=6,
num_classes=24):
"""Generate a morphological table."""
self.source = source
self.e_min = e_min
self.e_max = e_max
self.num_cells = num_cells
self.num_classes = num_classes
self.AMs = self.gen_AMs()
if self.source is None:
self.AM_dict = {i: [next(self.AMs)
for _ in range(choice(range(e_min, e_max)))]
for i in range(num_cells)}
self.gen_random_table()
self.get_metrics()
elif isinstance(self.source, Graph): # networkx Graph
self.get_table_from_graph()
self.get_metrics()
try:
assert self.mean_degree == self.source.avg_deg()
assert self.mean_edge_weight == self.source.avg_wght()
except AssertionError as e:
print(f'MorphGraph metrics:\t{self.source.avg_deg():.4f}\t'
f'{self.source.avg_wght():.4f}')
print(f'MorphTable metrics:\t{self.mean_degree:.4f}\t'
f'{self.mean_edge_weight:.4f}')
# raise e
elif isinstance(self.source, str): # filename
self.get_table_from_file()
self.get_metrics()
else:
print(type(self.source))
raise NotImplementedError
def __repr__(self):
repr = ['typeFreq\t' + '\t'.join(ascii_uppercase[:self.num_cells])]
for r in self.tbl:
repr.append('NA\t' + '\t'.join(r))
return '\n'.join(repr)
def gen_random_table(self):
"""Generate table randomly."""
self.tbl = set()
while len(self.tbl) < self.num_classes:
self.tbl.add(tuple(choice(self.AM_dict[i])
for i in range(self.num_cells)))
self.tbl = list(self.tbl)
def get_table_from_graph(self):
"""Generate table structure from MorphGraph (custom networkx Graph)."""
self.tbl = [[None] * self.num_cells for _ in range(self.num_classes)]
g = self.source
# Place exponents in the table
for u, v, d in g.edges(data=True):
shared_MSPSs = d['weight']
for msps in shared_MSPSs:
rows = {u, v} # all rows that share this exp
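                # Grow `rows` to the full connected set of classes that share
                # this exponent: keep sweeping all edges until no new row is
                # added (a simple fixed-point iteration).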
just_saw_a_new_one = True
while just_saw_a_new_one:
just_saw_a_new_one = False
for U, V, D in g.edges(data=True):
if U in rows and msps in D['weight'] and V not in rows:
rows.add(V)
just_saw_a_new_one = True
if V in rows and msps in D['weight'] and U not in rows:
rows.add(U)
just_saw_a_new_one = True
existing_exps = set(self.tbl[r][msps] for r in rows) - {None}
lee = len(existing_exps)
                if lee == 0:
                    exponent = '_' + next(self.AMs)
                elif lee == 1:
                    continue  # all rows already share the placed exponent
                else:
                    pprint(self.tbl)
                    print(rows, msps)
                    raise AttributeError('Whaaaaa?!')
for r in rows:
self.tbl[r][msps] = exponent
self.tbl = [[e or next(self.AMs) for e in row] for row in self.tbl]
def get_table_from_file(self):
"""Import table structure from tab-separated file."""
with Path(self.source).open() as f:
self.tbl = []
for line in f.readlines()[1:]:
self.tbl.append(tuple(line.strip().split('\t')[1:]))
assert len(set([len(r) for r in self.tbl])) == 1
self.num_cells = len(self.tbl[0])
self.num_classes = len(self.tbl)
def get_col(self, index):
"""Return column of `self.tbl`."""
return [row[index] for row in self.tbl]
def get_matching_rows(self, msps, exponent):
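        """Return indices of classes whose cell `msps` equals `exponent`."""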
return [i for i, row in enumerate(self.tbl)
if self.tbl[i][msps] == exponent]
@staticmethod
def gen_AMs():
"""Generate list of unique 'allomorphs'."""
gem = 0 # duplication factor
while True:
gem += 1
for c in ascii_lowercase:
yield c * gem
def get_metrics(self):
"""Calculate mean degree and mean edge weight.
        Mean degree - On average, how many other classes each class is
            connected to
        Mean edge weight - On average, how many exponents each pair of
            connected classes shares
        """
degs = []
weights = []
for row in range(self.num_classes):
cell_counter = Counter()
for other_row in range(self.num_classes):
if other_row != row:
for msps in range(self.num_cells):
if self.tbl[other_row][msps] == self.tbl[row][msps]:
cell_counter.update([other_row])
degs.append(len(cell_counter))
weights.extend(cell_counter.values())
md = mean(degs)
self.mean_degree = md
self.norm_mean_degree = md / (self.num_classes - 1)
mew = mean(weights)
self.mean_edge_weight = mew
self.norm_mean_edge_weight = mew / (self.num_cells - 1)
def mutate(self, MSPSs=1, cells=1, guaranteed=False):
"""Mutate table by changing x exponents in each MSPS.
x -- How many cells to change per MSPS
prob -- Probability that any given MSPS will actually be changed
guaranteed -- Guarantee that exponent is not replaced by itself, unless
there is only one exponent for that MSPS
"""
for msps in choices(range(self.num_cells), k=MSPSs):
already_changed = set()
for _ in range(cells):
pool = [row[msps] for row in self.tbl]
victim_row = choice(range(self.num_classes))
                while ((msps, victim_row) in already_changed
                       and len(already_changed) < self.num_classes):
victim_row = choice(range(self.num_classes))
new_exp = choice(pool)
if guaranteed and len(set(pool)) > 1:
while self.tbl[victim_row][msps] == new_exp:
new_exp = choice(pool)
new_row = list(self.tbl[victim_row])
new_row[msps] = new_exp
self.tbl[victim_row] = tuple(new_row)
already_changed.add((msps, victim_row))
self.get_metrics()
if __name__ == '__main__':
NORM_LIMS = (-0.1, 1.1)
from_nx = []
for i in range(1, 24):
for j in range(1, 6):
print(i, j)
g = MorphGraph(24, i, j)
print(g.avg_deg(), g.avg_wght())
from_nx.append(MorphTable(g))
nx_mean_degs = [mt.norm_mean_degree for mt in from_nx]
nx_mean_weights = [mt.norm_mean_edge_weight for mt in from_nx]
plt.scatter(nx_mean_degs, nx_mean_weights, marker='o', color='r')
plt.title('Random generation of tables from networkx Graphs')
plt.xlabel('Mean degree')
plt.ylabel('Mean edge weight')
plt.xlim(NORM_LIMS)
plt.ylim(NORM_LIMS)
plt.show()
    jeffs = [MorphTable(str(fname)) for fname in Path('.').glob('data_6.*')]
orig_mean_degs = [mt.norm_mean_degree for mt in jeffs]
orig_mean_weights = [mt.norm_mean_edge_weight for mt in jeffs]
plt.scatter(orig_mean_degs, orig_mean_weights, marker='o', color='r')
mean_degs = []
mean_weights = []
for mt in jeffs:
for i in range(100):
mt.mutate()
mean_degs.append(mt.norm_mean_degree)
mean_weights.append(mt.norm_mean_edge_weight)
# print(f'writing file {i}...', file=sys.stderr)
# with Path(f'autogen/{str(i).zfill(4)}.txt').open('w') as f:
# print(mt, file=f)
plt.scatter(mean_degs, mean_weights, marker='.')
plt.title('Incremental random mutation of 6.x tables')
plt.xlabel('Normalized mean degree')
plt.ylabel('Normalized mean edge weight')
plt.xlim(NORM_LIMS)
plt.ylim(NORM_LIMS)
# axes = plt.gca()
# xlim = axes.get_xlim()
# ylim = axes.get_ylim()
plt.show()
mean_degs = []
mean_weights = []
for i in range(1000):
mt = MorphTable()
mean_degs.append(mt.norm_mean_degree)
mean_weights.append(mt.norm_mean_edge_weight)
plt.scatter(mean_degs, mean_weights, marker='.')
plt.title('Random generation of tables')
plt.xlabel('Mean degree')
plt.ylabel('Mean edge weight')
plt.xlim(NORM_LIMS)
plt.ylim(NORM_LIMS)
plt.show()
    langs = [(MorphTable(str(f)), f.stem)
             for f in Path('../language-data').glob('*.txt')]
lang_mean_degs = [mt.norm_mean_degree for mt, name in langs]
lang_mean_weights = [mt.norm_mean_edge_weight for mt, name in langs]
lang_names = [name for mt, name in langs]
    fig, ax = plt.subplots()
ax.scatter(lang_mean_degs, lang_mean_weights, marker='o', color='b')
for x, y, txt in zip(lang_mean_degs, lang_mean_weights, lang_names):
ax.annotate(txt, (x, y))
mean_degs = []
mean_weights = []
for mt, name in langs:
for i in range(100):
mt.mutate()
mean_degs.append(mt.norm_mean_degree)
mean_weights.append(mt.norm_mean_edge_weight)
plt.scatter(mean_degs, mean_weights, marker='.')
plt.title('Incremental random mutation of natural language tables')
plt.xlabel('Normalized mean degree')
plt.ylabel('Normalized mean edge weight')
plt.xlim(NORM_LIMS)
plt.ylim(NORM_LIMS)
plt.show()
|
gpl-3.0
|
alvarofierroclavero/scikit-learn
|
examples/classification/plot_digits_classification.py
|
289
|
2397
|
"""
================================
Recognizing hand-written digits
================================
An example showing how scikit-learn can be used to recognize images of
hand-written digits.
This example is commented in the
:ref:`tutorial section of the user manual <introduction>`.
"""
print(__doc__)
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# License: BSD 3 clause
# Standard scientific Python imports
import matplotlib.pyplot as plt
# Import datasets, classifiers and performance metrics
from sklearn import datasets, svm, metrics
# The digits dataset
digits = datasets.load_digits()
# The data that we are interested in is made of 8x8 images of digits, let's
# have a look at the first 3 images, stored in the `images` attribute of the
# dataset. If we were working from image files, we could load them using
# pylab.imread. Note that each image must have the same size. For these
# images, we know which digit they represent: it is given in the 'target' of
# the dataset.
images_and_labels = list(zip(digits.images, digits.target))
for index, (image, label) in enumerate(images_and_labels[:4]):
plt.subplot(2, 4, index + 1)
plt.axis('off')
plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')
plt.title('Training: %i' % label)
# To apply a classifier on this data, we need to flatten the image, to
# turn the data in a (samples, feature) matrix:
n_samples = len(digits.images)
data = digits.images.reshape((n_samples, -1))
# Create a classifier: a support vector classifier
classifier = svm.SVC(gamma=0.001)
# We learn the digits on the first half of the digits
classifier.fit(data[:n_samples // 2], digits.target[:n_samples // 2])
# Now predict the value of the digit on the second half:
expected = digits.target[n_samples // 2:]
predicted = classifier.predict(data[n_samples // 2:])
print("Classification report for classifier %s:\n%s\n"
% (classifier, metrics.classification_report(expected, predicted)))
print("Confusion matrix:\n%s" % metrics.confusion_matrix(expected, predicted))
images_and_predictions = list(zip(digits.images[n_samples // 2:], predicted))
for index, (image, prediction) in enumerate(images_and_predictions[:4]):
plt.subplot(2, 4, index + 5)
plt.axis('off')
plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')
plt.title('Prediction: %i' % prediction)
plt.show()
|
bsd-3-clause
|
mne-tools/mne-tools.github.io
|
stable/_downloads/e2ab6e33a74484d0a12d24af03cb3f79/time_frequency_simulated.py
|
18
|
8475
|
"""
======================================================================
Time-frequency on simulated data (Multitaper vs. Morlet vs. Stockwell)
======================================================================
This example demonstrates the different time-frequency estimation methods
on simulated data. It shows the time-frequency resolution trade-off
and the problem of estimation variance. In addition it highlights
alternative functions for generating TFRs without averaging across
trials, or by operating on numpy arrays.
"""
# Authors: Hari Bharadwaj <[email protected]>
# Denis Engemann <[email protected]>
# Chris Holdgraf <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
from matplotlib import pyplot as plt
from mne import create_info, EpochsArray
from mne.baseline import rescale
from mne.time_frequency import (tfr_multitaper, tfr_stockwell, tfr_morlet,
tfr_array_morlet)
from mne.viz import centers_to_edges
print(__doc__)
###############################################################################
# Simulate data
# -------------
#
# We'll simulate data with a known spectro-temporal structure.
sfreq = 1000.0
ch_names = ['SIM0001', 'SIM0002']
ch_types = ['grad', 'grad']
info = create_info(ch_names=ch_names, sfreq=sfreq, ch_types=ch_types)
n_times = 1024 # Just over 1 second epochs
n_epochs = 40
seed = 42
rng = np.random.RandomState(seed)
noise = rng.randn(n_epochs, len(ch_names), n_times)
# Add a 50 Hz sinusoidal burst to the noise and ramp it.
t = np.arange(n_times, dtype=np.float64) / sfreq
signal = np.sin(np.pi * 2. * 50. * t) # 50 Hz sinusoid signal
signal[np.logical_or(t < 0.45, t > 0.55)] = 0. # Hard windowing
on_time = np.logical_and(t >= 0.45, t <= 0.55)
signal[on_time] *= np.hanning(on_time.sum()) # Ramping
data = noise + signal
reject = dict(grad=4000)
events = np.empty((n_epochs, 3), dtype=int)
first_event_sample = 100
event_id = dict(sin50hz=1)
for k in range(n_epochs):
events[k, :] = first_event_sample + k * n_times, 0, event_id['sin50hz']
epochs = EpochsArray(data=data, info=info, events=events, event_id=event_id,
reject=reject)
epochs.average().plot()
###############################################################################
# Calculate a time-frequency representation (TFR)
# -----------------------------------------------
#
# Below we'll demonstrate the output of several TFR functions in MNE:
#
# * :func:`mne.time_frequency.tfr_multitaper`
# * :func:`mne.time_frequency.tfr_stockwell`
# * :func:`mne.time_frequency.tfr_morlet`
#
# Multitaper transform
# ====================
# First we'll use the multitaper method for calculating the TFR.
# This creates several orthogonal tapering windows in the TFR estimation,
# which reduces variance. We'll also show some of the parameters that can be
# tweaked (e.g., ``time_bandwidth``) that will result in different multitaper
# properties, and thus a different TFR. You can trade time resolution or
# frequency resolution or both in order to get a reduction in variance.
freqs = np.arange(5., 100., 3.)
vmin, vmax = -3., 3. # Define our color limits.
###############################################################################
# **(1) Least smoothing (most variance/background fluctuations).**
n_cycles = freqs / 2.
time_bandwidth = 2.0 # Least possible frequency-smoothing (1 taper)
power = tfr_multitaper(epochs, freqs=freqs, n_cycles=n_cycles,
time_bandwidth=time_bandwidth, return_itc=False)
# Plot results. Baseline correct based on first 100 ms.
power.plot([0], baseline=(0., 0.1), mode='mean', vmin=vmin, vmax=vmax,
title='Sim: Least smoothing, most variance')
###############################################################################
# **(2) Less frequency smoothing, more time smoothing.**
n_cycles = freqs # Increase time-window length to 1 second.
time_bandwidth = 4.0  # Same frequency-smoothing as (1), 3 tapers.
power = tfr_multitaper(epochs, freqs=freqs, n_cycles=n_cycles,
time_bandwidth=time_bandwidth, return_itc=False)
# Plot results. Baseline correct based on first 100 ms.
power.plot([0], baseline=(0., 0.1), mode='mean', vmin=vmin, vmax=vmax,
title='Sim: Less frequency smoothing, more time smoothing')
###############################################################################
# **(3) Less time smoothing, more frequency smoothing.**
n_cycles = freqs / 2.
time_bandwidth = 8.0 # Same time-smoothing as (1), 7 tapers.
power = tfr_multitaper(epochs, freqs=freqs, n_cycles=n_cycles,
time_bandwidth=time_bandwidth, return_itc=False)
# Plot results. Baseline correct based on first 100 ms.
power.plot([0], baseline=(0., 0.1), mode='mean', vmin=vmin, vmax=vmax,
title='Sim: Less time smoothing, more frequency smoothing')
##############################################################################
# Stockwell (S) transform
# =======================
#
# Stockwell uses a Gaussian window to balance temporal and spectral resolution.
# Importantly, frequency bands are phase-normalized and hence strictly
# comparable with regard to timing, and the input signal can be recovered from
# the transform in a lossless way if we disregard numerical errors. In this
# case, we control the spectral / temporal resolution by specifying different
# widths of the Gaussian window using the ``width`` parameter.
fig, axs = plt.subplots(1, 3, figsize=(15, 5), sharey=True)
fmin, fmax = freqs[[0, -1]]
for width, ax in zip((0.2, .7, 3.0), axs):
power = tfr_stockwell(epochs, fmin=fmin, fmax=fmax, width=width)
power.plot([0], baseline=(0., 0.1), mode='mean', axes=ax, show=False,
colorbar=False)
ax.set_title('Sim: Using S transform, width = {:0.1f}'.format(width))
plt.tight_layout()
###############################################################################
# Morlet Wavelets
# ===============
#
# Finally, show the TFR using Morlet wavelets, which are sinusoidal waves
# with a Gaussian envelope. We can control the balance between spectral and
# temporal resolution with the ``n_cycles`` parameter, which defines the
# number of cycles to include in the window.
fig, axs = plt.subplots(1, 3, figsize=(15, 5), sharey=True)
all_n_cycles = [1, 3, freqs / 2.]
for n_cycles, ax in zip(all_n_cycles, axs):
power = tfr_morlet(epochs, freqs=freqs,
n_cycles=n_cycles, return_itc=False)
power.plot([0], baseline=(0., 0.1), mode='mean', vmin=vmin, vmax=vmax,
axes=ax, show=False, colorbar=False)
n_cycles = 'scaled by freqs' if not isinstance(n_cycles, int) else n_cycles
ax.set_title('Sim: Using Morlet wavelet, n_cycles = %s' % n_cycles)
plt.tight_layout()
###############################################################################
# Calculating a TFR without averaging over epochs
# -----------------------------------------------
#
# It is also possible to calculate a TFR without averaging across trials.
# We can do this by using ``average=False``. In this case, an instance of
# :class:`mne.time_frequency.EpochsTFR` is returned.
n_cycles = freqs / 2.
power = tfr_morlet(epochs, freqs=freqs,
n_cycles=n_cycles, return_itc=False, average=False)
print(type(power))
avgpower = power.average()
avgpower.plot([0], baseline=(0., 0.1), mode='mean', vmin=vmin, vmax=vmax,
title='Using Morlet wavelets and EpochsTFR', show=False)
###############################################################################
# Operating on arrays
# -------------------
#
# MNE also has versions of the functions above which operate on numpy arrays
# instead of MNE objects. They expect inputs of the shape
# ``(n_epochs, n_channels, n_times)``. They will also return a numpy array
# of shape ``(n_epochs, n_channels, n_freqs, n_times)``.
power = tfr_array_morlet(epochs.get_data(), sfreq=epochs.info['sfreq'],
freqs=freqs, n_cycles=n_cycles,
output='avg_power')
# Baseline the output
rescale(power, epochs.times, (0., 0.1), mode='mean', copy=False)
fig, ax = plt.subplots()
x, y = centers_to_edges(epochs.times * 1000, freqs)
mesh = ax.pcolormesh(x, y, power[0], cmap='RdBu_r', vmin=vmin, vmax=vmax)
ax.set_title('TFR calculated on a numpy array')
ax.set(ylim=freqs[[0, -1]], xlabel='Time (ms)')
fig.colorbar(mesh)
plt.tight_layout()
plt.show()
|
bsd-3-clause
|
untom/scikit-learn
|
examples/linear_model/plot_multi_task_lasso_support.py
|
249
|
2211
|
#!/usr/bin/env python
"""
=============================================
Joint feature selection with multi-task Lasso
=============================================
The multi-task lasso allows fitting multiple regression problems
jointly while enforcing the selected features to be the same across
tasks. This example simulates sequential measurements: each task
is a time instant, and the relevant features vary in amplitude
over time while remaining the same. The multi-task lasso imposes
that features selected at one time point are selected for all time
points. This makes feature selection by the Lasso more stable.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import MultiTaskLasso, Lasso
rng = np.random.RandomState(42)
# Generate some 2D coefficients with sine waves with random frequency and phase
n_samples, n_features, n_tasks = 100, 30, 40
n_relevant_features = 5
coef = np.zeros((n_tasks, n_features))
times = np.linspace(0, 2 * np.pi, n_tasks)
for k in range(n_relevant_features):
coef[:, k] = np.sin((1. + rng.randn(1)) * times + 3 * rng.randn(1))
X = rng.randn(n_samples, n_features)
Y = np.dot(X, coef.T) + rng.randn(n_samples, n_tasks)
coef_lasso_ = np.array([Lasso(alpha=0.5).fit(X, y).coef_ for y in Y.T])
coef_multi_task_lasso_ = MultiTaskLasso(alpha=1.).fit(X, Y).coef_
###############################################################################
# Plot support and time series
fig = plt.figure(figsize=(8, 5))
plt.subplot(1, 2, 1)
plt.spy(coef_lasso_)
plt.xlabel('Feature')
plt.ylabel('Time (or Task)')
plt.text(10, 5, 'Lasso')
plt.subplot(1, 2, 2)
plt.spy(coef_multi_task_lasso_)
plt.xlabel('Feature')
plt.ylabel('Time (or Task)')
plt.text(10, 5, 'MultiTaskLasso')
fig.suptitle('Coefficient non-zero location')
feature_to_plot = 0
plt.figure()
plt.plot(coef[:, feature_to_plot], 'k', label='Ground truth')
plt.plot(coef_lasso_[:, feature_to_plot], 'g', label='Lasso')
plt.plot(coef_multi_task_lasso_[:, feature_to_plot],
'r', label='MultiTaskLasso')
plt.legend(loc='upper center')
plt.axis('tight')
plt.ylim([-1.1, 1.1])
plt.show()
|
bsd-3-clause
|
ChinaQuants/bokeh
|
bokeh/util/serialization.py
|
31
|
7419
|
""" Functions for helping with serialization and deserialization of
Bokeh objects.
"""
from __future__ import absolute_import
from six import iterkeys
is_numpy = None
try:
import numpy as np
is_numpy = True
except ImportError:
is_numpy = False
try:
import pandas as pd
is_pandas = True
except ImportError:
is_pandas = False
import logging
log = logging.getLogger(__name__)
_simple_id = 1000
def make_id():
""" Return a new unique ID for a Bokeh object.
Normally this function will return UUIDs to use for identifying Bokeh
    objects. This is especially important for Bokeh objects stored on a
Bokeh server. However, it is convenient to have more human-readable
IDs during development, so this behavior can be overridden by
setting the environment variable ``BOKEH_SIMPLE_IDS=yes``.
"""
global _simple_id
import uuid
from ..settings import settings
if settings.simple_ids(False):
_simple_id += 1
new_id = _simple_id
else:
new_id = uuid.uuid4()
return str(new_id)
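# Example (editor's note): with BOKEH_SIMPLE_IDS unset, make_id() returns a
# fresh UUID string; with BOKEH_SIMPLE_IDS=yes, successive calls return
# '1001', '1002', ... from the module-level counter above.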
def urljoin(*args):
""" Construct an absolute URL from several URL components.
Args:
*args (str) : URL components to join
Returns:
str : joined URL
"""
from six.moves.urllib.parse import urljoin as sys_urljoin
from functools import reduce
return reduce(sys_urljoin, args)
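# For instance, urljoin('http://example.com/', 'app/', 'page') reduces the
# components left to right and yields 'http://example.com/app/page'.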
def get_json(response):
""" Unify retrieving JSON responses from different sources.
Works correctly for HTTP responses from requests <=1.0, >1.0, and
the Flask test client.
Args:
response (Flask or requests response) : a response to process
Returns:
JSON
"""
import json
try:
import flask
except ImportError:
flask = None
if flask and isinstance(response, flask.Response):
# flask testing
return json.loads(response.data.decode('utf-8'))
else:
# requests
if hasattr(response.json, '__call__'):
return response.json()
else:
return response.json
def dump(objs, docid, changed_only=True):
""" Serialize a sequence of Bokeh objects into JSON
Args:
objs (seq[obj]) : a sequence of Bokeh object to dump
docid (str) : an ID for a Bokeh Document to dump relative to
changed_only (bool, optional) : whether to dump only attributes
that have had their values changed at some point (default: True)
Returns:
list[json]
"""
json_objs = []
for obj in objs:
ref = obj.ref
ref["attributes"] = obj.vm_serialize(changed_only=changed_only)
ref["attributes"].update({"id": ref["id"], "doc" : docid})
json_objs.append(ref)
return json_objs
def is_ref(frag):
""" Test whether a given Bokeh object graph fragment is a reference.
A Bokeh "reference" is a ``dict`` with ``"type"`` and ``"id"`` keys.
Args:
frag (dict) : a fragment of a Bokeh object graph
Returns:
True, if the fragment is a reference, otherwise False
"""
return isinstance(frag, dict) and \
frag.get('type') and \
frag.get('id')
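# Illustration (editor's note): is_ref({'type': 'Circle', 'id': '42'}) is
# truthy, while is_ref({'id': '42'}) and is_ref(['type', 'id']) are not.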
def json_apply(fragment, check_func, func):
""" Apply a function to JSON fragments that match the given predicate
and return the collected results.
Recursively traverses a nested collection of ``dict`` and ``list``,
applying ``check_func`` to each fragment. If True, then collect
``func(fragment)`` in the final output
Args:
fragment (JSON-like) : the fragment to apply ``func`` to recursively
check_func (callable) : the predicate to test fragments with
func (callable) : the conversion function to apply
Returns:
converted fragments
"""
if check_func(fragment):
return func(fragment)
elif isinstance(fragment, list):
output = []
for val in fragment:
output.append(json_apply(val, check_func, func))
return output
elif isinstance(fragment, dict):
output = {}
for k, val in fragment.items():
output[k] = json_apply(val, check_func, func)
return output
else:
return fragment
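# Example usage (editor's sketch): replace every reference fragment in a
# nested structure with its plain id string:
#     json_apply(fragment, is_ref, lambda ref: ref['id'])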
def transform_series(obj):
"""transforms pandas series into array of values
"""
vals = obj.values
return transform_array(vals)
def transform_array(obj):
"""Transform arrays into lists of json safe types
also handles pandas series, and replacing
nans and infs with strings
"""
# Check for astype failures (putative Numpy < 1.7)
dt2001 = np.datetime64('2001')
legacy_datetime64 = (dt2001.astype('int64') ==
dt2001.astype('datetime64[ms]').astype('int64'))
## not quite correct, truncates to ms..
if obj.dtype.kind == 'M':
if legacy_datetime64:
if obj.dtype == np.dtype('datetime64[ns]'):
return (obj.astype('int64') / 10**6.0).tolist()
else:
return (obj.astype('datetime64[us]').astype('int64') / 1000.).tolist()
elif obj.dtype.kind in ('u', 'i', 'f'):
return transform_numerical_array(obj)
return obj.tolist()
def transform_numerical_array(obj):
"""handles nans/inf conversion
"""
if isinstance(obj, np.ma.MaskedArray):
obj = obj.filled(np.nan) # Set masked values to nan
if not np.isnan(obj).any() and not np.isinf(obj).any():
return obj.tolist()
else:
transformed = obj.astype('object')
transformed[np.isnan(obj)] = 'NaN'
transformed[np.isposinf(obj)] = 'Infinity'
transformed[np.isneginf(obj)] = '-Infinity'
return transformed.tolist()
def traverse_data(datum, is_numpy=is_numpy, use_numpy=True):
"""recursively dig until a flat list is found
if numpy is available convert the flat list to a numpy array
and send off to transform_array() to handle nan, inf, -inf
otherwise iterate through items in array converting non-json items
Args:
datum (list) : a list of values or lists
is_numpy: True if numpy is present (see imports)
use_numpy: toggle numpy as a dependency for testing purposes
"""
is_numpy = is_numpy and use_numpy
if is_numpy and not any(isinstance(el, (list, tuple)) for el in datum):
return transform_array(np.asarray(datum))
datum_copy = []
for item in datum:
if isinstance(item, (list, tuple)):
datum_copy.append(traverse_data(item))
elif isinstance(item, float):
if np.isnan(item):
item = 'NaN'
elif np.isposinf(item):
item = 'Infinity'
elif np.isneginf(item):
item = '-Infinity'
datum_copy.append(item)
else:
datum_copy.append(item)
return datum_copy
def transform_column_source_data(data):
"""iterate through the data of a ColumnSourceData object replacing
non-JSON-compliant objects with compliant ones
"""
data_copy = {}
for key in iterkeys(data):
if is_pandas and isinstance(data[key], (pd.Series, pd.Index)):
data_copy[key] = transform_series(data[key])
elif isinstance(data[key], np.ndarray):
data_copy[key] = transform_array(data[key])
else:
data_copy[key] = traverse_data(data[key])
return data_copy
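# Sketch of the expected behavior (editor's illustration): given
# {'x': np.array([1.0, np.nan]), 'y': [1, 2]}, the 'x' column becomes
# [1.0, 'NaN'] via transform_array() and 'y' comes back from traverse_data()
# as [1, 2].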
|
bsd-3-clause
|
petosegan/scikit-learn
|
benchmarks/bench_lasso.py
|
297
|
3305
|
"""
Benchmarks of Lasso vs LassoLars
First, we fix a training set and increase the number of
samples. Then we plot the computation time as function of
the number of samples.
In the second benchmark, we increase the number of dimensions of the
training set. Then we plot the computation time as function of
the number of dimensions.
In both cases, only 10% of the features are informative.
"""
import gc
from time import time
import numpy as np
from sklearn.datasets.samples_generator import make_regression
def compute_bench(alpha, n_samples, n_features, precompute):
lasso_results = []
lars_lasso_results = []
it = 0
for ns in n_samples:
for nf in n_features:
it += 1
print('==================')
print('Iteration %s of %s' % (it, max(len(n_samples),
len(n_features))))
print('==================')
n_informative = nf // 10
X, Y, coef_ = make_regression(n_samples=ns, n_features=nf,
n_informative=n_informative,
noise=0.1, coef=True)
X /= np.sqrt(np.sum(X ** 2, axis=0)) # Normalize data
gc.collect()
print("- benchmarking Lasso")
clf = Lasso(alpha=alpha, fit_intercept=False,
precompute=precompute)
tstart = time()
clf.fit(X, Y)
lasso_results.append(time() - tstart)
gc.collect()
print("- benchmarking LassoLars")
clf = LassoLars(alpha=alpha, fit_intercept=False,
normalize=False, precompute=precompute)
tstart = time()
clf.fit(X, Y)
lars_lasso_results.append(time() - tstart)
return lasso_results, lars_lasso_results
if __name__ == '__main__':
from sklearn.linear_model import Lasso, LassoLars
import pylab as pl
alpha = 0.01 # regularization parameter
n_features = 10
    list_n_samples = np.linspace(100, 1000000, 5).astype(int)
lasso_results, lars_lasso_results = compute_bench(alpha, list_n_samples,
[n_features], precompute=True)
pl.figure('scikit-learn LASSO benchmark results')
pl.subplot(211)
pl.plot(list_n_samples, lasso_results, 'b-',
label='Lasso')
pl.plot(list_n_samples, lars_lasso_results, 'r-',
label='LassoLars')
pl.title('precomputed Gram matrix, %d features, alpha=%s' % (n_features, alpha))
pl.legend(loc='upper left')
pl.xlabel('number of samples')
pl.ylabel('Time (s)')
pl.axis('tight')
n_samples = 2000
    list_n_features = np.linspace(500, 3000, 5).astype(int)
lasso_results, lars_lasso_results = compute_bench(alpha, [n_samples],
list_n_features, precompute=False)
pl.subplot(212)
pl.plot(list_n_features, lasso_results, 'b-', label='Lasso')
pl.plot(list_n_features, lars_lasso_results, 'r-', label='LassoLars')
pl.title('%d samples, alpha=%s' % (n_samples, alpha))
pl.legend(loc='upper left')
pl.xlabel('number of features')
pl.ylabel('Time (s)')
pl.axis('tight')
pl.show()
|
bsd-3-clause
|
andim/scipy
|
scipy/stats/_distn_infrastructure.py
|
4
|
113604
|
#
# Author: Travis Oliphant 2002-2011 with contributions from
# SciPy Developers 2004-2011
#
from __future__ import division, print_function, absolute_import
from scipy._lib.six import string_types, exec_
from scipy._lib._util import getargspec_no_self as _getargspec
import sys
import keyword
import re
import types
import warnings
from scipy.misc import doccer
from ._distr_params import distcont, distdiscrete
from scipy._lib._util import check_random_state
from scipy.special import (comb, chndtr, gammaln, entr, kl_div, xlogy, ive)
# for root finding for discrete distribution ppf, and max likelihood estimation
from scipy import optimize
# for functions of continuous distributions (e.g. moments, entropy, cdf)
from scipy import integrate
# to approximate the pdf of a continuous distribution given its cdf
from scipy.misc import derivative
from numpy import (arange, putmask, ravel, take, ones, sum, shape,
product, reshape, zeros, floor, logical_and, log, sqrt, exp,
ndarray)
from numpy import (place, any, argsort, argmax, vectorize,
asarray, nan, inf, isinf, NINF, empty)
import numpy as np
from ._constants import _EPS, _XMAX
try:
from new import instancemethod
except ImportError:
# Python 3
def instancemethod(func, obj, cls):
return types.MethodType(func, obj)
# These are the docstring parts used for substitution in specific
# distribution docstrings
docheaders = {'methods': """\nMethods\n-------\n""",
'notes': """\nNotes\n-----\n""",
'examples': """\nExamples\n--------\n"""}
_doc_rvs = """\
``rvs(%(shapes)s, loc=0, scale=1, size=1, random_state=None)``
Random variates.
"""
_doc_pdf = """\
``pdf(x, %(shapes)s, loc=0, scale=1)``
Probability density function.
"""
_doc_logpdf = """\
``logpdf(x, %(shapes)s, loc=0, scale=1)``
Log of the probability density function.
"""
_doc_pmf = """\
``pmf(x, %(shapes)s, loc=0, scale=1)``
Probability mass function.
"""
_doc_logpmf = """\
``logpmf(x, %(shapes)s, loc=0, scale=1)``
Log of the probability mass function.
"""
_doc_cdf = """\
``cdf(x, %(shapes)s, loc=0, scale=1)``
    Cumulative distribution function.
"""
_doc_logcdf = """\
``logcdf(x, %(shapes)s, loc=0, scale=1)``
    Log of the cumulative distribution function.
"""
_doc_sf = """\
``sf(x, %(shapes)s, loc=0, scale=1)``
Survival function (``1 - cdf`` --- sometimes more accurate).
"""
_doc_logsf = """\
``logsf(x, %(shapes)s, loc=0, scale=1)``
Log of the survival function.
"""
_doc_ppf = """\
``ppf(q, %(shapes)s, loc=0, scale=1)``
Percent point function (inverse of ``cdf`` --- percentiles).
"""
_doc_isf = """\
``isf(q, %(shapes)s, loc=0, scale=1)``
Inverse survival function (inverse of ``sf``).
"""
_doc_moment = """\
``moment(n, %(shapes)s, loc=0, scale=1)``
Non-central moment of order n
"""
_doc_stats = """\
``stats(%(shapes)s, loc=0, scale=1, moments='mv')``
Mean('m'), variance('v'), skew('s'), and/or kurtosis('k').
"""
_doc_entropy = """\
``entropy(%(shapes)s, loc=0, scale=1)``
(Differential) entropy of the RV.
"""
_doc_fit = """\
``fit(data, %(shapes)s, loc=0, scale=1)``
Parameter estimates for generic data.
"""
_doc_expect = """\
``expect(func, %(shapes)s, loc=0, scale=1, lb=None, ub=None, conditional=False, **kwds)``
Expected value of a function (of one argument) with respect to the distribution.
"""
_doc_expect_discrete = """\
``expect(func, %(shapes)s, loc=0, lb=None, ub=None, conditional=False)``
Expected value of a function (of one argument) with respect to the distribution.
"""
_doc_median = """\
``median(%(shapes)s, loc=0, scale=1)``
Median of the distribution.
"""
_doc_mean = """\
``mean(%(shapes)s, loc=0, scale=1)``
Mean of the distribution.
"""
_doc_var = """\
``var(%(shapes)s, loc=0, scale=1)``
Variance of the distribution.
"""
_doc_std = """\
``std(%(shapes)s, loc=0, scale=1)``
Standard deviation of the distribution.
"""
_doc_interval = """\
``interval(alpha, %(shapes)s, loc=0, scale=1)``
Endpoints of the range that contains alpha percent of the distribution
"""
_doc_allmethods = ''.join([docheaders['methods'], _doc_rvs, _doc_pdf,
_doc_logpdf, _doc_cdf, _doc_logcdf, _doc_sf,
_doc_logsf, _doc_ppf, _doc_isf, _doc_moment,
_doc_stats, _doc_entropy, _doc_fit,
_doc_expect, _doc_median,
_doc_mean, _doc_var, _doc_std, _doc_interval])
_doc_default_longsummary = """\
As an instance of the `rv_continuous` class, `%(name)s` object inherits from it
a collection of generic methods (see below for the full list),
and completes them with details specific for this particular distribution.
"""
_doc_default_frozen_note = """
Alternatively, the object may be called (as a function) to fix the shape,
location, and scale parameters returning a "frozen" continuous RV object:
rv = %(name)s(%(shapes)s, loc=0, scale=1)
- Frozen RV object with the same methods but holding the given shape,
location, and scale fixed.
"""
_doc_default_example = """\
Examples
--------
>>> from scipy.stats import %(name)s
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots(1, 1)
Calculate the first few moments:
%(set_vals_stmt)s
>>> mean, var, skew, kurt = %(name)s.stats(%(shapes)s, moments='mvsk')
Display the probability density function (``pdf``):
>>> x = np.linspace(%(name)s.ppf(0.01, %(shapes)s),
... %(name)s.ppf(0.99, %(shapes)s), 100)
>>> ax.plot(x, %(name)s.pdf(x, %(shapes)s),
... 'r-', lw=5, alpha=0.6, label='%(name)s pdf')
Alternatively, the distribution object can be called (as a function)
to fix the shape, location and scale parameters. This returns a "frozen"
RV object holding the given parameters fixed.
Freeze the distribution and display the frozen ``pdf``:
>>> rv = %(name)s(%(shapes)s)
>>> ax.plot(x, rv.pdf(x), 'k-', lw=2, label='frozen pdf')
Check accuracy of ``cdf`` and ``ppf``:
>>> vals = %(name)s.ppf([0.001, 0.5, 0.999], %(shapes)s)
>>> np.allclose([0.001, 0.5, 0.999], %(name)s.cdf(vals, %(shapes)s))
True
Generate random numbers:
>>> r = %(name)s.rvs(%(shapes)s, size=1000)
And compare the histogram:
>>> ax.hist(r, density=True, histtype='stepfilled', alpha=0.2)
>>> ax.legend(loc='best', frameon=False)
>>> plt.show()
"""
_doc_default_locscale = """\
The probability density above is defined in the "standardized" form. To shift
and/or scale the distribution use the ``loc`` and ``scale`` parameters.
Specifically, ``%(name)s.pdf(x, %(shapes)s, loc, scale)`` is identically
equivalent to ``%(name)s.pdf(y, %(shapes)s) / scale`` with
``y = (x - loc) / scale``.
"""
_doc_default = ''.join([_doc_default_longsummary,
_doc_allmethods,
'\n',
_doc_default_example])
_doc_default_before_notes = ''.join([_doc_default_longsummary,
_doc_allmethods])
docdict = {
'rvs': _doc_rvs,
'pdf': _doc_pdf,
'logpdf': _doc_logpdf,
'cdf': _doc_cdf,
'logcdf': _doc_logcdf,
'sf': _doc_sf,
'logsf': _doc_logsf,
'ppf': _doc_ppf,
'isf': _doc_isf,
'stats': _doc_stats,
'entropy': _doc_entropy,
'fit': _doc_fit,
'moment': _doc_moment,
'expect': _doc_expect,
'interval': _doc_interval,
'mean': _doc_mean,
'std': _doc_std,
'var': _doc_var,
'median': _doc_median,
'allmethods': _doc_allmethods,
'longsummary': _doc_default_longsummary,
'frozennote': _doc_default_frozen_note,
'example': _doc_default_example,
'default': _doc_default,
'before_notes': _doc_default_before_notes,
'after_notes': _doc_default_locscale
}
# Reuse common content between continuous and discrete docs, change some
# minor bits.
docdict_discrete = docdict.copy()
docdict_discrete['pmf'] = _doc_pmf
docdict_discrete['logpmf'] = _doc_logpmf
docdict_discrete['expect'] = _doc_expect_discrete
_doc_disc_methods = ['rvs', 'pmf', 'logpmf', 'cdf', 'logcdf', 'sf', 'logsf',
'ppf', 'isf', 'stats', 'entropy', 'expect', 'median',
'mean', 'var', 'std', 'interval']
for obj in _doc_disc_methods:
docdict_discrete[obj] = docdict_discrete[obj].replace(', scale=1', '')
docdict_discrete.pop('pdf')
docdict_discrete.pop('logpdf')
_doc_allmethods = ''.join([docdict_discrete[obj] for obj in _doc_disc_methods])
docdict_discrete['allmethods'] = docheaders['methods'] + _doc_allmethods
docdict_discrete['longsummary'] = _doc_default_longsummary.replace(
'rv_continuous', 'rv_discrete')
_doc_default_frozen_note = """
Alternatively, the object may be called (as a function) to fix the shape and
location parameters returning a "frozen" discrete RV object:
rv = %(name)s(%(shapes)s, loc=0)
- Frozen RV object with the same methods but holding the given shape and
location fixed.
"""
docdict_discrete['frozennote'] = _doc_default_frozen_note
_doc_default_discrete_example = """\
Examples
--------
>>> from scipy.stats import %(name)s
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots(1, 1)
Calculate a few first moments:
%(set_vals_stmt)s
>>> mean, var, skew, kurt = %(name)s.stats(%(shapes)s, moments='mvsk')
Display the probability mass function (``pmf``):
>>> x = np.arange(%(name)s.ppf(0.01, %(shapes)s),
... %(name)s.ppf(0.99, %(shapes)s))
>>> ax.plot(x, %(name)s.pmf(x, %(shapes)s), 'bo', ms=8, label='%(name)s pmf')
>>> ax.vlines(x, 0, %(name)s.pmf(x, %(shapes)s), colors='b', lw=5, alpha=0.5)
Alternatively, the distribution object can be called (as a function)
to fix the shape and location. This returns a "frozen" RV object holding
the given parameters fixed.
Freeze the distribution and display the frozen ``pmf``:
>>> rv = %(name)s(%(shapes)s)
>>> ax.vlines(x, 0, rv.pmf(x), colors='k', linestyles='-', lw=1,
... label='frozen pmf')
>>> ax.legend(loc='best', frameon=False)
>>> plt.show()
Check accuracy of ``cdf`` and ``ppf``:
>>> prob = %(name)s.cdf(x, %(shapes)s)
>>> np.allclose(x, %(name)s.ppf(prob, %(shapes)s))
True
Generate random numbers:
>>> r = %(name)s.rvs(%(shapes)s, size=1000)
"""
_doc_default_discrete_locscale = """\
The probability mass function above is defined in the "standardized" form.
To shift the distribution use the ``loc`` parameter.
Specifically, ``%(name)s.pmf(k, %(shapes)s, loc)`` is identically
equivalent to ``%(name)s.pmf(k - loc, %(shapes)s)``.
"""
docdict_discrete['example'] = _doc_default_discrete_example
docdict_discrete['after_notes'] = _doc_default_discrete_locscale
_doc_default_before_notes = ''.join([docdict_discrete['longsummary'],
docdict_discrete['allmethods']])
docdict_discrete['before_notes'] = _doc_default_before_notes
_doc_default_disc = ''.join([docdict_discrete['longsummary'],
docdict_discrete['allmethods'],
docdict_discrete['frozennote'],
docdict_discrete['example']])
docdict_discrete['default'] = _doc_default_disc
# clean up all the separate docstring elements; we do not need them anymore
for obj in [s for s in dir() if s.startswith('_doc_')]:
exec('del ' + obj)
del obj
try:
del s
except NameError:
# in Python 3, loop variables are not visible after the loop
pass
def _moment(data, n, mu=None):
if mu is None:
mu = data.mean()
return ((data - mu)**n).mean()
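# Illustrative sketch (numpy only): the second central moment from `_moment`
# matches the population variance.
#
#     >>> data = np.array([1.0, 2.0, 3.0, 4.0])
#     >>> np.allclose(_moment(data, 2), data.var())
#     True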
def _moment_from_stats(n, mu, mu2, g1, g2, moment_func, args):
if (n == 0):
return 1.0
elif (n == 1):
if mu is None:
val = moment_func(1, *args)
else:
val = mu
elif (n == 2):
if mu2 is None or mu is None:
val = moment_func(2, *args)
else:
val = mu2 + mu*mu
elif (n == 3):
if g1 is None or mu2 is None or mu is None:
val = moment_func(3, *args)
else:
mu3 = g1 * np.power(mu2, 1.5) # 3rd central moment
val = mu3+3*mu*mu2+mu*mu*mu # 3rd non-central moment
elif (n == 4):
if g1 is None or g2 is None or mu2 is None or mu is None:
val = moment_func(4, *args)
else:
mu4 = (g2+3.0)*(mu2**2.0) # 4th central moment
mu3 = g1*np.power(mu2, 1.5) # 3rd central moment
val = mu4+4*mu*mu3+6*mu*mu*mu2+mu*mu*mu*mu
else:
val = moment_func(n, *args)
return val
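# Hedged example: with mean mu=2 and variance mu2=3, the n == 2 branch above
# returns the second non-central moment mu2 + mu*mu = 7 without consulting
# `moment_func`:
#
#     >>> _moment_from_stats(2, 2.0, 3.0, None, None, None, ())
#     7.0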
def _skew(data):
"""
skew is third central moment / variance**(1.5)
"""
data = np.ravel(data)
mu = data.mean()
m2 = ((data - mu)**2).mean()
m3 = ((data - mu)**3).mean()
return m3 / np.power(m2, 1.5)
def _kurtosis(data):
"""
kurtosis is fourth central moment / variance**2 - 3
"""
data = np.ravel(data)
mu = data.mean()
m2 = ((data - mu)**2).mean()
m4 = ((data - mu)**4).mean()
return m4 / m2**2 - 3
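# Quick sanity sketch: for the symmetric two-point sample [-1, 1], the sample
# skew is 0 and the excess kurtosis is m4/m2**2 - 3 = 1 - 3 = -2:
#
#     >>> _skew(np.array([-1.0, 1.0]))
#     0.0
#     >>> _kurtosis(np.array([-1.0, 1.0]))
#     -2.0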
# Frozen RV class
class rv_frozen(object):
def __init__(self, dist, *args, **kwds):
self.args = args
self.kwds = kwds
# create a new instance
self.dist = dist.__class__(**dist._ctor_param)
# a, b may be set in _argcheck, depending on *args, **kwds. Ouch.
shapes, _, _ = self.dist._parse_args(*args, **kwds)
self.dist._argcheck(*shapes)
self.a, self.b = self.dist.a, self.dist.b
@property
def random_state(self):
return self.dist._random_state
@random_state.setter
def random_state(self, seed):
self.dist._random_state = check_random_state(seed)
def pdf(self, x): # raises AttributeError in frozen discrete distribution
return self.dist.pdf(x, *self.args, **self.kwds)
def logpdf(self, x):
return self.dist.logpdf(x, *self.args, **self.kwds)
def cdf(self, x):
return self.dist.cdf(x, *self.args, **self.kwds)
def logcdf(self, x):
return self.dist.logcdf(x, *self.args, **self.kwds)
def ppf(self, q):
return self.dist.ppf(q, *self.args, **self.kwds)
def isf(self, q):
return self.dist.isf(q, *self.args, **self.kwds)
def rvs(self, size=None, random_state=None):
kwds = self.kwds.copy()
kwds.update({'size': size, 'random_state': random_state})
return self.dist.rvs(*self.args, **kwds)
def sf(self, x):
return self.dist.sf(x, *self.args, **self.kwds)
def logsf(self, x):
return self.dist.logsf(x, *self.args, **self.kwds)
def stats(self, moments='mv'):
kwds = self.kwds.copy()
kwds.update({'moments': moments})
return self.dist.stats(*self.args, **kwds)
def median(self):
return self.dist.median(*self.args, **self.kwds)
def mean(self):
return self.dist.mean(*self.args, **self.kwds)
def var(self):
return self.dist.var(*self.args, **self.kwds)
def std(self):
return self.dist.std(*self.args, **self.kwds)
def moment(self, n):
return self.dist.moment(n, *self.args, **self.kwds)
def entropy(self):
return self.dist.entropy(*self.args, **self.kwds)
def pmf(self, k):
return self.dist.pmf(k, *self.args, **self.kwds)
def logpmf(self, k):
return self.dist.logpmf(k, *self.args, **self.kwds)
def interval(self, alpha):
return self.dist.interval(alpha, *self.args, **self.kwds)
def expect(self, func=None, lb=None, ub=None,
conditional=False, **kwds):
# expect method only accepts shape parameters as positional args
# hence convert self.args, self.kwds, also loc/scale
# See the .expect method docstrings for the meaning of
# other parameters.
a, loc, scale = self.dist._parse_args(*self.args, **self.kwds)
if isinstance(self.dist, rv_discrete):
if kwds:
raise ValueError("Discrete expect does not accept **kwds.")
return self.dist.expect(func, a, loc, lb, ub, conditional)
else:
return self.dist.expect(func, a, loc, scale, lb, ub,
conditional, **kwds)
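# Usage sketch for the frozen interface (illustrative; assumes the public
# `scipy.stats.norm` instance, which is built on these classes):
#
#     >>> from scipy.stats import norm
#     >>> rv = norm(loc=1.0, scale=2.0)   # returns an rv_frozen instance
#     >>> rv.mean(), rv.std()
#     (1.0, 2.0)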
def valarray(shape, value=nan, typecode=None):
"""Return an array of all value.
"""
out = ones(shape, dtype=bool) * value
if typecode is not None:
out = out.astype(typecode)
if not isinstance(out, ndarray):
out = asarray(out)
return out
def _lazywhere(cond, arrays, f, fillvalue=None, f2=None):
"""
np.where(cond, x, fillvalue) always evaluates x even where cond is False.
This one only evaluates f(arr1[cond], arr2[cond], ...).
For example,
>>> a, b = np.array([1, 2, 3, 4]), np.array([5, 6, 7, 8])
>>> def f(a, b):
...     return a*b
>>> _lazywhere(a > 2, (a, b), f, np.nan)
array([ nan, nan, 21., 32.])
Notice that it assumes all `arrays` have the same shape, or can be
broadcast together.
"""
if fillvalue is None:
if f2 is None:
raise ValueError("One of (fillvalue, f2) must be given.")
else:
fillvalue = np.nan
else:
if f2 is not None:
raise ValueError("Only one of (fillvalue, f2) can be given.")
arrays = np.broadcast_arrays(*arrays)
temp = tuple(np.extract(cond, arr) for arr in arrays)
out = valarray(shape(arrays[0]), value=fillvalue)
np.place(out, cond, f(*temp))
if f2 is not None:
temp = tuple(np.extract(~cond, arr) for arr in arrays)
np.place(out, ~cond, f2(*temp))
return out
# This should be rewritten
def argsreduce(cond, *args):
"""Return the sequence of ravel(args[i]) where ravel(condition) is
True in 1D.
Examples
--------
>>> import numpy as np
>>> rand = np.random.random_sample
>>> A = rand((4, 5))
>>> B = 2
>>> C = rand((1, 5))
>>> cond = np.ones(A.shape)
>>> [A1, B1, C1] = argsreduce(cond, A, B, C)
>>> B1.shape
(20,)
>>> cond[2,:] = 0
>>> [A2, B2, C2] = argsreduce(cond, A, B, C)
>>> B2.shape
(15,)
"""
newargs = np.atleast_1d(*args)
if not isinstance(newargs, list):
newargs = [newargs, ]
expand_arr = (cond == cond)
return [np.extract(cond, arr1 * expand_arr) for arr1 in newargs]
parse_arg_template = """
def _parse_args(self, %(shape_arg_str)s %(locscale_in)s):
return (%(shape_arg_str)s), %(locscale_out)s
def _parse_args_rvs(self, %(shape_arg_str)s %(locscale_in)s, size=None):
return (%(shape_arg_str)s), %(locscale_out)s, size
def _parse_args_stats(self, %(shape_arg_str)s %(locscale_in)s, moments='mv'):
return (%(shape_arg_str)s), %(locscale_out)s, moments
"""
# Both the continuous and discrete distributions depend on ncx2.
# I think the function name ncx2 is an abbreviation for noncentral chi squared.
def _ncx2_log_pdf(x, df, nc):
# We use (xs**2 + ns**2)/2 = (xs - ns)**2/2 + xs*ns, and include the factor
# of exp(-xs*ns) into the ive function to improve numerical stability
# at large values of xs. See also `rice.pdf`.
df2 = df/2.0 - 1.0
xs, ns = np.sqrt(x), np.sqrt(nc)
res = xlogy(df2/2.0, x/nc) - 0.5*(xs - ns)**2
res += np.log(ive(df2, xs*ns) / 2.0)
return res
def _ncx2_pdf(x, df, nc):
return np.exp(_ncx2_log_pdf(x, df, nc))
def _ncx2_cdf(x, df, nc):
return chndtr(x, df, nc)
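# Consistency sketch (illustrative): `_ncx2_pdf` should agree with the public
# noncentral chi-squared density, assuming `scipy.stats.ncx2` is built on
# these helpers:
#
#     >>> from scipy.stats import ncx2
#     >>> np.allclose(_ncx2_pdf(5.0, 3.0, 2.0), ncx2.pdf(5.0, 3.0, 2.0))
#     True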
class rv_generic(object):
"""Class which encapsulates common functionality between rv_discrete
and rv_continuous.
"""
def __init__(self, seed=None):
super(rv_generic, self).__init__()
# figure out if _stats signature has 'moments' keyword
sign = _getargspec(self._stats)
self._stats_has_moments = ((sign[2] is not None) or
('moments' in sign[0]))
self._random_state = check_random_state(seed)
@property
def random_state(self):
""" Get or set the RandomState object for generating random variates.
This can be either None or an existing RandomState object.
If None (or np.random), use the RandomState singleton used by np.random.
If already a RandomState instance, use it.
If an int, use a new RandomState instance seeded with seed.
"""
return self._random_state
@random_state.setter
def random_state(self, seed):
self._random_state = check_random_state(seed)
def _construct_argparser(
self, meths_to_inspect, locscale_in, locscale_out):
"""Construct the parser for the shape arguments.
Generates the argument-parsing functions dynamically and attaches
them to the instance.
Intended to be called from the __init__ of each distribution class.
If self.shapes is a non-empty string, interprets it as a
comma-separated list of shape parameters.
Otherwise inspects the call signatures of `meths_to_inspect`
and constructs the argument-parsing functions from these.
In this case also sets `shapes` and `numargs`.
"""
if self.shapes:
# sanitize the user-supplied shapes
if not isinstance(self.shapes, string_types):
raise TypeError('shapes must be a string.')
shapes = self.shapes.replace(',', ' ').split()
for field in shapes:
if keyword.iskeyword(field):
raise SyntaxError('keywords cannot be used as shapes.')
if not re.match('^[_a-zA-Z][_a-zA-Z0-9]*$', field):
raise SyntaxError(
'shapes must be valid python identifiers')
else:
# find out the call signatures (_pdf, _cdf etc), deduce shape
# arguments. Generic methods only have 'self, x', any further args
# are shapes.
shapes_list = []
for meth in meths_to_inspect:
shapes_args = _getargspec(meth) # NB: does not contain self
args = shapes_args.args[1:] # peel off 'x', too
if args:
shapes_list.append(args)
# *args or **kwargs are not allowed w/automatic shapes
if shapes_args.varargs is not None:
raise TypeError(
'*args are not allowed w/out explicit shapes')
if shapes_args.keywords is not None:
raise TypeError(
'**kwds are not allowed w/out explicit shapes')
if shapes_args.defaults is not None:
raise TypeError('defaults are not allowed for shapes')
if shapes_list:
shapes = shapes_list[0]
# make sure the signatures are consistent
for item in shapes_list:
if item != shapes:
raise TypeError('Shape arguments are inconsistent.')
else:
shapes = []
# have the arguments, construct the method from template
shapes_str = ', '.join(shapes) + ', ' if shapes else '' # NB: not None
dct = dict(shape_arg_str=shapes_str,
locscale_in=locscale_in,
locscale_out=locscale_out,
)
ns = {}
exec_(parse_arg_template % dct, ns)
# NB: attach to the instance, not class
for name in ['_parse_args', '_parse_args_stats', '_parse_args_rvs']:
setattr(self, name,
instancemethod(ns[name], self, self.__class__)
)
self.shapes = ', '.join(shapes) if shapes else None
if not hasattr(self, 'numargs'):
# allows more general subclassing with *args
self.numargs = len(shapes)
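# Shape-inference sketch (illustrative; `demo_gen` is hypothetical): a
# subclass that defines ``_pdf(self, x, c)`` and never sets ``shapes`` ends
# up with ``shapes == 'c'`` and ``numargs == 1``, because any positional
# argument of _pdf beyond ``x`` is treated as a shape parameter:
#
#     >>> class demo_gen(rv_continuous):
#     ...     def _pdf(self, x, c):
#     ...         return c * x**(c - 1.0)
#     >>> demo = demo_gen(a=0.0, b=1.0, name='demo')
#     >>> demo.shapes, demo.numargs
#     ('c', 1)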
def _construct_doc(self, docdict, shapes_vals=None):
"""Construct the instance docstring with string substitutions."""
tempdict = docdict.copy()
tempdict['name'] = self.name or 'distname'
tempdict['shapes'] = self.shapes or ''
if shapes_vals is None:
shapes_vals = ()
vals = ', '.join('%.3g' % val for val in shapes_vals)
tempdict['vals'] = vals
if self.shapes:
tempdict['set_vals_stmt'] = '>>> %s = %s' % (self.shapes, vals)
else:
tempdict['set_vals_stmt'] = ''
if self.shapes is None:
# remove shapes from call parameters if there are none
for item in ['default', 'before_notes']:
tempdict[item] = tempdict[item].replace(
"\n%(shapes)s : array_like\n shape parameters", "")
for i in range(2):
if self.shapes is None:
# necessary because we use %(shapes)s in two forms (with and without ", ")
self.__doc__ = self.__doc__.replace("%(shapes)s, ", "")
self.__doc__ = doccer.docformat(self.__doc__, tempdict)
# correct for empty shapes
self.__doc__ = self.__doc__.replace('(, ', '(').replace(', )', ')')
def _construct_default_doc(self, longname=None, extradoc=None,
docdict=None, discrete='continuous'):
"""Construct instance docstring from the default template."""
if longname is None:
longname = 'A'
if extradoc is None:
extradoc = ''
if extradoc.startswith('\n\n'):
extradoc = extradoc[2:]
self.__doc__ = ''.join(['%s %s random variable.' % (longname, discrete),
'\n\n%(before_notes)s\n', docheaders['notes'],
extradoc, '\n%(example)s'])
self._construct_doc(docdict)
def freeze(self, *args, **kwds):
"""Freeze the distribution for the given arguments.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution. Should include all
the non-optional arguments, may include ``loc`` and ``scale``.
Returns
-------
rv_frozen : rv_frozen instance
The frozen distribution.
"""
return rv_frozen(self, *args, **kwds)
def __call__(self, *args, **kwds):
return self.freeze(*args, **kwds)
__call__.__doc__ = freeze.__doc__
# The actual calculation functions (no basic checking need be done)
# If these are defined, the others won't be looked at.
# Otherwise, the other set can be defined.
def _stats(self, *args, **kwds):
return None, None, None, None
# Non-central moments (moments about zero)
def _munp(self, n, *args):
# Silence floating point warnings from integration.
olderr = np.seterr(all='ignore')
vals = self.generic_moment(n, *args)
np.seterr(**olderr)
return vals
## These are the methods you must define (standard form functions)
## NB: generic _pdf, _logpdf, _cdf are different for
## rv_continuous and rv_discrete hence are defined in there
def _argcheck(self, *args):
"""Default check for correct values on args and keywords.
Returns a condition array of 1's where arguments are correct and
0's where they are not.
"""
cond = 1
for arg in args:
cond = logical_and(cond, (asarray(arg) > 0))
return cond
##(return 1-d using self._size to get number)
def _rvs(self, *args):
## Use basic inverse cdf algorithm for RV generation as default.
U = self._random_state.random_sample(self._size)
Y = self._ppf(U, *args)
return Y
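# The default _rvs above is inverse-transform sampling: if U ~ Uniform(0, 1),
# then ppf(U) follows the target distribution. A standalone numpy-only sketch
# for the unit exponential, whose ppf is -log(1 - q):
#
#     >>> rng = np.random.RandomState(0)
#     >>> u = rng.random_sample(100000)
#     >>> samples = -np.log(1.0 - u)          # exponential via inverse cdf
#     >>> abs(samples.mean() - 1.0) < 0.05    # population mean is 1
#     True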
def _logcdf(self, x, *args):
return log(self._cdf(x, *args))
def _sf(self, x, *args):
return 1.0-self._cdf(x, *args)
def _logsf(self, x, *args):
return log(self._sf(x, *args))
def _ppf(self, q, *args):
return self._ppfvec(q, *args)
def _isf(self, q, *args):
return self._ppf(1.0-q, *args) # use correct _ppf for subclasses
# These are actually called, and should not be overwritten if you
# want to keep error checking.
def rvs(self, *args, **kwds):
"""
Random variates of given type.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
scale : array_like, optional
Scale parameter (default=1).
size : int or tuple of ints, optional
Defining number of random variates (default is 1).
random_state : None or int or ``np.random.RandomState`` instance, optional
If int or RandomState, use it for drawing the random variates.
If None, rely on ``self.random_state``.
Default is None.
Returns
-------
rvs : ndarray or scalar
Random variates of given `size`.
"""
discrete = kwds.pop('discrete', None)
rndm = kwds.pop('random_state', None)
args, loc, scale, size = self._parse_args_rvs(*args, **kwds)
cond = logical_and(self._argcheck(*args), (scale >= 0))
if not np.all(cond):
raise ValueError("Domain error in arguments.")
# self._size is total size of all output values
self._size = product(size, axis=0)
if self._size is not None and self._size > 1:
size = np.array(size, ndmin=1)
if np.all(scale == 0):
return loc*ones(size, 'd')
# extra gymnastics needed for a custom random_state
if rndm is not None:
random_state_saved = self._random_state
self._random_state = check_random_state(rndm)
vals = self._rvs(*args)
if self._size is not None:
vals = reshape(vals, size)
vals = vals * scale + loc
# do not forget to restore the _random_state
if rndm is not None:
self._random_state = random_state_saved
# Cast to int if discrete
if discrete:
if np.isscalar(vals):
vals = int(vals)
else:
vals = vals.astype(int)
return vals
def stats(self, *args, **kwds):
"""
Some statistics of the given RV.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional (continuous RVs only)
scale parameter (default=1)
moments : str, optional
composed of letters ['mvsk'] defining which moments to compute:
'm' = mean,
'v' = variance,
's' = (Fisher's) skew,
'k' = (Fisher's) kurtosis.
(default is 'mv')
Returns
-------
stats : sequence
of requested moments.
"""
args, loc, scale, moments = self._parse_args_stats(*args, **kwds)
# scale = 1 by construction for discrete RVs
loc, scale = map(asarray, (loc, scale))
args = tuple(map(asarray, args))
cond = self._argcheck(*args) & (scale > 0) & (loc == loc)
output = []
default = valarray(shape(cond), self.badvalue)
# Use only entries that are valid in calculation
if any(cond):
goodargs = argsreduce(cond, *(args+(scale, loc)))
scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]
if self._stats_has_moments:
mu, mu2, g1, g2 = self._stats(*goodargs,
**{'moments': moments})
else:
mu, mu2, g1, g2 = self._stats(*goodargs)
if g1 is None:
mu3 = None
else:
if mu2 is None:
mu2 = self._munp(2, *goodargs)
if g2 is None:
# (mu2**1.5) breaks down for nan and inf
mu3 = g1 * np.power(mu2, 1.5)
if 'm' in moments:
if mu is None:
mu = self._munp(1, *goodargs)
out0 = default.copy()
place(out0, cond, mu * scale + loc)
output.append(out0)
if 'v' in moments:
if mu2 is None:
mu2p = self._munp(2, *goodargs)
if mu is None:
mu = self._munp(1, *goodargs)
mu2 = mu2p - mu * mu
if np.isinf(mu):
# if the mean is inf then the variance is also inf
mu2 = np.inf
out0 = default.copy()
place(out0, cond, mu2 * scale * scale)
output.append(out0)
if 's' in moments:
if g1 is None:
mu3p = self._munp(3, *goodargs)
if mu is None:
mu = self._munp(1, *goodargs)
if mu2 is None:
mu2p = self._munp(2, *goodargs)
mu2 = mu2p - mu * mu
mu3 = mu3p - 3 * mu * mu2 - mu**3
g1 = mu3 / np.power(mu2, 1.5)
out0 = default.copy()
place(out0, cond, g1)
output.append(out0)
if 'k' in moments:
if g2 is None:
mu4p = self._munp(4, *goodargs)
if mu is None:
mu = self._munp(1, *goodargs)
if mu2 is None:
mu2p = self._munp(2, *goodargs)
mu2 = mu2p - mu * mu
if mu3 is None:
mu3p = self._munp(3, *goodargs)
mu3 = mu3p - 3 * mu * mu2 - mu**3
mu4 = mu4p - 4 * mu * mu3 - 6 * mu * mu * mu2 - mu**4
g2 = mu4 / mu2**2.0 - 3.0
out0 = default.copy()
place(out0, cond, g2)
output.append(out0)
else: # no valid args
output = []
for _ in moments:
out0 = default.copy()
output.append(out0)
if len(output) == 1:
return output[0]
else:
return tuple(output)
def entropy(self, *args, **kwds):
"""
Differential entropy of the RV.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
scale : array_like, optional (continuous distributions only).
Scale parameter (default=1).
Notes
-----
Entropy is defined in base `e`:
>>> drv = rv_discrete(values=((0, 1), (0.5, 0.5)))
>>> np.allclose(drv.entropy(), np.log(2.0))
True
"""
args, loc, scale = self._parse_args(*args, **kwds)
# NB: for discrete distributions scale=1 by construction in _parse_args
args = tuple(map(asarray, args))
cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc)
output = zeros(shape(cond0), 'd')
place(output, (1-cond0), self.badvalue)
goodargs = argsreduce(cond0, *args)
# np.vectorize doesn't work when numargs == 0 in numpy 1.6.2. Once the
# lowest supported numpy version is >= 1.7.0, this special case can be
# removed (see gh-4314).
if self.numargs == 0:
place(output, cond0, self._entropy() + log(scale))
else:
place(output, cond0, self.vecentropy(*goodargs) + log(scale))
return output
def moment(self, n, *args, **kwds):
"""
n-th order non-central moment of the distribution.
Parameters
----------
n : int, n >= 1
Order of moment.
arg1, arg2, arg3,... : float
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
"""
args, loc, scale = self._parse_args(*args, **kwds)
if not (self._argcheck(*args) and (scale > 0)):
return nan
if (floor(n) != n):
raise ValueError("Moment must be an integer.")
if (n < 0):
raise ValueError("Moment must be positive.")
mu, mu2, g1, g2 = None, None, None, None
if (n > 0) and (n < 5):
if self._stats_has_moments:
mdict = {'moments': {1: 'm', 2: 'v', 3: 'vs', 4: 'vk'}[n]}
else:
mdict = {}
mu, mu2, g1, g2 = self._stats(*args, **mdict)
val = _moment_from_stats(n, mu, mu2, g1, g2, self._munp, args)
# Convert to transformed X = L + S*Y
# E[X^n] = E[(L+S*Y)^n] = L^n sum(comb(n, k)*(S/L)^k E[Y^k], k=0...n)
if loc == 0:
return scale**n * val
else:
result = 0
fac = float(scale) / float(loc)
for k in range(n):
valk = _moment_from_stats(k, mu, mu2, g1, g2, self._munp, args)
result += comb(n, k, exact=True)*(fac**k) * valk
result += fac**n * val
return result * loc**n
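# Numeric sanity sketch for the transform above (assumes `scipy.stats.norm`):
# for X = loc + scale*Y with Y standard normal, E[X**2] = loc**2 + scale**2.
#
#     >>> from scipy.stats import norm
#     >>> np.allclose(norm.moment(2, loc=3.0, scale=2.0), 3.0**2 + 2.0**2)
#     True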
def median(self, *args, **kwds):
"""
Median of the distribution.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
Location parameter, Default is 0.
scale : array_like, optional
Scale parameter, Default is 1.
Returns
-------
median : float
The median of the distribution.
See Also
--------
stats.distributions.rv_discrete.ppf
Inverse of the CDF
"""
return self.ppf(0.5, *args, **kwds)
def mean(self, *args, **kwds):
"""
Mean of the distribution.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
mean : float
the mean of the distribution
"""
kwds['moments'] = 'm'
res = self.stats(*args, **kwds)
if isinstance(res, ndarray) and res.ndim == 0:
return res[()]
return res
def var(self, *args, **kwds):
"""
Variance of the distribution.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
var : float
the variance of the distribution
"""
kwds['moments'] = 'v'
res = self.stats(*args, **kwds)
if isinstance(res, ndarray) and res.ndim == 0:
return res[()]
return res
def std(self, *args, **kwds):
"""
Standard deviation of the distribution.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
std : float
standard deviation of the distribution
"""
kwds['moments'] = 'v'
res = sqrt(self.stats(*args, **kwds))
return res
def interval(self, alpha, *args, **kwds):
"""
Confidence interval with equal areas around the median.
Parameters
----------
alpha : array_like of float
Probability that an rv will be drawn from the returned range.
Each value should be in the range [0, 1].
arg1, arg2, ... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
location parameter, Default is 0.
scale : array_like, optional
scale parameter, Default is 1.
Returns
-------
a, b : ndarray of float
Endpoints of the range that contains ``100 * alpha %`` of the rv's
possible values.
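Examples
--------
Illustrative only (assumes the public `scipy.stats.norm` instance); the
central 95% interval of the standard normal is about (-1.96, 1.96):
>>> from scipy.stats import norm
>>> a, b = norm.interval(0.95)
>>> round(a, 2), round(b, 2)
(-1.96, 1.96)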
"""
alpha = asarray(alpha)
if any((alpha > 1) | (alpha < 0)):
raise ValueError("alpha must be between 0 and 1 inclusive")
q1 = (1.0-alpha)/2
q2 = (1.0+alpha)/2
a = self.ppf(q1, *args, **kwds)
b = self.ppf(q2, *args, **kwds)
return a, b
## continuous random variables: implement maybe later
##
## hf --- Hazard Function (PDF / SF)
## chf --- Cumulative hazard function (-log(SF))
## psf --- Probability sparsity function (reciprocal of the pdf) in
## units of percent-point-function (as a function of q).
## Also, the derivative of the percent-point function.
class rv_continuous(rv_generic):
"""
A generic continuous random variable class meant for subclassing.
`rv_continuous` is a base class to construct specific distribution classes
and instances for continuous random variables. It cannot be used
directly as a distribution.
Parameters
----------
momtype : int, optional
The type of generic moment calculation to use: 0 for pdf, 1 (default)
for ppf.
a : float, optional
Lower bound of the support of the distribution, default is minus
infinity.
b : float, optional
Upper bound of the support of the distribution, default is plus
infinity.
xtol : float, optional
The tolerance for fixed point calculation for generic ppf.
badvalue : float, optional
The value placed in result arrays to indicate that some argument
restriction was violated; default is np.nan.
name : str, optional
The name of the instance. This string is used to construct the default
example for distributions.
longname : str, optional
This string is used as part of the first line of the docstring returned
when a subclass has no docstring of its own. Note: `longname` exists
for backwards compatibility; do not use it for new subclasses.
shapes : str, optional
The shape of the distribution. For example ``"m, n"`` for a
distribution that takes two integers as the two shape arguments for all
its methods. If not provided, shape parameters will be inferred from
the signature of the private methods, ``_pdf`` and ``_cdf`` of the
instance.
extradoc : str, optional, deprecated
This string is used as the last part of the docstring returned when a
subclass has no docstring of its own. Note: `extradoc` exists for
backwards compatibility; do not use it for new subclasses.
seed : None or int or ``numpy.random.RandomState`` instance, optional
This parameter defines the RandomState object to use for drawing
random variates.
If None (or np.random), the global np.random state is used.
If integer, it is used to seed the local RandomState instance.
Default is None.
Methods
-------
rvs
pdf
logpdf
cdf
logcdf
sf
logsf
ppf
isf
moment
stats
entropy
expect
median
mean
std
var
interval
__call__
fit
fit_loc_scale
nnlf
Notes
-----
Public methods of an instance of a distribution class (e.g., ``pdf``,
``cdf``) check their arguments and pass valid arguments to private,
computational methods (``_pdf``, ``_cdf``). For ``pdf(x)``, ``x`` is valid
if it is within the support of a distribution, ``self.a <= x <= self.b``.
Whether a shape parameter is valid is decided by an ``_argcheck`` method
(which defaults to checking that its arguments are strictly positive).
**Subclassing**
New random variables can be defined by subclassing the `rv_continuous` class
and re-defining at least the ``_pdf`` or the ``_cdf`` method (normalized
to location 0 and scale 1).
If positive argument checking is not correct for your RV
then you will also need to re-define the ``_argcheck`` method.
Correct, but potentially slow defaults exist for the remaining
methods, but for speed and/or accuracy you can override::
_logpdf, _cdf, _logcdf, _ppf, _rvs, _isf, _sf, _logsf
Rarely would you override ``_isf``, ``_sf`` or ``_logsf``, but you could.
**Methods that can be overwritten by subclasses**
::
_rvs
_pdf
_cdf
_sf
_ppf
_isf
_stats
_munp
_entropy
_argcheck
There are additional (internal and private) generic methods that can
be useful for cross-checking and for debugging, but might not work in
all cases when directly called.
A note on ``shapes``: subclasses need not specify them explicitly. In this
case, `shapes` will be automatically deduced from the signatures of the
overridden methods (`pdf`, `cdf` etc).
If, for some reason, you prefer to avoid relying on introspection, you can
specify ``shapes`` explicitly as an argument to the instance constructor.
**Frozen Distributions**
Normally, you must provide shape parameters (and, optionally, location and
scale parameters) to each call of a method of a distribution.
Alternatively, the object may be called (as a function) to fix the shape,
location, and scale parameters returning a "frozen" continuous RV object:
rv = generic(<shape(s)>, loc=0, scale=1)
frozen RV object with the same methods but holding the given shape,
location, and scale fixed
**Statistics**
Statistics are computed using numerical integration by default.
For speed you can redefine this using ``_stats``:
- take shape parameters and return mu, mu2, g1, g2
- If you can't compute one of these, return it as None
- Can also be defined with a keyword argument ``moments``, which is a
string composed of "m", "v", "s", and/or "k".
Only the components appearing in the string should be computed and
returned in the order "m", "v", "s", or "k" with missing values
returned as None.
Alternatively, you can override ``_munp``, which takes ``n`` and shape
parameters and returns the n-th non-central moment of the distribution.
Examples
--------
To create a new Gaussian distribution, we would do the following:
>>> from scipy.stats import rv_continuous
>>> class gaussian_gen(rv_continuous):
... "Gaussian distribution"
... def _pdf(self, x):
... return np.exp(-x**2 / 2.) / np.sqrt(2.0 * np.pi)
>>> gaussian = gaussian_gen(name='gaussian')
``scipy.stats`` distributions are *instances*, so here we subclass
`rv_continuous` and create an instance. With this, we now have
a fully functional distribution with all relevant methods automagically
generated by the framework.
Note that above we defined a standard normal distribution, with zero mean
and unit variance. Shifting and scaling of the distribution can be done
by using ``loc`` and ``scale`` parameters: ``gaussian.pdf(x, loc, scale)``
essentially computes ``y = (x - loc) / scale`` and
``gaussian._pdf(y) / scale``.
"""
def __init__(self, momtype=1, a=None, b=None, xtol=1e-14,
badvalue=None, name=None, longname=None,
shapes=None, extradoc=None, seed=None):
super(rv_continuous, self).__init__(seed)
# save the ctor parameters, cf generic freeze
self._ctor_param = dict(
momtype=momtype, a=a, b=b, xtol=xtol,
badvalue=badvalue, name=name, longname=longname,
shapes=shapes, extradoc=extradoc, seed=seed)
if badvalue is None:
badvalue = nan
if name is None:
name = 'Distribution'
self.badvalue = badvalue
self.name = name
self.a = a
self.b = b
if a is None:
self.a = -inf
if b is None:
self.b = inf
self.xtol = xtol
self._size = 1
self.moment_type = momtype
self.shapes = shapes
self._construct_argparser(meths_to_inspect=[self._pdf, self._cdf],
locscale_in='loc=0, scale=1',
locscale_out='loc, scale')
# nin correction
self._ppfvec = vectorize(self._ppf_single, otypes='d')
self._ppfvec.nin = self.numargs + 1
self.vecentropy = vectorize(self._entropy, otypes='d')
self._cdfvec = vectorize(self._cdf_single, otypes='d')
self._cdfvec.nin = self.numargs + 1
# backwards compat. these were removed in 0.14.0, put back but
# deprecated in 0.14.1:
self.vecfunc = np.deprecate(self._ppfvec, "vecfunc")
self.veccdf = np.deprecate(self._cdfvec, "veccdf")
self.extradoc = extradoc
if momtype == 0:
self.generic_moment = vectorize(self._mom0_sc, otypes='d')
else:
self.generic_moment = vectorize(self._mom1_sc, otypes='d')
# Because of the *args argument of _mom0_sc, vectorize cannot count the
# number of arguments correctly.
self.generic_moment.nin = self.numargs + 1
if longname is None:
if name[0] in 'aeiouAEIOU':
hstr = "An "
else:
hstr = "A "
longname = hstr + name
if sys.flags.optimize < 2:
# Skip adding docstrings if interpreter is run with -OO
if self.__doc__ is None:
self._construct_default_doc(longname=longname,
extradoc=extradoc,
docdict=docdict,
discrete='continuous')
else:
dct = dict(distcont)
self._construct_doc(docdict, dct.get(self.name))
def _ppf_to_solve(self, x, q, *args):
return self.cdf(*(x, )+args)-q
def _ppf_single(self, q, *args):
left = right = None
if self.a > -np.inf:
left = self.a
if self.b < np.inf:
right = self.b
factor = 10.
if left is None: # i.e. self.a = -inf (a finite a == 0 must not end up here)
left = -1.*factor
while self._ppf_to_solve(left, q, *args) > 0.:
right = left
left *= factor
# left is now such that cdf(left) < q
if right is None: # i.e. self.b = inf (a finite b == 0 must not end up here)
right = factor
while self._ppf_to_solve(right, q, *args) < 0.:
left = right
right *= factor
# right is now such that cdf(right) > q
return optimize.brentq(self._ppf_to_solve,
left, right, args=(q,)+args, xtol=self.xtol)
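# Standalone sketch of the same expanding-bracket + brentq idea (assumes
# `scipy.optimize` is importable); solves cdf(x) = 0.5 for the unit
# exponential, whose median is log(2):
#
#     >>> from scipy import optimize
#     >>> cdf = lambda x: 1.0 - np.exp(-x)
#     >>> right = 10.0
#     >>> while cdf(right) < 0.5:       # expand the bracket geometrically
#     ...     right *= 10.0
#     >>> round(optimize.brentq(lambda x: cdf(x) - 0.5, 0.0, right), 6)
#     0.693147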
# moment from definition
def _mom_integ0(self, x, m, *args):
return x**m * self.pdf(x, *args)
def _mom0_sc(self, m, *args):
return integrate.quad(self._mom_integ0, self.a, self.b,
args=(m,)+args)[0]
# moment calculated using ppf
def _mom_integ1(self, q, m, *args):
return (self.ppf(q, *args))**m
def _mom1_sc(self, m, *args):
return integrate.quad(self._mom_integ1, 0, 1, args=(m,)+args)[0]
def _pdf(self, x, *args):
return derivative(self._cdf, x, dx=1e-5, args=args, order=5)
## Could also define any of these
def _logpdf(self, x, *args):
return log(self._pdf(x, *args))
def _cdf_single(self, x, *args):
return integrate.quad(self._pdf, self.a, x, args=args)[0]
def _cdf(self, x, *args):
return self._cdfvec(x, *args)
## generic _argcheck, _logcdf, _sf, _logsf, _ppf, _isf, _rvs are defined
## in rv_generic
def pdf(self, x, *args, **kwds):
"""
Probability density function at x of the given RV.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
pdf : ndarray
Probability density function evaluated at x
"""
args, loc, scale = self._parse_args(*args, **kwds)
x, loc, scale = map(asarray, (x, loc, scale))
args = tuple(map(asarray, args))
dtyp = np.find_common_type([x.dtype, np.float64], [])
x = np.asarray((x - loc)/scale, dtype=dtyp)
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = (scale > 0) & (x >= self.a) & (x <= self.b)
cond = cond0 & cond1
output = zeros(shape(cond), dtyp)
putmask(output, (1-cond0)+np.isnan(x), self.badvalue)
if any(cond):
goodargs = argsreduce(cond, *((x,)+args+(scale,)))
scale, goodargs = goodargs[-1], goodargs[:-1]
place(output, cond, self._pdf(*goodargs) / scale)
if output.ndim == 0:
return output[()]
return output
def logpdf(self, x, *args, **kwds):
"""
Log of the probability density function at x of the given RV.
This uses a more numerically accurate calculation if available.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
logpdf : array_like
Log of the probability density function evaluated at x
"""
args, loc, scale = self._parse_args(*args, **kwds)
x, loc, scale = map(asarray, (x, loc, scale))
args = tuple(map(asarray, args))
dtyp = np.find_common_type([x.dtype, np.float64], [])
x = np.asarray((x - loc)/scale, dtype=dtyp)
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = (scale > 0) & (x >= self.a) & (x <= self.b)
cond = cond0 & cond1
output = empty(shape(cond), dtyp)
output.fill(NINF)
putmask(output, (1-cond0)+np.isnan(x), self.badvalue)
if any(cond):
goodargs = argsreduce(cond, *((x,)+args+(scale,)))
scale, goodargs = goodargs[-1], goodargs[:-1]
place(output, cond, self._logpdf(*goodargs) - log(scale))
if output.ndim == 0:
return output[()]
return output
def cdf(self, x, *args, **kwds):
"""
Cumulative distribution function of the given RV.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
cdf : ndarray
Cumulative distribution function evaluated at `x`
"""
args, loc, scale = self._parse_args(*args, **kwds)
x, loc, scale = map(asarray, (x, loc, scale))
args = tuple(map(asarray, args))
dtyp = np.find_common_type([x.dtype, np.float64], [])
x = np.asarray((x - loc)/scale, dtype=dtyp)
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = (scale > 0) & (x > self.a) & (x < self.b)
cond2 = (x >= self.b) & cond0
cond = cond0 & cond1
output = zeros(shape(cond), dtyp)
place(output, (1-cond0)+np.isnan(x), self.badvalue)
place(output, cond2, 1.0)
if any(cond): # call only if at least 1 entry
goodargs = argsreduce(cond, *((x,)+args))
place(output, cond, self._cdf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def logcdf(self, x, *args, **kwds):
"""
Log of the cumulative distribution function at x of the given RV.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
logcdf : array_like
Log of the cumulative distribution function evaluated at x
"""
args, loc, scale = self._parse_args(*args, **kwds)
x, loc, scale = map(asarray, (x, loc, scale))
args = tuple(map(asarray, args))
dtyp = np.find_common_type([x.dtype, np.float64], [])
x = np.asarray((x - loc)/scale, dtype=dtyp)
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = (scale > 0) & (x > self.a) & (x < self.b)
cond2 = (x >= self.b) & cond0
cond = cond0 & cond1
output = empty(shape(cond), dtyp)
output.fill(NINF)
place(output, (1-cond0)*(cond1 == cond1)+np.isnan(x), self.badvalue)
place(output, cond2, 0.0)
if any(cond): # call only if at least 1 entry
goodargs = argsreduce(cond, *((x,)+args))
place(output, cond, self._logcdf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def sf(self, x, *args, **kwds):
"""
Survival function (1 - `cdf`) at x of the given RV.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
sf : array_like
Survival function evaluated at x
"""
args, loc, scale = self._parse_args(*args, **kwds)
x, loc, scale = map(asarray, (x, loc, scale))
args = tuple(map(asarray, args))
dtyp = np.find_common_type([x.dtype, np.float64], [])
x = np.asarray((x - loc)/scale, dtype=dtyp)
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = (scale > 0) & (x > self.a) & (x < self.b)
cond2 = cond0 & (x <= self.a)
cond = cond0 & cond1
output = zeros(shape(cond), dtyp)
place(output, (1-cond0)+np.isnan(x), self.badvalue)
place(output, cond2, 1.0)
if any(cond):
goodargs = argsreduce(cond, *((x,)+args))
place(output, cond, self._sf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def logsf(self, x, *args, **kwds):
"""
Log of the survival function of the given RV.
Returns the log of the "survival function," defined as (1 - `cdf`),
evaluated at `x`.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
logsf : ndarray
Log of the survival function evaluated at `x`.
"""
args, loc, scale = self._parse_args(*args, **kwds)
x, loc, scale = map(asarray, (x, loc, scale))
args = tuple(map(asarray, args))
dtyp = np.find_common_type([x.dtype, np.float64], [])
x = np.asarray((x - loc)/scale, dtype=dtyp)
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = (scale > 0) & (x > self.a) & (x < self.b)
cond2 = cond0 & (x <= self.a)
cond = cond0 & cond1
output = empty(shape(cond), dtyp)
output.fill(NINF)
place(output, (1-cond0)+np.isnan(x), self.badvalue)
place(output, cond2, 0.0)
if any(cond):
goodargs = argsreduce(cond, *((x,)+args))
place(output, cond, self._logsf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def ppf(self, q, *args, **kwds):
"""
Percent point function (inverse of `cdf`) at q of the given RV.
Parameters
----------
q : array_like
lower tail probability
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
x : array_like
quantile corresponding to the lower tail probability q.
"""
args, loc, scale = self._parse_args(*args, **kwds)
q, loc, scale = map(asarray, (q, loc, scale))
args = tuple(map(asarray, args))
cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc)
cond1 = (0 < q) & (q < 1)
cond2 = cond0 & (q == 0)
cond3 = cond0 & (q == 1)
cond = cond0 & cond1
output = valarray(shape(cond), value=self.badvalue)
lower_bound = self.a * scale + loc
upper_bound = self.b * scale + loc
place(output, cond2, argsreduce(cond2, lower_bound)[0])
place(output, cond3, argsreduce(cond3, upper_bound)[0])
if any(cond): # call only if at least 1 entry
goodargs = argsreduce(cond, *((q,)+args+(scale, loc)))
scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]
place(output, cond, self._ppf(*goodargs) * scale + loc)
if output.ndim == 0:
return output[()]
return output
def isf(self, q, *args, **kwds):
"""
Inverse survival function (inverse of `sf`) at q of the given RV.
Parameters
----------
q : array_like
upper tail probability
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
x : ndarray or scalar
Quantile corresponding to the upper tail probability q.
"""
args, loc, scale = self._parse_args(*args, **kwds)
q, loc, scale = map(asarray, (q, loc, scale))
args = tuple(map(asarray, args))
cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc)
cond1 = (0 < q) & (q < 1)
cond2 = cond0 & (q == 1)
cond3 = cond0 & (q == 0)
cond = cond0 & cond1
output = valarray(shape(cond), value=self.badvalue)
lower_bound = self.a * scale + loc
upper_bound = self.b * scale + loc
place(output, cond2, argsreduce(cond2, lower_bound)[0])
place(output, cond3, argsreduce(cond3, upper_bound)[0])
if any(cond):
goodargs = argsreduce(cond, *((q,)+args+(scale, loc)))
scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]
place(output, cond, self._isf(*goodargs) * scale + loc)
if output.ndim == 0:
return output[()]
return output
def _nnlf(self, x, *args):
return -sum(self._logpdf(x, *args), axis=0)
def nnlf(self, theta, x):
'''Return the negative log-likelihood of the data `x` for parameters `theta`.
Notes
-----
This is ``-sum(log pdf(x, theta), axis=0)`` where `theta` are the
parameters (including loc and scale).
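Examples
--------
Illustrative only (assumes `scipy.stats.norm`): for a single standard
normal observation at 0, the value is -log(pdf(0)) = log(2*pi)/2.
>>> from scipy.stats import norm
>>> round(norm.nnlf((0.0, 1.0), np.array([0.0])), 4)
0.9189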
'''
try:
loc = theta[-2]
scale = theta[-1]
args = tuple(theta[:-2])
except IndexError:
raise ValueError("Not enough input arguments.")
if not self._argcheck(*args) or scale <= 0:
return inf
x = asarray((x-loc) / scale)
cond0 = (x <= self.a) | (self.b <= x)
if (any(cond0)):
return inf
else:
N = len(x)
return self._nnlf(x, *args) + N * log(scale)
def _penalized_nnlf(self, theta, x):
''' Return the penalized negative log-likelihood,
i.e., -sum(log pdf(x, theta), axis=0) plus a penalty for data points
outside the support, where theta are the parameters (including loc and scale).
'''
try:
loc = theta[-2]
scale = theta[-1]
args = tuple(theta[:-2])
except IndexError:
raise ValueError("Not enough input arguments.")
if not self._argcheck(*args) or scale <= 0:
return inf
x = asarray((x-loc) / scale)
loginf = log(_XMAX)
if np.isneginf(self.a).all() and np.isinf(self.b).all():
Nbad = 0
else:
cond0 = (x <= self.a) | (self.b <= x)
Nbad = sum(cond0)
if Nbad > 0:
x = argsreduce(~cond0, x)[0]
N = len(x)
return self._nnlf(x, *args) + N*log(scale) + Nbad * 100.0 * loginf
# return starting point for fit (shape arguments + loc + scale)
def _fitstart(self, data, args=None):
if args is None:
args = (1.0,)*self.numargs
loc, scale = self._fit_loc_scale_support(data, *args)
return args + (loc, scale)
# Return the (possibly reduced) function to optimize in order to find MLE
# estimates for the .fit method
def _reduce_func(self, args, kwds):
# First of all, convert fshapes params to fnum: eg for stats.beta,
# shapes='a, b'. To fix `a`, can specify either `f0` or `fa`.
# Convert the latter into the former.
if self.shapes:
shapes = self.shapes.replace(',', ' ').split()
for j, s in enumerate(shapes):
# NB: `or` would silently drop an explicitly fixed value of 0,
# so check for None instead.
val = kwds.pop('f' + s, None)
if val is None:
    val = kwds.pop('fix_' + s, None)
if val is not None:
key = 'f%d' % j
if key in kwds:
raise ValueError("Duplicate entry for %s." % key)
else:
kwds[key] = val
args = list(args)
Nargs = len(args)
fixedn = []
names = ['f%d' % n for n in range(Nargs - 2)] + ['floc', 'fscale']
x0 = []
for n, key in enumerate(names):
if key in kwds:
fixedn.append(n)
args[n] = kwds.pop(key)
else:
x0.append(args[n])
if len(fixedn) == 0:
func = self._penalized_nnlf
restore = None
else:
if len(fixedn) == Nargs:
raise ValueError(
"All parameters fixed. There is nothing to optimize.")
def restore(args, theta):
# Replace with theta for all numbers not in fixedn
# This allows the non-fixed values to vary, but
# we still call self.nnlf with all parameters.
i = 0
for n in range(Nargs):
if n not in fixedn:
args[n] = theta[i]
i += 1
return args
def func(theta, x):
newtheta = restore(args[:], theta)
return self._penalized_nnlf(newtheta, x)
return x0, func, restore, args
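# Fixed-parameter sketch (illustrative; assumes `scipy.stats.gamma`): with the
# shape fixed via ``fa`` (equivalently ``f0``), fit() only optimizes loc and
# scale, so the shape comes back exactly as given:
#
#     >>> from scipy.stats import gamma
#     >>> data = gamma.rvs(2.0, size=200, random_state=0)
#     >>> a_hat, loc_hat, scale_hat = gamma.fit(data, fa=2.0)
#     >>> a_hat
#     2.0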
def fit(self, data, *args, **kwds):
"""
Return MLEs for shape, location, and scale parameters from data.
MLE stands for Maximum Likelihood Estimate. Starting estimates for
the fit are given by input arguments; for any arguments not provided
with starting estimates, ``self._fitstart(data)`` is called to generate
such.
One can hold some parameters fixed to specific values by passing in
keyword arguments ``f0``, ``f1``, ..., ``fn`` (for shape parameters)
and ``floc`` and ``fscale`` (for location and scale parameters,
respectively).
Parameters
----------
data : array_like
Data to use in calculating the MLEs.
args : floats, optional
Starting value(s) for any shape-characterizing arguments (those not
provided will be determined by a call to ``_fitstart(data)``).
No default value.
kwds : floats, optional
Starting values for the location and scale parameters; no default.
Special keyword arguments are recognized as holding certain
parameters fixed:
- f0...fn : hold respective shape parameters fixed.
Alternatively, shape parameters to fix can be specified by name.
For example, if ``self.shapes == "a, b"``, ``fa`` and ``fix_a``
are equivalent to ``f0``, and ``fb`` and ``fix_b`` are
equivalent to ``f1``.
- floc : hold location parameter fixed to specified value.
- fscale : hold scale parameter fixed to specified value.
- optimizer : The optimizer to use. The optimizer must take ``func``,
and starting position as the first two arguments,
plus ``args`` (for extra arguments to pass to the
function to be optimized) and ``disp=0`` to suppress
output as keyword arguments.
Returns
-------
shape, loc, scale : tuple of floats
MLEs for any shape statistics, followed by those for location and
scale.
Notes
-----
This fit is computed by maximizing a log-likelihood function, with
penalty applied for samples outside of range of the distribution. The
returned answer is not guaranteed to be the globally optimal MLE, it
may only be locally optimal, or the optimization may fail altogether.
Examples
--------
Generate some data to fit: draw random variates from the `beta`
distribution
>>> from scipy.stats import beta
>>> a, b = 1., 2.
>>> x = beta.rvs(a, b, size=1000)
Now we can fit all four parameters (``a``, ``b``, ``loc`` and ``scale``):
>>> a1, b1, loc1, scale1 = beta.fit(x)
We can also use some prior knowledge about the dataset: let's keep
``loc`` and ``scale`` fixed:
>>> a1, b1, loc1, scale1 = beta.fit(x, floc=0, fscale=1)
>>> loc1, scale1
(0, 1)
We can also keep shape parameters fixed by using ``f``-keywords. To
keep the zero-th shape parameter ``a`` equal to 1, use ``f0=1`` or,
equivalently, ``fa=1``:
>>> a1, b1, loc1, scale1 = beta.fit(x, fa=1, floc=0, fscale=1)
>>> a1
1
"""
Narg = len(args)
if Narg > self.numargs:
raise TypeError("Too many input arguments.")
start = [None]*2
if (Narg < self.numargs) or not ('loc' in kwds and
'scale' in kwds):
# get distribution specific starting locations
start = self._fitstart(data)
args += start[Narg:-2]
loc = kwds.pop('loc', start[-2])
scale = kwds.pop('scale', start[-1])
args += (loc, scale)
x0, func, restore, args = self._reduce_func(args, kwds)
optimizer = kwds.pop('optimizer', optimize.fmin)
# convert string to function in scipy.optimize
if not callable(optimizer) and isinstance(optimizer, string_types):
if not optimizer.startswith('fmin_'):
optimizer = "fmin_"+optimizer
if optimizer == 'fmin_':
optimizer = 'fmin'
try:
optimizer = getattr(optimize, optimizer)
except AttributeError:
raise ValueError("%s is not a valid optimizer" % optimizer)
# by now kwds must be empty, since everybody took what they needed
if kwds:
raise TypeError("Unknown arguments: %s." % kwds)
vals = optimizer(func, x0, args=(ravel(data),), disp=0)
if restore is not None:
vals = restore(args, vals)
vals = tuple(vals)
return vals
def _fit_loc_scale_support(self, data, *args):
"""
Estimate loc and scale parameters from data accounting for support.
Parameters
----------
data : array_like
Data to fit.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
Returns
-------
Lhat : float
Estimated location parameter for the data.
Shat : float
Estimated scale parameter for the data.
"""
data = np.asarray(data)
# Estimate location and scale according to the method of moments.
loc_hat, scale_hat = self.fit_loc_scale(data, *args)
# Compute the support according to the shape parameters.
self._argcheck(*args)
a, b = self.a, self.b
support_width = b - a
# If the support is empty then return the moment-based estimates.
if support_width <= 0:
return loc_hat, scale_hat
# Compute the proposed support according to the loc and scale estimates.
a_hat = loc_hat + a * scale_hat
b_hat = loc_hat + b * scale_hat
# Use the moment-based estimates if they are compatible with the data.
data_a = np.min(data)
data_b = np.max(data)
if a_hat < data_a and data_b < b_hat:
return loc_hat, scale_hat
# Otherwise find other estimates that are compatible with the data.
data_width = data_b - data_a
rel_margin = 0.1
margin = data_width * rel_margin
# For a finite interval, both the location and scale
# should have interesting values.
if support_width < np.inf:
loc_hat = (data_a - a) - margin
scale_hat = (data_width + 2 * margin) / support_width
return loc_hat, scale_hat
# For a one-sided interval, use only an interesting location parameter.
if a > -np.inf:
return (data_a - a) - margin, 1
elif b < np.inf:
return (data_b - b) + margin, 1
else:
raise RuntimeError
def fit_loc_scale(self, data, *args):
"""
Estimate loc and scale parameters from data using 1st and 2nd moments.
Parameters
----------
data : array_like
Data to fit.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
Returns
-------
Lhat : float
Estimated location parameter for the data.
Shat : float
Estimated scale parameter for the data.
"""
mu, mu2 = self.stats(*args, **{'moments': 'mv'})
tmp = asarray(data)
muhat = tmp.mean()
mu2hat = tmp.var()
Shat = sqrt(mu2hat / mu2)
Lhat = muhat - Shat*mu
if not np.isfinite(Lhat):
Lhat = 0
if not (np.isfinite(Shat) and (0 < Shat)):
Shat = 1
return Lhat, Shat
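# Moment-matching sketch (assumes `scipy.stats.norm`): Shat scales the sample
# standard deviation by the standard distribution's, and Lhat shifts the
# sample mean accordingly. For the standard normal (mu=0, mu2=1) the estimates
# reduce to the sample mean and standard deviation:
#
#     >>> from scipy.stats import norm
#     >>> Lhat, Shat = norm.fit_loc_scale(np.array([0.0, 2.0, 4.0, 6.0]))
#     >>> np.allclose([Lhat, Shat], [3.0, np.sqrt(5.0)])
#     True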
@np.deprecate
def est_loc_scale(self, data, *args):
"""This function is deprecated, use self.fit_loc_scale(data) instead.
"""
return self.fit_loc_scale(data, *args)
def _entropy(self, *args):
def integ(x):
val = self._pdf(x, *args)
return entr(val)
# upper limit is often inf, so suppress warnings when integrating
olderr = np.seterr(over='ignore')
h = integrate.quad(integ, self.a, self.b)[0]
np.seterr(**olderr)
if not np.isnan(h):
return h
else:
# try with different limits if integration problems
low, upp = self.ppf([1e-10, 1. - 1e-10], *args)
if np.isinf(self.b):
upper = upp
else:
upper = self.b
if np.isinf(self.a):
lower = low
else:
lower = self.a
return integrate.quad(integ, lower, upper)[0]
def expect(self, func=None, args=(), loc=0, scale=1, lb=None, ub=None,
conditional=False, **kwds):
"""Calculate expected value of a function with respect to the
distribution.
The expected value of a function ``f(x)`` with respect to a
distribution ``dist`` is defined as::
                  ubound
        E[f(x)] = Integral(f(x) * dist.pdf(x))
                  lbound
Parameters
----------
func : callable, optional
Function for which integral is calculated. Takes only one argument.
The default is the identity mapping f(x) = x.
args : tuple, optional
Shape parameters of the distribution.
loc : float, optional
Location parameter (default=0).
scale : float, optional
Scale parameter (default=1).
lb, ub : scalar, optional
Lower and upper bound for integration. Default is set to the
support of the distribution.
conditional : bool, optional
If True, the integral is corrected by the conditional probability
of the integration interval. The return value is the expectation
of the function, conditional on being in the given interval.
Default is False.
Additional keyword arguments are passed to the integration routine.
Returns
-------
expect : float
The calculated expected value.
Notes
-----
The integration behavior of this function is inherited from
`integrate.quad`.
"""
lockwds = {'loc': loc,
'scale': scale}
self._argcheck(*args)
if func is None:
def fun(x, *args):
return x * self.pdf(x, *args, **lockwds)
else:
def fun(x, *args):
return func(x) * self.pdf(x, *args, **lockwds)
if lb is None:
lb = loc + self.a * scale
if ub is None:
ub = loc + self.b * scale
if conditional:
invfac = (self.sf(lb, *args, **lockwds)
- self.sf(ub, *args, **lockwds))
else:
invfac = 1.0
kwds['args'] = args
# Silence floating point warnings from integration.
olderr = np.seterr(all='ignore')
vals = integrate.quad(fun, lb, ub, **kwds)[0] / invfac
np.seterr(**olderr)
return vals
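# Hedged usage sketch for rv_continuous.expect above (an added note):
#   stats.norm.expect(lambda x: x**2)        # ~ 1.0, i.e. E[X**2] for N(0, 1)
#   stats.norm.expect(lambda x: x, loc=1.)   # ~ 1.0
#   stats.expon.expect(lambda x: x, lb=0, ub=1, conditional=True)
# the last call returns E[X | 0 <= X <= 1] for the standard exponential.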
## Handlers for generic case where xk and pk are given
## The _drv prefix probably means discrete random variable.
def _drv_pmf(self, xk, *args):
try:
return self.P[xk]
except KeyError:
return 0.0
def _drv_cdf(self, xk, *args):
indx = argmax((self.xk > xk), axis=-1)-1
return self.F[self.xk[indx]]
def _drv_ppf(self, q, *args):
indx = argmax((self.qvals >= q), axis=-1)
return self.Finv[self.qvals[indx]]
def _drv_nonzero(self, k, *args):
return 1
def _drv_moment(self, n, *args):
n = asarray(n)
return sum(self.xk**n[np.newaxis, ...] * self.pk, axis=0)
def _drv_moment_gen(self, t, *args):
t = asarray(t)
return sum(exp(self.xk * t[np.newaxis, ...]) * self.pk, axis=0)
def _drv2_moment(self, n, *args):
"""Non-central moment of discrete distribution."""
# many changes, originally not even a return
tot = 0.0
diff = 1e100
# pos = self.a
pos = max(0.0, 1.0*self.a)
count = 0
# handle cases with infinite support
ulimit = max(1000, (min(self.b, 1000) + max(self.a, -1000))/2.0)
llimit = min(-1000, (min(self.b, 1000) + max(self.a, -1000))/2.0)
while (pos <= self.b) and ((pos <= ulimit) or
(diff > self.moment_tol)):
diff = np.power(pos, n) * self.pmf(pos, *args)
            # use pmf because _pmf does not check support in randint, and there
            # might be problems(?) with correct self.a, self.b at this stage
tot += diff
pos += self.inc
count += 1
if self.a < 0: # handle case when self.a = -inf
diff = 1e100
pos = -self.inc
while (pos >= self.a) and ((pos >= llimit) or
(diff > self.moment_tol)):
diff = np.power(pos, n) * self.pmf(pos, *args)
# using pmf instead of _pmf, see above
tot += diff
pos -= self.inc
count += 1
return tot
def _drv2_ppfsingle(self, q, *args): # Use basic bisection algorithm
b = self.b
a = self.a
if isinf(b): # Be sure ending point is > q
b = int(max(100*q, 10))
while 1:
if b >= self.b:
qb = 1.0
break
qb = self._cdf(b, *args)
if (qb < q):
b += 10
else:
break
else:
qb = 1.0
if isinf(a): # be sure starting point < q
a = int(min(-100*q, -10))
while 1:
if a <= self.a:
qb = 0.0
break
qa = self._cdf(a, *args)
if (qa > q):
a -= 10
else:
break
else:
qa = self._cdf(a, *args)
while 1:
if (qa == q):
return a
if (qb == q):
return b
if b <= a+1:
# testcase: return wrong number at lower index
                # python -c "from scipy.stats import zipf; print(zipf.ppf(0.01, 2))"  (wrong)
                # python -c "from scipy.stats import zipf; print(zipf.ppf([0.01, 0.61, 0.77, 0.83], 2))"
                # python -c "from scipy.stats import logser; print(logser.ppf([0.1, 0.66, 0.86, 0.93], 0.6))"
if qa > q:
return a
else:
return b
c = int((a+b)/2.0)
qc = self._cdf(c, *args)
if (qc < q):
if a != c:
a = c
else:
raise RuntimeError('updating stopped, endless loop')
qa = qc
elif (qc > q):
if b != c:
b = c
else:
raise RuntimeError('updating stopped, endless loop')
qb = qc
else:
return c
def entropy(pk, qk=None, base=None):
"""Calculate the entropy of a distribution for given probability values.
If only probabilities `pk` are given, the entropy is calculated as
``S = -sum(pk * log(pk), axis=0)``.
If `qk` is not None, then compute the Kullback-Leibler divergence
``S = sum(pk * log(pk / qk), axis=0)``.
This routine will normalize `pk` and `qk` if they don't sum to 1.
Parameters
----------
pk : sequence
Defines the (discrete) distribution. ``pk[i]`` is the (possibly
unnormalized) probability of event ``i``.
qk : sequence, optional
Sequence against which the relative entropy is computed. Should be in
the same format as `pk`.
base : float, optional
The logarithmic base to use, defaults to ``e`` (natural logarithm).
Returns
-------
S : float
The calculated entropy.
"""
pk = asarray(pk)
pk = 1.0*pk / sum(pk, axis=0)
if qk is None:
vec = entr(pk)
else:
qk = asarray(qk)
if len(qk) != len(pk):
raise ValueError("qk and pk must have same length.")
qk = 1.0*qk / sum(qk, axis=0)
vec = kl_div(pk, qk)
S = sum(vec, axis=0)
if base is not None:
S /= log(base)
return S
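# Hedged usage sketch for the helper above (an added note):
#   entropy([0.5, 0.5])                 # ln(2) ~= 0.693 nats
#   entropy([0.5, 0.5], base=2)         # 1.0 bit
#   entropy([0.9, 0.1], qk=[0.5, 0.5])  # KL divergence D(p || q)
# inputs are normalized first, so unnormalized counts such as [9, 1] also work.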
# Must over-ride one of _pmf or _cdf or pass in
# x_k, p(x_k) lists in initialization
class rv_discrete(rv_generic):
"""
A generic discrete random variable class meant for subclassing.
`rv_discrete` is a base class to construct specific distribution classes
and instances for discrete random variables. It can also be used
to construct an arbitrary distribution defined by a list of support
points and corresponding probabilities.
Parameters
----------
a : float, optional
Lower bound of the support of the distribution, default: 0
b : float, optional
Upper bound of the support of the distribution, default: plus infinity
moment_tol : float, optional
The tolerance for the generic calculation of moments.
values : tuple of two array_like, optional
``(xk, pk)`` where ``xk`` are integers with non-zero
probabilities ``pk`` with ``sum(pk) = 1``.
inc : integer, optional
Increment for the support of the distribution.
Default is 1. (other values have not been tested)
badvalue : float, optional
        The value in the result arrays that indicates a value for which
        some argument restriction is violated. Default is np.nan.
name : str, optional
The name of the instance. This string is used to construct the default
example for distributions.
longname : str, optional
This string is used as part of the first line of the docstring returned
when a subclass has no docstring of its own. Note: `longname` exists
for backwards compatibility, do not use for new subclasses.
shapes : str, optional
The shape of the distribution. For example "m, n" for a distribution
        that takes two integers as the two shape arguments for all its methods.
If not provided, shape parameters will be inferred from
the signatures of the private methods, ``_pmf`` and ``_cdf`` of
the instance.
extradoc : str, optional
This string is used as the last part of the docstring returned when a
subclass has no docstring of its own. Note: `extradoc` exists for
backwards compatibility, do not use for new subclasses.
seed : None or int or ``numpy.random.RandomState`` instance, optional
This parameter defines the RandomState object to use for drawing
random variates.
If None, the global np.random state is used.
If integer, it is used to seed the local RandomState instance.
Default is None.
Methods
-------
rvs
pmf
logpmf
cdf
logcdf
sf
logsf
ppf
isf
moment
stats
entropy
expect
median
mean
std
var
interval
__call__
Notes
-----
This class is similar to `rv_continuous`, the main differences being:
- the support of the distribution is a set of integers
- instead of the probability density function, ``pdf`` (and the
corresponding private ``_pdf``), this class defines the
*probability mass function*, `pmf` (and the corresponding
private ``_pmf``.)
- scale parameter is not defined.
To create a new discrete distribution, we would do the following:
>>> from scipy.stats import rv_discrete
>>> class poisson_gen(rv_discrete):
... "Poisson distribution"
... def _pmf(self, k, mu):
... return exp(-mu) * mu**k / factorial(k)
and create an instance::
>>> poisson = poisson_gen(name="poisson")
Note that above we defined the Poisson distribution in the standard form.
Shifting the distribution can be done by providing the ``loc`` parameter
to the methods of the instance. For example, ``poisson.pmf(x, mu, loc)``
delegates the work to ``poisson._pmf(x-loc, mu)``.
**Discrete distributions from a list of probabilities**
Alternatively, you can construct an arbitrary discrete rv defined
on a finite set of values ``xk`` with ``Prob{X=xk} = pk`` by using the
``values`` keyword argument to the `rv_discrete` constructor.
Examples
--------
Custom made discrete distribution:
>>> from scipy import stats
>>> xk = np.arange(7)
>>> pk = (0.1, 0.2, 0.3, 0.1, 0.1, 0.0, 0.2)
>>> custm = stats.rv_discrete(name='custm', values=(xk, pk))
>>>
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots(1, 1)
>>> ax.plot(xk, custm.pmf(xk), 'ro', ms=12, mec='r')
>>> ax.vlines(xk, 0, custm.pmf(xk), colors='r', lw=4)
>>> plt.show()
Random number generation:
>>> R = custm.rvs(size=100)
"""
def __init__(self, a=0, b=inf, name=None, badvalue=None,
moment_tol=1e-8, values=None, inc=1, longname=None,
shapes=None, extradoc=None, seed=None):
super(rv_discrete, self).__init__(seed)
# cf generic freeze
self._ctor_param = dict(
a=a, b=b, name=name, badvalue=badvalue,
moment_tol=moment_tol, values=values, inc=inc,
longname=longname, shapes=shapes, extradoc=extradoc, seed=seed)
if badvalue is None:
badvalue = nan
if name is None:
name = 'Distribution'
self.badvalue = badvalue
self.a = a
self.b = b
self.name = name
self.moment_tol = moment_tol
self.inc = inc
self._cdfvec = vectorize(self._cdf_single, otypes='d')
self.return_integers = 1
self.vecentropy = vectorize(self._entropy)
self.shapes = shapes
self.extradoc = extradoc
if values is not None:
self.xk, self.pk = values
self.return_integers = 0
indx = argsort(ravel(self.xk))
self.xk = take(ravel(self.xk), indx, 0)
self.pk = take(ravel(self.pk), indx, 0)
self.a = self.xk[0]
self.b = self.xk[-1]
self.P = dict(zip(self.xk, self.pk))
self.qvals = np.cumsum(self.pk, axis=0)
self.F = dict(zip(self.xk, self.qvals))
decreasing_keys = sorted(self.F.keys(), reverse=True)
self.Finv = dict((self.F[k], k) for k in decreasing_keys)
self._ppf = instancemethod(vectorize(_drv_ppf, otypes='d'),
self, rv_discrete)
self._pmf = instancemethod(vectorize(_drv_pmf, otypes='d'),
self, rv_discrete)
self._cdf = instancemethod(vectorize(_drv_cdf, otypes='d'),
self, rv_discrete)
self._nonzero = instancemethod(_drv_nonzero, self, rv_discrete)
self.generic_moment = instancemethod(_drv_moment,
self, rv_discrete)
self.moment_gen = instancemethod(_drv_moment_gen,
self, rv_discrete)
self.shapes = ' ' # bypass inspection
self._construct_argparser(meths_to_inspect=[self._pmf],
locscale_in='loc=0',
# scale=1 for discrete RVs
locscale_out='loc, 1')
else:
self._construct_argparser(meths_to_inspect=[self._pmf, self._cdf],
locscale_in='loc=0',
# scale=1 for discrete RVs
locscale_out='loc, 1')
# nin correction needs to be after we know numargs
# correct nin for generic moment vectorization
_vec_generic_moment = vectorize(_drv2_moment, otypes='d')
_vec_generic_moment.nin = self.numargs + 2
self.generic_moment = instancemethod(_vec_generic_moment,
self, rv_discrete)
# backwards compat. was removed in 0.14.0, put back but
# deprecated in 0.14.1:
self.vec_generic_moment = np.deprecate(_vec_generic_moment,
"vec_generic_moment",
"generic_moment")
# correct nin for ppf vectorization
_vppf = vectorize(_drv2_ppfsingle, otypes='d')
            _vppf.nin = self.numargs + 2  # +2 accounts for q and self
self._ppfvec = instancemethod(_vppf,
self, rv_discrete)
# now that self.numargs is defined, we can adjust nin
self._cdfvec.nin = self.numargs + 1
# generate docstring for subclass instances
if longname is None:
            if name[0] in 'aeiouAEIOU':
hstr = "An "
else:
hstr = "A "
longname = hstr + name
if sys.flags.optimize < 2:
# Skip adding docstrings if interpreter is run with -OO
if self.__doc__ is None:
self._construct_default_doc(longname=longname,
extradoc=extradoc,
docdict=docdict_discrete,
discrete='discrete')
else:
dct = dict(distdiscrete)
self._construct_doc(docdict_discrete, dct.get(self.name))
#discrete RV do not have the scale parameter, remove it
self.__doc__ = self.__doc__.replace(
'\n scale : array_like, '
'optional\n scale parameter (default=1)', '')
def _nonzero(self, k, *args):
return floor(k) == k
def _pmf(self, k, *args):
return self._cdf(k, *args) - self._cdf(k-1, *args)
def _logpmf(self, k, *args):
return log(self._pmf(k, *args))
def _cdf_single(self, k, *args):
m = arange(int(self.a), k+1)
return sum(self._pmf(m, *args), axis=0)
def _cdf(self, x, *args):
k = floor(x)
return self._cdfvec(k, *args)
# generic _logcdf, _sf, _logsf, _ppf, _isf, _rvs defined in rv_generic
def rvs(self, *args, **kwargs):
"""
Random variates of given type.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
size : int or tuple of ints, optional
Defining number of random variates (Default is 1). Note that `size`
has to be given as keyword, not as positional argument.
random_state : None or int or ``np.random.RandomState`` instance, optional
If int or RandomState, use it for drawing the random variates.
If None, rely on ``self.random_state``.
Default is None.
Returns
-------
rvs : ndarray or scalar
Random variates of given `size`.
"""
kwargs['discrete'] = True
return super(rv_discrete, self).rvs(*args, **kwargs)
def pmf(self, k, *args, **kwds):
"""
Probability mass function at k of the given RV.
Parameters
----------
k : array_like
Quantiles.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
Location parameter (default=0).
Returns
-------
pmf : array_like
Probability mass function evaluated at k
"""
args, loc, _ = self._parse_args(*args, **kwds)
k, loc = map(asarray, (k, loc))
args = tuple(map(asarray, args))
k = asarray((k-loc))
cond0 = self._argcheck(*args)
cond1 = (k >= self.a) & (k <= self.b) & self._nonzero(k, *args)
cond = cond0 & cond1
output = zeros(shape(cond), 'd')
place(output, (1-cond0) + np.isnan(k), self.badvalue)
if any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output, cond, np.clip(self._pmf(*goodargs), 0, 1))
if output.ndim == 0:
return output[()]
return output
def logpmf(self, k, *args, **kwds):
"""
Log of the probability mass function at k of the given RV.
Parameters
----------
k : array_like
Quantiles.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter. Default is 0.
Returns
-------
logpmf : array_like
Log of the probability mass function evaluated at k.
"""
args, loc, _ = self._parse_args(*args, **kwds)
k, loc = map(asarray, (k, loc))
args = tuple(map(asarray, args))
k = asarray((k-loc))
cond0 = self._argcheck(*args)
cond1 = (k >= self.a) & (k <= self.b) & self._nonzero(k, *args)
cond = cond0 & cond1
output = empty(shape(cond), 'd')
output.fill(NINF)
place(output, (1-cond0) + np.isnan(k), self.badvalue)
if any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output, cond, self._logpmf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def cdf(self, k, *args, **kwds):
"""
Cumulative distribution function of the given RV.
Parameters
----------
k : array_like, int
Quantiles.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
Returns
-------
cdf : ndarray
Cumulative distribution function evaluated at `k`.
"""
args, loc, _ = self._parse_args(*args, **kwds)
k, loc = map(asarray, (k, loc))
args = tuple(map(asarray, args))
k = asarray((k-loc))
cond0 = self._argcheck(*args)
cond1 = (k >= self.a) & (k < self.b)
cond2 = (k >= self.b)
cond = cond0 & cond1
output = zeros(shape(cond), 'd')
place(output, (1-cond0) + np.isnan(k), self.badvalue)
place(output, cond2*(cond0 == cond0), 1.0)
if any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output, cond, np.clip(self._cdf(*goodargs), 0, 1))
if output.ndim == 0:
return output[()]
return output
def logcdf(self, k, *args, **kwds):
"""
Log of the cumulative distribution function at k of the given RV.
Parameters
----------
k : array_like, int
Quantiles.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
Returns
-------
logcdf : array_like
Log of the cumulative distribution function evaluated at k.
"""
args, loc, _ = self._parse_args(*args, **kwds)
k, loc = map(asarray, (k, loc))
args = tuple(map(asarray, args))
k = asarray((k-loc))
cond0 = self._argcheck(*args)
cond1 = (k >= self.a) & (k < self.b)
cond2 = (k >= self.b)
cond = cond0 & cond1
output = empty(shape(cond), 'd')
output.fill(NINF)
place(output, (1-cond0) + np.isnan(k), self.badvalue)
place(output, cond2*(cond0 == cond0), 0.0)
if any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output, cond, self._logcdf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def sf(self, k, *args, **kwds):
"""
Survival function (1 - `cdf`) at k of the given RV.
Parameters
----------
k : array_like
Quantiles.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
Returns
-------
sf : array_like
Survival function evaluated at k.
"""
args, loc, _ = self._parse_args(*args, **kwds)
k, loc = map(asarray, (k, loc))
args = tuple(map(asarray, args))
k = asarray(k-loc)
cond0 = self._argcheck(*args)
cond1 = (k >= self.a) & (k < self.b)
cond2 = (k < self.a) & cond0
cond = cond0 & cond1
output = zeros(shape(cond), 'd')
place(output, (1-cond0) + np.isnan(k), self.badvalue)
place(output, cond2, 1.0)
if any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output, cond, np.clip(self._sf(*goodargs), 0, 1))
if output.ndim == 0:
return output[()]
return output
def logsf(self, k, *args, **kwds):
"""
Log of the survival function of the given RV.
Returns the log of the "survival function," defined as 1 - `cdf`,
evaluated at `k`.
Parameters
----------
k : array_like
Quantiles.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
Returns
-------
logsf : ndarray
Log of the survival function evaluated at `k`.
"""
args, loc, _ = self._parse_args(*args, **kwds)
k, loc = map(asarray, (k, loc))
args = tuple(map(asarray, args))
k = asarray(k-loc)
cond0 = self._argcheck(*args)
cond1 = (k >= self.a) & (k < self.b)
cond2 = (k < self.a) & cond0
cond = cond0 & cond1
output = empty(shape(cond), 'd')
output.fill(NINF)
place(output, (1-cond0) + np.isnan(k), self.badvalue)
place(output, cond2, 0.0)
if any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output, cond, self._logsf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def ppf(self, q, *args, **kwds):
"""
Percent point function (inverse of `cdf`) at q of the given RV.
Parameters
----------
q : array_like
Lower tail probability.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
Returns
-------
k : array_like
Quantile corresponding to the lower tail probability, q.
"""
args, loc, _ = self._parse_args(*args, **kwds)
q, loc = map(asarray, (q, loc))
args = tuple(map(asarray, args))
cond0 = self._argcheck(*args) & (loc == loc)
cond1 = (q > 0) & (q < 1)
cond2 = (q == 1) & cond0
cond = cond0 & cond1
output = valarray(shape(cond), value=self.badvalue, typecode='d')
        # output type 'd' to handle nan and inf
place(output, (q == 0)*(cond == cond), self.a-1)
place(output, cond2, self.b)
if any(cond):
goodargs = argsreduce(cond, *((q,)+args+(loc,)))
loc, goodargs = goodargs[-1], goodargs[:-1]
place(output, cond, self._ppf(*goodargs) + loc)
if output.ndim == 0:
return output[()]
return output
def isf(self, q, *args, **kwds):
"""
Inverse survival function (inverse of `sf`) at q of the given RV.
Parameters
----------
q : array_like
Upper tail probability.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
Returns
-------
k : ndarray or scalar
Quantile corresponding to the upper tail probability, q.
"""
args, loc, _ = self._parse_args(*args, **kwds)
q, loc = map(asarray, (q, loc))
args = tuple(map(asarray, args))
cond0 = self._argcheck(*args) & (loc == loc)
cond1 = (q > 0) & (q < 1)
cond2 = (q == 1) & cond0
cond = cond0 & cond1
# same problem as with ppf; copied from ppf and changed
output = valarray(shape(cond), value=self.badvalue, typecode='d')
        # output type 'd' to handle nan and inf
place(output, (q == 0)*(cond == cond), self.b)
place(output, cond2, self.a-1)
# call place only if at least 1 valid argument
if any(cond):
goodargs = argsreduce(cond, *((q,)+args+(loc,)))
loc, goodargs = goodargs[-1], goodargs[:-1]
# PB same as ticket 766
place(output, cond, self._isf(*goodargs) + loc)
if output.ndim == 0:
return output[()]
return output
def _entropy(self, *args):
if hasattr(self, 'pk'):
return entropy(self.pk)
else:
mu = int(self.stats(*args, **{'moments': 'm'}))
val = self.pmf(mu, *args)
ent = entr(val)
k = 1
term = 1.0
while (abs(term) > _EPS):
val = self.pmf(mu+k, *args)
term = entr(val)
val = self.pmf(mu-k, *args)
term += entr(val)
k += 1
ent += term
return ent
def expect(self, func=None, args=(), loc=0, lb=None, ub=None,
conditional=False):
"""
Calculate expected value of a function with respect to the distribution
for discrete distribution.
Parameters
----------
func : callable, optional
Function for which the expectation value is calculated.
Takes only one argument.
The default is the identity mapping f(k) = k.
args : tuple, optional
Shape parameters of the distribution.
loc : float, optional
Location parameter.
Default is 0.
lb, ub : int, optional
Lower and upper bound for integration, default is set to the
            support of the distribution, inclusive (``lb <= k <= ub``).
conditional : bool, optional
If true then the expectation is corrected by the conditional
probability of the summation interval. The return value is the
expectation of the function, `func`, conditional on being in
            the given interval (k such that ``lb <= k <= ub``).
Default is False.
Returns
-------
expect : float
Expected value.
Notes
-----
        * the function is not vectorized
        * accuracy: uses self.moment_tol as the stopping criterion;
          for a heavy-tailed distribution such as zipf(4), the accuracy of
          the mean and variance in this example is only about 1e-5, and
          tightening moment_tol makes zipf very slow
        * suppnmin=100 is an internal parameter for the minimum number of
          points to evaluate; it could be added as a keyword parameter to
          evaluate functions with non-monotonic shapes, and the evaluated
          points include the integers in (-suppnmin, suppnmin)
        * maxcount=1000 limits the number of points that are evaluated,
          breaking the loop for infinite sums
          (a maximum of suppnmin+1000 positive plus suppnmin+1000 negative
          integers are evaluated)
"""
# moment_tol = 1e-12 # increase compared to self.moment_tol,
# too slow for only small gain in precision for zipf
# avoid endless loop with unbound integral, eg. var of zipf(2)
maxcount = 1000
suppnmin = 100 # minimum number of points to evaluate (+ and -)
if func is None:
def fun(x):
# loc and args from outer scope
return (x+loc)*self._pmf(x, *args)
else:
def fun(x):
# loc and args from outer scope
return func(x+loc)*self._pmf(x, *args)
# used pmf because _pmf does not check support in randint and there
# might be problems(?) with correct self.a, self.b at this stage maybe
# not anymore, seems to work now with _pmf
self._argcheck(*args) # (re)generate scalar self.a and self.b
if lb is None:
lb = (self.a)
else:
lb = lb - loc # convert bound for standardized distribution
if ub is None:
ub = (self.b)
else:
ub = ub - loc # convert bound for standardized distribution
if conditional:
invfac = self.sf(lb-1, *args) - self.sf(ub, *args)
else:
invfac = 1.0
tot = 0.0
low, upp = self._ppf(0.001, *args), self._ppf(0.999, *args)
low = max(min(-suppnmin, low), lb)
upp = min(max(suppnmin, upp), ub)
supp = np.arange(low, upp+1, self.inc) # check limits
tot = np.sum(fun(supp))
diff = 1e100
pos = upp + self.inc
count = 0
# handle cases with infinite support
while (pos <= ub) and (diff > self.moment_tol) and count <= maxcount:
diff = fun(pos)
tot += diff
pos += self.inc
count += 1
if self.a < 0: # handle case when self.a = -inf
diff = 1e100
pos = low - self.inc
while ((pos >= lb) and (diff > self.moment_tol) and
count <= maxcount):
diff = fun(pos)
tot += diff
pos -= self.inc
count += 1
if count > maxcount:
warnings.warn('expect(): sum did not converge', RuntimeWarning)
return tot/invfac
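# Hedged usage sketch for rv_discrete.expect above (an added note): discrete
# expectations are computed by summing over the support, e.g. for Poisson(mu=2):
#   stats.poisson.expect(lambda k: k, args=(2,))     # ~ 2.0 (the mean)
#   stats.poisson.expect(lambda k: k**2, args=(2,))  # ~ 6.0 (var + mean**2)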
def get_distribution_names(namespace_pairs, rv_base_class):
"""
Collect names of statistical distributions and their generators.
Parameters
----------
namespace_pairs : sequence
A snapshot of (name, value) pairs in the namespace of a module.
rv_base_class : class
The base class of random variable generator classes in a module.
Returns
-------
distn_names : list of strings
Names of the statistical distributions.
distn_gen_names : list of strings
Names of the generators of the statistical distributions.
Note that these are not simply the names of the statistical
distributions, with a _gen suffix added.
"""
distn_names = []
distn_gen_names = []
for name, value in namespace_pairs:
if name.startswith('_'):
continue
if name.endswith('_gen') and issubclass(value, rv_base_class):
distn_gen_names.append(name)
if isinstance(value, rv_base_class):
distn_names.append(name)
return distn_names, distn_gen_names
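# Hedged usage sketch (an added note): this helper is typically called at the
# bottom of a distributions module on a snapshot of its namespace, e.g.
#   pairs = list(globals().copy().items())
#   _distn_names, _distn_gen_names = get_distribution_names(pairs, rv_discrete)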
|
bsd-3-clause
|
Ichaelus/Github-Classifier
|
Application/Models/ClassificationModules/multinomialnbreadmeonly.py
|
1
|
2861
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from Models.FeatureProcessing import *
import sklearn
from sklearn.naive_bayes import MultinomialNB
import numpy as np
import abc
from ClassificationModule import ClassificationModule
class multinomialnbreadmeonly(ClassificationModule):
"""A Multinomial Naive Bayes"""
def __init__(self, text_corpus):
ClassificationModule.__init__(self, "Readme Only Multinomial Naive Bayes", "A Multinomial Naive Bayes-Classifier")
# Create vectorizer and fit on all available Descriptions
self.vectorizer = getTextVectorizer(8000) # Maximum of different columns
corpus = []
for description in text_corpus:
corpus.append(process_text(description))
self.vectorizer.fit(corpus)
self.clf = MultinomialNB()
        print("\t-", self.name)
def resetAllTraining(self):
"""Reset classification module to status before training"""
self.clf = sklearn.base.clone(self.clf)
    def trainOnSample(self, sample, shuffle=True, verbose=True):
        """Train (incrementally) on a sample, possibly together with a certain
        amount of old data so that overfitting to the new data is avoided."""
readme_vec = self.formatInputData(sample)
label_index = getLabelIndex(sample)
        # note: MultinomialNB.fit retrains from scratch; partial_fit would be
        # required for genuinely incremental updates
        return self.clf.fit(readme_vec, np.expand_dims(label_index, axis=0))
    def train(self, samples, shuffle=True, verbose=True):
        """Train on a list of samples. Further parameters may be needed
        (nb_epoch, learning_rate, ...)."""
train_samples = []
        train_labels = []
for sample in samples:
formatted_sample = self.formatInputData(sample)[0].tolist()
train_samples.append(formatted_sample)
            train_labels.append(getLabelIndex(sample))
        train_labels = np.asarray(train_labels)
        train_result = self.clf.fit(train_samples, train_labels)
self.isTrained = True
return train_result
    def predictLabel(self, sample):
        """Return how the classifier would classify a given sample."""
if not self.isTrained:
return 0
sample = self.formatInputData(sample)
return self.clf.predict(sample)[0]
    def predictLabelAndProbability(self, sample):
        """Return the probability the module assigns to each label"""
if not self.isTrained:
return [0, 0, 0, 0, 0, 0, 0, 0]
sample = self.formatInputData(sample)
prediction = self.clf.predict_proba(sample)[0]
return [np.argmax(prediction)] + list(prediction)
def formatInputData(self, sample):
"""Extract readme and transform to vector"""
sd = getReadme(sample)
# Returns numpy array which contains 1 array with features
return self.vectorizer.transform([sd]).toarray()
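# Hedged usage sketch (an added note; the exact sample format is defined by
# Models.FeatureProcessing, which is not shown here):
#   clf = multinomialnbreadmeonly(readme_corpus)
#   clf.train(samples)
#   predicted = clf.predictLabel(samples[0])
#   label_and_probs = clf.predictLabelAndProbability(samples[0])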
|
mit
|
0bserver07/One-Hundred-Layers-Tiramisu
|
train-tiramisu.py
|
1
|
4182
|
from __future__ import absolute_import
from __future__ import print_function
import os
import keras.models as models
from keras.layers.core import Layer, Dense, Dropout, Activation, Flatten, Reshape, Permute
from keras.layers.convolutional import Conv2D, MaxPooling2D, UpSampling2D, Cropping2D
from keras.layers.normalization import BatchNormalization
from keras.layers import Input, concatenate, merge, Conv2D, Conv2DTranspose
from keras.optimizers import RMSprop, Adam, SGD
from keras.callbacks import ModelCheckpoint
from keras.regularizers import l2
from keras.models import Model
from keras import regularizers
from keras import backend as K
from keras import callbacks
import math
# remote = callbacks.RemoteMonitor(root='http://localhost:9000', path='/publish/epoch/end/', field='data', headers=None)
# early_stopping = callbacks.EarlyStopping(monitor='val_loss', patience=50, verbose=0, mode='auto')
# tensor_board = callbacks.TensorBoard(log_dir='./logs', histogram_freq=5, write_graph=True, write_images=True)
K.set_image_dim_ordering('tf')
import cv2
import numpy as np
import json
np.random.seed(7) # 0bserver07 for reproducibility
class_weighting = [
0.2595,
0.1826,
4.5640,
0.1417,
0.5051,
0.3826,
9.6446,
1.8418,
6.6823,
6.2478,
3.0,
7.3614
]
# load the data
train_data = np.load('./data/train_data.npy')
train_data = train_data.reshape((367, 224, 224, 3))
train_label = np.load('./data/train_label.npy')#[:,:,:-1]
test_data = np.load('./data/test_data.npy')
test_data = test_data.reshape((233, 224, 224, 3))
test_label = np.load('./data/test_label.npy')#[:,:,:-1]
# test_label = to_categorical(test_label, num_classes=None)
# load the model:
with open('tiramisu_fc_dense67_model_12_func.json') as model_file:
tiramisu = models.model_from_json(model_file.read())
# section 4.1 from the paper
from keras.callbacks import LearningRateScheduler
# learning rate schedule
def step_decay(epoch):
initial_lrate = 0.001
drop = 0.00001
epochs_drop = 10.0
lrate = initial_lrate * math.pow(drop, math.floor((1+epoch)/epochs_drop))
return lrate
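# A quick check of the schedule above (an added, hedged note): with drop=1e-5
# applied every 10 epochs the rate collapses almost immediately, e.g.
#   step_decay(0) == 0.001
#   step_decay(9) == 0.001 * 1e-5 == 1e-8
# so `drop` was probably intended to be a multiplicative factor nearer 0.1-0.5.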
lrate = LearningRateScheduler(step_decay)
# tiramisu.load_weights("weights/prop_tiramisu_weights_67_12_func_10-e5_decay.best.hdf5")
optimizer = RMSprop(lr=0.001, decay=0.0000001)
# optimizer = SGD(lr=0.01)
# optimizer = Adam(lr=1e-3, decay=0.995)
tiramisu.compile(loss="categorical_crossentropy", optimizer=optimizer, metrics=["accuracy"])
# learning schedule callback
# lrate = LearningRateScheduler(step_decay)
# checkpoint 278
TensorBoard = callbacks.TensorBoard(log_dir='./logs', histogram_freq=5, write_graph=True, write_images=True)
# ReduceLROnPlateau = callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=10, verbose=0, mode='auto', epsilon=0.0001, cooldown=0, min_lr=0)
filepath="weights/prop_tiramisu_weights_67_12_func_10-e7_decay.best.hdf5"
checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=2,
save_best_only=True, save_weights_only=False, mode='max')
callbacks_list = [checkpoint]
nb_epoch = 150
batch_size = 2
# Fit the model
history = tiramisu.fit(train_data, train_label, batch_size=batch_size, epochs=nb_epoch,
callbacks=callbacks_list, class_weight=class_weighting,verbose=1, validation_data=(test_data, test_label), shuffle=True) # validation_split=0.33
# This saves the trained model weights to a file tagged with the number of epochs
tiramisu.save_weights('weights/prop_tiramisu_weights_67_12_func_10-e7_decay{}.hdf5'.format(nb_epoch))
import matplotlib.pyplot as plt
# list all data in history
print(history.history.keys())
# summarize history for accuracy
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
# summarize history for loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
|
mit
|
Ziqi-Li/bknqgis
|
pandas/pandas/tests/indexes/period/test_formats.py
|
15
|
1545
|
from pandas import PeriodIndex
import numpy as np
import pandas.util.testing as tm
import pandas as pd
def test_to_native_types():
index = PeriodIndex(['2017-01-01', '2017-01-02',
'2017-01-03'], freq='D')
# First, with no arguments.
expected = np.array(['2017-01-01', '2017-01-02',
'2017-01-03'], dtype='<U10')
result = index.to_native_types()
tm.assert_numpy_array_equal(result, expected)
# No NaN values, so na_rep has no effect
result = index.to_native_types(na_rep='pandas')
tm.assert_numpy_array_equal(result, expected)
# Make sure slicing works
expected = np.array(['2017-01-01', '2017-01-03'], dtype='<U10')
result = index.to_native_types([0, 2])
tm.assert_numpy_array_equal(result, expected)
# Make sure date formatting works
expected = np.array(['01-2017-01', '01-2017-02',
'01-2017-03'], dtype='<U10')
result = index.to_native_types(date_format='%m-%Y-%d')
tm.assert_numpy_array_equal(result, expected)
# NULL object handling should work
index = PeriodIndex(['2017-01-01', pd.NaT, '2017-01-03'], freq='D')
expected = np.array(['2017-01-01', 'NaT', '2017-01-03'], dtype=object)
result = index.to_native_types()
tm.assert_numpy_array_equal(result, expected)
expected = np.array(['2017-01-01', 'pandas',
'2017-01-03'], dtype=object)
result = index.to_native_types(na_rep='pandas')
tm.assert_numpy_array_equal(result, expected)
|
gpl-2.0
|
CompPhysics/MachineLearning
|
doc/src/DimRed/Programs/cancerownlogreg.py
|
1
|
1284
|
import matplotlib.pyplot as plt
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_breast_cancer
from sklearn.linear_model import LogisticRegression
cancer = load_breast_cancer()
fig, axes = plt.subplots(15,2,figsize=(10,20))
malignant = cancer.data[cancer.target == 0]
benign = cancer.data[cancer.target == 1]
ax = axes.ravel()
for i in range(30):
_, bins = np.histogram(cancer.data[:,i], bins =50)
ax[i].hist(malignant[:,i], bins = bins, alpha = 0.5)
ax[i].hist(benign[:,i], bins = bins, alpha = 0.5)
ax[i].set_title(cancer.feature_names[i])
ax[i].set_yticks(())
ax[0].set_xlabel("Feature magnitude")
ax[0].set_ylabel("Frequency")
ax[0].legend(["Malignant", "Benign"], loc ="best")
fig.tight_layout()
plt.show()
X_train, X_test, y_train, y_test = train_test_split(cancer.data,cancer.target,random_state=0)
logreg = LogisticRegression()
logreg.fit(X_train, y_train)
print("Test set accuracy from Logistic Regression: {:.2f}".format(logreg.score(X_test,y_test)))
# Gradient-descent Ridge regression demo. The original script used m, xb, y,
# lmbda and beta_linreg without defining them; a minimal synthetic setup is
# added here (an assumption) so the loop actually runs.
m = 100
x_gd = np.random.rand(m, 1)
y = 4 + 3*x_gd + np.random.randn(m, 1)
xb = np.c_[np.ones((m, 1)), x_gd]  # design matrix with intercept column
lmbda = 0.01                       # ridge penalty
beta_linreg = np.linalg.inv(xb.T @ xb + lmbda*np.eye(2)) @ (xb.T @ y)
beta = np.random.randn(2,1)
eta = 0.1
Niterations = 100
for iter in range(Niterations):
    gradients = 2.0/m*xb.T @ (xb @ (beta)-y)+2*lmbda*beta
    beta -= eta*gradients
print(beta)
ypredict = xb @ beta
ypredict2 = xb @ beta_linreg
|
cc0-1.0
|
davidnmurray/iris
|
lib/iris/tests/unit/plot/__init__.py
|
1
|
2524
|
# (C) British Crown Copyright 2014 - 2016, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""Unit tests for the :mod:`iris.plot` module."""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
# Import iris.tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests
from iris.tests.stock import simple_2d, lat_lon_cube
from iris.coords import AuxCoord
@tests.skip_plot
class TestGraphicStringCoord(tests.GraphicsTest):
def setUp(self):
super(TestGraphicStringCoord, self).setUp()
self.cube = simple_2d(with_bounds=True)
self.cube.add_aux_coord(AuxCoord(list('abcd'),
long_name='str_coord'), 1)
self.lat_lon_cube = lat_lon_cube()
def tick_loc_and_label(self, axis_name, axes=None):
# Intentional lazy import so that subclasses can have an opportunity
# to change the backend.
import matplotlib.pyplot as plt
# Draw the plot to 'fix' the ticks.
if axes:
axes.figure.canvas.draw()
else:
axes = plt.gca()
plt.draw()
axis = getattr(axes, axis_name)
locations = axis.get_majorticklocs()
labels = [tick.get_text() for tick in axis.get_ticklabels()]
return list(zip(locations, labels))
def assertBoundsTickLabels(self, axis, axes=None):
actual = self.tick_loc_and_label(axis, axes)
expected = [(-1.0, ''), (0.0, 'a'), (1.0, 'b'),
(2.0, 'c'), (3.0, 'd'), (4.0, '')]
self.assertEqual(expected, actual)
def assertPointsTickLabels(self, axis, axes=None):
actual = self.tick_loc_and_label(axis, axes)
expected = [(0.0, 'a'), (1.0, 'b'), (2.0, 'c'), (3.0, 'd')]
self.assertEqual(expected, actual)
|
gpl-3.0
|
nicholaschris/landsatpy
|
project_clouds.py
|
1
|
7429
|
import os
import models
import views
import config
import count_clouds
import utils
import cloud_detection
# import cloud_shadow_morphology
import skimage
import numpy as np
from numpy import ma
from matplotlib import pyplot as plt
from math import *
import imp
imp.reload(count_clouds)
pcl = cloud_detection.calc_pcl()
labels, nbr_objects = count_clouds.label_clouds(pcl,3,9)
label_no = 4
data_dir = config.data_dir
path = config.path
row = config.row
time = config.time
band_option = config.band_option
b = band_option
# Scene construction restored from the commented-out lines: th0/phi0 and the
# brightness-temperature array btc below depend on it
Scene = models.NetcdfModel(data_dir, path, row, time)
Scene.get_variables_list()
btc = utils.convert_to_celsius(Scene.data('BT_B10'))
btc[np.where(btc<0)] = 0
btc[np.where(btc>27)] = 27
th0 = Scene.theta_0
phi0 = pi - (Scene.phi_0)
# _bpcl, pcs_buffer = cloud_shadow_morphology.main()
_bpcl = None
def offset_sign(x_offset, y_offset):
if x_offset <= 0:
x_offset_neg, x_offset_pos = x_offset, 0
if y_offset <= 0:
y_offset_neg, y_offset_pos = y_offset, 0
if x_offset > 0:
x_offset_neg, x_offset_pos = 0, x_offset
if y_offset > 0:
y_offset_neg, y_offset_pos = 0, y_offset
return x_offset_neg, x_offset_pos, y_offset_neg, y_offset_pos
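# Hedged sanity check for offset_sign (an added note): the offset is split into
# its negative and positive parts, e.g.
#   offset_sign(-3.0, 5.0) == (-3.0, 0, 0, 5.0)
#   offset_sign(2.0, -1.0) == (0, 2.0, -1.0, 0)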
def calc_temp_percentiles():
    # NOTE: `fmask` is never defined in this script; it is assumed to be an
    # fmask-style object exposing clear_sky_land_brightness_temp() and
    # water_test(), presumably provided by the cloud_detection module
    csl_bt = fmask.clear_sky_land_brightness_temp()
    t_low, t_high = utils.calculate_percentile(csl_bt, 17.5), utils.calculate_percentile(csl_bt, 82.5)
    return t_low, t_high
def create_zeros_array(input_array):
new_shape = input_array.shape
output_array = np.zeros(new_shape)
return output_array
def calc_mean_water_pixel_btc():
water_pixel_btc = ma.masked_where(np.invert(fmask.water_test()), btc)
return ma.mean(water_pixel_btc)
def cloud_base_height(label_no):
cloud_object_btc = ma.masked_where(labels!=label_no, btc)
cloud_height_array = create_zeros_array(cloud_object_btc)
area = ma.count(cloud_object_btc)
radius = np.sqrt(area/(2*np.pi)) # confusion as to whether 2 should be included
if radius >= 8:
# print("Radius is: ", radius)
t_cloudbase = utils.calculate_percentile(cloud_object_btc, (((radius- 8)**2)/radius**2))
# print("Cloudbase temp is: ",t_cloudbase)
else:
t_cloudbase = cloud_object_btc.min()
cloud_object_btc[np.where(cloud_object_btc>t_cloudbase)] = t_cloudbase
# t_low, t_high = utils.calculate_percentile(cloud_object_btc, 17.5), utils.calculate_percentile(cloud_object_btc, 82.5)
h_cloudbase = max(0.2, (t_low-4-t_cloudbase)/9.8), min(12, (t_high +4-t_cloudbase))
t_cloudtop = np.percentile(cloud_object_btc, 12.5) # cloud_object_btc.min() # not clear in paper
h_cloudtop = np.max(np.array(h_cloudbase)) # + 6.5*(t_cloudbase-t_cloudtop)
h_cloud_top = 0.2+(t_high - t_cloudbase)/6.5
# h_cloudtop_two = np.max(np.array(h_cloudbase)) + 6.5*(t_cloudbase-t_cloudtop)
# print(h_cloudbase)
# h_cloudtop = (h_cloudtop_one + h_cloudtop_two) / 2
print(h_cloudbase, h_cloudtop, t_cloudtop, t_cloudbase-t_cloudtop)
# return h_cloudbase, h_cloudbase[1], h_cloudtop, t_cloudtop, t_cloudbase-t_cloudtop
# cloud_height_array[np.where(labels==label_no)] = h_cloudtop_two
return h_cloudtop
def max_x_y_offset(th0, phi0):
d = 12000/30 # cloud_height(label_no)/30
x_offset = - d*tan(th0)*sin(phi0)
y_offset = - d*tan(th0)*cos(phi0)
return x_offset, y_offset
def create_expanded_zone():
x_offset, y_offset = max_x_y_offset(th0, phi0)
amxn, amxp, amyn, amyp = offset_sign(x_offset, y_offset)
amxn, amxp, amyn, amyp = np.int(amxn), np.int(amxp), np.int(amyn), np.int(amyp)
_tmp_shape = -amyn+labels.shape[0]+amyp, -amxn+labels.shape[1]+amxp
shadowy = np.zeros(_tmp_shape)
return shadowy, _tmp_shape
def iter_cloud_x_y_offset(th0, phi0, cloud_height):
x_offset = []
y_offset = []
cloud_height = cloud_height*1000/30
for height_iteration in range(0, np.int(cloud_height)):
x_offset.append(- (1/1)*height_iteration*tan(th0)*sin(phi0))
y_offset.append( - (1/1)*height_iteration*tan(th0)*cos(phi0))
return x_offset, y_offset
def iter_cloud_shift(th0, phi0, cloud_height, x_inds, y_inds, tmp_shape, amxn, amxp, amyn, amyp):
cloud_height = cloud_height*33
print(cloud_height)
_tmp = np.zeros(tmp_shape)
_tmp[y_inds, x_inds] = 1
for height_iteration in range(0, np.int(cloud_height*0.9), 2):
# print(height_iteration)
x_offset_neg, x_offset_pos, y_offset_neg, y_offset_pos = offset_sign((- (1/1)*height_iteration*tan(th0)*sin(phi0)), ( - (1/1)*height_iteration*tan(th0)*cos(phi0)))
xon, xop, yon, yop = np.int(x_offset_neg), np.int(x_offset_pos), np.int(y_offset_neg), np.int(y_offset_pos)
x_shifted = x_inds + xon + xop
y_shifted = y_inds + yon + yop
# print(x_shifted, y_shifted)
# _tmp[y_shifted, x_shifted] = 1 # put in if statement
_tmp_tmp = np.zeros(tmp_shape)
_tmp_tmp[y_shifted, x_shifted] = 1
# mask = ma.masked_where(_tmp_tmp[-amyn:tmp_shape[0]-amyp, -amxn:tmp_shape[1]-amxp ]==1, pcs_buffer)
# mask = np.logical_and(pcs_buffer, _tmp_tmp[-amyn:tmp_shape[0]-amyp, -amxn:tmp_shape[1]-amxp ]) # NB
# mask = pcs_buffer + _tmp_tmp[-amyn:tmp_shape[0]-amyp, -amxn:tmp_shape[1]-amxp ]
# mask = ma.masked_where(np.invert(mask), mask) # NB
# print(ma.count(mask), np.count_nonzero(_tmp))
# if ma.count(mask) > np.count_nonzero(_tmp)/4: #NB
# print("Match")
# _tmp[y_shifted, x_shifted] = 1
_tmp[y_shifted, x_shifted] = 1
return _tmp
def iter_shadowmaker(labels, nbr_objects):
x_offset, y_offset = max_x_y_offset(th0, phi0)
amxn, amxp, amyn, amyp = offset_sign(x_offset, y_offset)
amxn, amxp, amyn, amyp = np.int(amxn), np.int(amxp), np.int(amyn), np.int(amyp)
_tmp_shape = -amyn+labels.shape[0]+amyp, -amxn+labels.shape[1]+amxp
shadowy = np.zeros(_tmp_shape)
for label_no in range(1, (nbr_objects+1)):
print("Label No: ", label_no)
cloud_object_inds = np.where(labels==label_no)
cloud_h = 12 # cloud_base_height(label_no)
cloud_h = 6 # Speed things up
print("Cloud Height: ", cloud_h)
x_inds = cloud_object_inds[1] - amxn
y_inds = cloud_object_inds[0] - amyn
tmp = iter_cloud_shift(th0, phi0, cloud_h, x_inds, y_inds, _tmp_shape, amxn, amxp, amyn, amyp)
shadowy += tmp
tmp = None
return shadowy[-amyn:_tmp_shape[0]-amyp, -amxn:_tmp_shape[1]-amxp ]
print("There are %s clouds." % nbr_objects)
mean_water_pixel_btc = calc_mean_water_pixel_btc()
t_low, t_high = calc_temp_percentiles()
# cloud_height_array = create_zeros_array(labels)
# cloud_height_list=[]
# for label_no in range(1, nbr_objects+1):
# print("Cloud Number ", label_no)
# cloud_height_array += cloud_base_height(label_no)
# cloud_height_list.append(cloud_base_height(label_no))
if __name__ == "__main__":
shadowy = iter_shadowmaker(labels, nbr_objects)
# utils.save_object(pcs_buffer, fmask.Scene.dir_name+ '/' + band_option +'shadowy.pkl')
# os.chdir(os.path.dirname(__file__))
# print(os.getcwd())
# cur_dir = os.getcwd()
# cur_dir = os.path.join(os.getcwd(), "")
# fig_dir = cur_dir + 'results/figures/'
# theCMG = views.create_cm_greys()
# plt.close('all')
# plt.imshow(shadowy, cmap = theCMG)
# plt.show()
|
mit
|
hsuantien/scikit-learn
|
examples/linear_model/plot_multi_task_lasso_support.py
|
249
|
2211
|
#!/usr/bin/env python
"""
=============================================
Joint feature selection with multi-task Lasso
=============================================
The multi-task lasso allows fitting multiple regression problems
jointly while enforcing the selected features to be the same across
tasks. This example simulates sequential measurements: each task
is a time instant, and the relevant features vary in amplitude
over time while staying the same. The multi-task lasso imposes that
features selected at one time point are selected for all time
points. This makes feature selection by the Lasso more stable.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import MultiTaskLasso, Lasso
rng = np.random.RandomState(42)
# Generate some 2D coefficients with sine waves with random frequency and phase
n_samples, n_features, n_tasks = 100, 30, 40
n_relevant_features = 5
coef = np.zeros((n_tasks, n_features))
times = np.linspace(0, 2 * np.pi, n_tasks)
for k in range(n_relevant_features):
coef[:, k] = np.sin((1. + rng.randn(1)) * times + 3 * rng.randn(1))
X = rng.randn(n_samples, n_features)
Y = np.dot(X, coef.T) + rng.randn(n_samples, n_tasks)
coef_lasso_ = np.array([Lasso(alpha=0.5).fit(X, y).coef_ for y in Y.T])
coef_multi_task_lasso_ = MultiTaskLasso(alpha=1.).fit(X, Y).coef_
###############################################################################
# Plot support and time series
fig = plt.figure(figsize=(8, 5))
plt.subplot(1, 2, 1)
plt.spy(coef_lasso_)
plt.xlabel('Feature')
plt.ylabel('Time (or Task)')
plt.text(10, 5, 'Lasso')
plt.subplot(1, 2, 2)
plt.spy(coef_multi_task_lasso_)
plt.xlabel('Feature')
plt.ylabel('Time (or Task)')
plt.text(10, 5, 'MultiTaskLasso')
fig.suptitle('Coefficient non-zero location')
feature_to_plot = 0
plt.figure()
plt.plot(coef[:, feature_to_plot], 'k', label='Ground truth')
plt.plot(coef_lasso_[:, feature_to_plot], 'g', label='Lasso')
plt.plot(coef_multi_task_lasso_[:, feature_to_plot],
'r', label='MultiTaskLasso')
plt.legend(loc='upper center')
plt.axis('tight')
plt.ylim([-1.1, 1.1])
plt.show()
|
bsd-3-clause
|
untom/scikit-learn
|
sklearn/utils/tests/test_shortest_path.py
|
88
|
2828
|
from collections import defaultdict
import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.utils.graph import (graph_shortest_path,
single_source_shortest_path_length)
def floyd_warshall_slow(graph, directed=False):
N = graph.shape[0]
    # zero entries denote missing edges: set them to infinity
graph[np.where(graph == 0)] = np.inf
#set diagonal to zero
graph.flat[::N + 1] = 0
if not directed:
graph = np.minimum(graph, graph.T)
for k in range(N):
for i in range(N):
for j in range(N):
graph[i, j] = min(graph[i, j], graph[i, k] + graph[k, j])
graph[np.where(np.isinf(graph))] = 0
return graph
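# Hedged mini-example (an added note): on a 2-node graph with one edge of
# weight 1,
#   floyd_warshall_slow(np.array([[0., 1.], [1., 0.]]))
# returns the same matrix, since the only path between the nodes is direct
# (off-diagonal zeros are treated as missing edges, not zero-cost ones).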
def generate_graph(N=20):
#sparse grid of distances
rng = np.random.RandomState(0)
dist_matrix = rng.random_sample((N, N))
#make symmetric: distances are not direction-dependent
dist_matrix += dist_matrix.T
#make graph sparse
i = (rng.randint(N, size=N * N // 2), rng.randint(N, size=N * N // 2))
dist_matrix[i] = 0
#set diagonal to zero
dist_matrix.flat[::N + 1] = 0
return dist_matrix
def test_floyd_warshall():
dist_matrix = generate_graph(20)
for directed in (True, False):
graph_FW = graph_shortest_path(dist_matrix, directed, 'FW')
graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)
assert_array_almost_equal(graph_FW, graph_py)
def test_dijkstra():
dist_matrix = generate_graph(20)
for directed in (True, False):
graph_D = graph_shortest_path(dist_matrix, directed, 'D')
graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)
assert_array_almost_equal(graph_D, graph_py)
def test_shortest_path():
dist_matrix = generate_graph(20)
# We compare path length and not costs (-> set distances to 0 or 1)
dist_matrix[dist_matrix != 0] = 1
for directed in (True, False):
if not directed:
dist_matrix = np.minimum(dist_matrix, dist_matrix.T)
graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)
for i in range(dist_matrix.shape[0]):
# Non-reachable nodes have distance 0 in graph_py
dist_dict = defaultdict(int)
dist_dict.update(single_source_shortest_path_length(dist_matrix,
i))
for j in range(graph_py[i].shape[0]):
assert_array_almost_equal(dist_dict[j], graph_py[i, j])
def test_dijkstra_bug_fix():
X = np.array([[0., 0., 4.],
[1., 0., 2.],
[0., 5., 0.]])
dist_FW = graph_shortest_path(X, directed=False, method='FW')
dist_D = graph_shortest_path(X, directed=False, method='D')
assert_array_almost_equal(dist_D, dist_FW)
|
bsd-3-clause
|
KellyChan/Python
|
python/sklearn/examples/general/test_the_significance.py
|
3
|
2304
|
#---------------------------------------------------------------#
# Project: Test with permutations the significance of a classification score
# Author: Kelly Chan
# Date: Apr 23 2014
#---------------------------------------------------------------#
print(__doc__)
import numpy as np
import pylab as pl
from sklearn.svm import SVC
from sklearn.cross_validation import StratifiedKFold, permutation_test_score
from sklearn import datasets
def loadData():
iris = datasets.load_iris()
X = iris.data
y = iris.target
n_classes = np.unique(y).size
return X, y, n_classes
def addNoise(X):
random = np.random.RandomState(seed=0)
E = random.normal(size=(len(X), 2200))
X = np.c_[X, E]
return X
def crossValidation(y):
cv = StratifiedKFold(y, 2)
return cv
def createSVM():
svm = SVC(kernel='linear')
return svm
def computeScore(svm, X, y, cv):
score, permutation_scores, pvalue = permutation_test_score(svm, \
X, y, \
scoring='accuracy', \
cv=cv, \
n_permutations=100, \
n_jobs=1)
print("Classification score %s (pvalue: %s)" % (score, pvalue))
return score, permutation_scores, pvalue
def plotHist(score, permutation_scores, pvalue, n_classes):
pl.hist(permutation_scores, 20, label='Permutation scores')
ylim = pl.ylim()
pl.plot(2 * [score], ylim, '--g', \
linewidth=3, \
label='Classification Score (pvalue %s)' % pvalue)
pl.plot(2 * [1. / n_classes], ylim, '--k', \
linewidth=3, \
label='Luck')
pl.ylim(ylim)
pl.legend()
pl.xlabel('Score')
pl.show()
def test():
X, y, n_classes = loadData()
X = addNoise(X)
cv = crossValidation(y)
svm = createSVM()
score, permutation_scores, pvalue = computeScore(svm, X, y, cv)
plotHist(score, permutation_scores, pvalue, n_classes)
if __name__ == '__main__':
test()
|
mit
|
kambysese/mne-python
|
examples/stats/plot_cluster_stats_evoked.py
|
18
|
3021
|
"""
=======================================================
Permutation F-test on sensor data with 1D cluster level
=======================================================
One tests if the evoked response is significantly different
between conditions. Multiple comparison problem is addressed
with cluster level permutation test.
"""
# Authors: Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)
import matplotlib.pyplot as plt
import mne
from mne import io
from mne.stats import permutation_cluster_test
from mne.datasets import sample
print(__doc__)
###############################################################################
# Set parameters
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
tmin = -0.2
tmax = 0.5
# Setup for reading the raw data
raw = io.read_raw_fif(raw_fname)
events = mne.read_events(event_fname)
channel = 'MEG 1332' # include only this channel in analysis
include = [channel]
###############################################################################
# Read epochs for the channel of interest
picks = mne.pick_types(raw.info, meg=False, eog=True, include=include,
exclude='bads')
event_id = 1
reject = dict(grad=4000e-13, eog=150e-6)
epochs1 = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=reject)
condition1 = epochs1.get_data() # as 3D matrix
event_id = 2
epochs2 = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=reject)
condition2 = epochs2.get_data() # as 3D matrix
condition1 = condition1[:, 0, :] # take only one channel to get a 2D array
condition2 = condition2[:, 0, :] # take only one channel to get a 2D array
###############################################################################
# Compute statistic
threshold = 6.0
T_obs, clusters, cluster_p_values, H0 = \
permutation_cluster_test([condition1, condition2], n_permutations=1000,
threshold=threshold, tail=1, n_jobs=1,
out_type='mask')
###############################################################################
# Plot
times = epochs1.times
plt.close('all')
plt.subplot(211)
plt.title('Channel : ' + channel)
plt.plot(times, condition1.mean(axis=0) - condition2.mean(axis=0),
label="ERF Contrast (Event 1 - Event 2)")
plt.ylabel("MEG (T / m)")
plt.legend()
plt.subplot(212)
for i_c, c in enumerate(clusters):
c = c[0]
if cluster_p_values[i_c] <= 0.05:
h = plt.axvspan(times[c.start], times[c.stop - 1],
color='r', alpha=0.3)
else:
plt.axvspan(times[c.start], times[c.stop - 1], color=(0.3, 0.3, 0.3),
alpha=0.3)
hf = plt.plot(times, T_obs, 'g')
plt.legend((h, ), ('cluster p-value < 0.05', ))
plt.xlabel("time (ms)")
plt.ylabel("f-values")
plt.show()
|
bsd-3-clause
|
leofdecarvalho/MachineLearning
|
2. Modeling/2. Classification/10. SVM/svm.py
|
6
|
2607
|
# Support Vector Machine (SVM)
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the dataset
dataset = pd.read_csv('Social_Network_Ads.csv')
X = dataset.iloc[:, [2, 3]].values
y = dataset.iloc[:, 4].values
# Splitting the dataset into the Training set and Test set
from sklearn.cross_validation import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 0)
# Feature Scaling
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
# Fitting SVM to the Training set
from sklearn.svm import SVC
classifier = SVC(kernel = 'linear', random_state = 0)
classifier.fit(X_train, y_train)
# Predicting the Test set results
y_pred = classifier.predict(X_test)
# Making the Confusion Matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)
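# Hedged follow-up (an added step): overall test accuracy read off the
# confusion matrix, i.e. the share of predictions on its diagonal
acc = np.trace(cm) / float(cm.sum())
print('Test set accuracy: {:.2f}'.format(acc))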
# Visualising the Training set results
from matplotlib.colors import ListedColormap
X_set, y_set = X_train, y_train
X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),
np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))
plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
alpha = 0.75, cmap = ListedColormap(('red', 'green')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
c = ListedColormap(('red', 'green'))(i), label = j)
plt.title('SVM (Training set)')
plt.xlabel('Age')
plt.ylabel('Estimated Salary')
plt.legend()
plt.show()
# Visualising the Test set results
from matplotlib.colors import ListedColormap
X_set, y_set = X_test, y_test
X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),
np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))
plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
alpha = 0.75, cmap = ListedColormap(('red', 'green')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
c = ListedColormap(('red', 'green'))(i), label = j)
plt.title('SVM (Test set)')
plt.xlabel('Age')
plt.ylabel('Estimated Salary')
plt.legend()
plt.show()
|
mit
|
mjudsp/Tsallis
|
benchmarks/bench_sample_without_replacement.py
|
397
|
8008
|
"""
Benchmarks for sampling without replacement of integers.
"""
from __future__ import division
from __future__ import print_function
import gc
import sys
import optparse
from datetime import datetime
import operator
import matplotlib.pyplot as plt
import numpy as np
import random
from sklearn.externals.six.moves import xrange
from sklearn.utils.random import sample_without_replacement
def compute_time(t_start, delta):
mu_second = 0.0 + 10 ** 6 # number of microseconds in a second
return delta.seconds + delta.microseconds / mu_second
def bench_sample(sampling, n_population, n_samples):
gc.collect()
# start time
t_start = datetime.now()
sampling(n_population, n_samples)
delta = (datetime.now() - t_start)
# stop time
time = compute_time(t_start, delta)
return time
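# Illustrative usage (hypothetical numbers, not part of the original script):
# time a single draw of 10 samples from a population of 1,000 using the
# core Python sampler.
#
#   elapsed = bench_sample(
#       lambda n_population, n_samples: random.sample(xrange(n_population),
#                                                     n_samples),
#       n_population=1000, n_samples=10)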
if __name__ == "__main__":
###########################################################################
# Option parser
###########################################################################
op = optparse.OptionParser()
op.add_option("--n-times",
dest="n_times", default=5, type=int,
help="Benchmark results are average over n_times experiments")
op.add_option("--n-population",
dest="n_population", default=100000, type=int,
help="Size of the population to sample from.")
op.add_option("--n-step",
dest="n_steps", default=5, type=int,
help="Number of step interval between 0 and n_population.")
default_algorithms = "custom-tracking-selection,custom-auto," \
"custom-reservoir-sampling,custom-pool,"\
"python-core-sample,numpy-permutation"
op.add_option("--algorithm",
dest="selected_algorithm",
default=default_algorithms,
type=str,
help="Comma-separated list of transformer to benchmark. "
"Default: %default. \nAvailable: %default")
# op.add_option("--random-seed",
# dest="random_seed", default=13, type=int,
# help="Seed used by the random number generators.")
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
selected_algorithm = opts.selected_algorithm.split(',')
for key in selected_algorithm:
if key not in default_algorithms.split(','):
raise ValueError("Unknown sampling algorithm \"%s\" not in (%s)."
% (key, default_algorithms))
###########################################################################
# List sampling algorithm
###########################################################################
# We assume that sampling algorithm has the following signature:
# sample(n_population, n_sample)
#
sampling_algorithm = {}
###########################################################################
# Set Python core input
sampling_algorithm["python-core-sample"] = \
lambda n_population, n_sample: \
random.sample(xrange(n_population), n_sample)
###########################################################################
# Set custom automatic method selection
sampling_algorithm["custom-auto"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="auto",
random_state=random_state)
###########################################################################
# Set custom tracking based method
sampling_algorithm["custom-tracking-selection"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="tracking_selection",
random_state=random_state)
###########################################################################
# Set custom reservoir based method
sampling_algorithm["custom-reservoir-sampling"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="reservoir_sampling",
random_state=random_state)
###########################################################################
    # Set custom pool based method
sampling_algorithm["custom-pool"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="pool",
random_state=random_state)
###########################################################################
# Numpy permutation based
sampling_algorithm["numpy-permutation"] = \
lambda n_population, n_sample: \
np.random.permutation(n_population)[:n_sample]
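    # (Note, added for clarity: np.random.permutation shuffles the entire
    # population, so this baseline costs O(n_population) time and memory even
    # when n_sample is small.)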
###########################################################################
# Remove unspecified algorithm
sampling_algorithm = dict((key, value)
for key, value in sampling_algorithm.items()
if key in selected_algorithm)
###########################################################################
# Perform benchmark
###########################################################################
time = {}
n_samples = np.linspace(start=0, stop=opts.n_population,
                            num=opts.n_steps).astype(int)
ratio = n_samples / opts.n_population
print('Benchmarks')
print("===========================")
for name in sorted(sampling_algorithm):
print("Perform benchmarks for %s..." % name, end="")
time[name] = np.zeros(shape=(opts.n_steps, opts.n_times))
for step in xrange(opts.n_steps):
for it in xrange(opts.n_times):
time[name][step, it] = bench_sample(sampling_algorithm[name],
opts.n_population,
n_samples[step])
print("done")
print("Averaging results...", end="")
for name in sampling_algorithm:
time[name] = np.mean(time[name], axis=1)
print("done\n")
# Print results
###########################################################################
print("Script arguments")
print("===========================")
arguments = vars(opts)
print("%s \t | %s " % ("Arguments".ljust(16),
"Value".center(12),))
print(25 * "-" + ("|" + "-" * 14) * 1)
for key, value in arguments.items():
print("%s \t | %s " % (str(key).ljust(16),
str(value).strip().center(12)))
print("")
print("Sampling algorithm performance:")
print("===============================")
print("Results are averaged over %s repetition(s)." % opts.n_times)
print("")
fig = plt.figure('scikit-learn sample w/o replacement benchmark results')
plt.title("n_population = %s, n_times = %s" %
(opts.n_population, opts.n_times))
ax = fig.add_subplot(111)
for name in sampling_algorithm:
ax.plot(ratio, time[name], label=name)
ax.set_xlabel('ratio of n_sample / n_population')
ax.set_ylabel('Time (s)')
ax.legend()
# Sort legend labels
handles, labels = ax.get_legend_handles_labels()
hl = sorted(zip(handles, labels), key=operator.itemgetter(1))
handles2, labels2 = zip(*hl)
ax.legend(handles2, labels2, loc=0)
plt.show()
|
bsd-3-clause
|
shusenl/scikit-learn
|
examples/svm/plot_svm_margin.py
|
318
|
2328
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
SVM Margins Example
=========================================================
The plots below illustrate the effect the parameter `C` has
on the separation line. A large value of `C` basically tells
our model that we do not have that much faith in the data's
distribution, and will only consider points close to the line
of separation.
A small value of `C` includes more/all the observations, allowing
the margins to be calculated using all the data in the area.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
# we create 40 separable points
np.random.seed(0)
X = np.r_[np.random.randn(20, 2) - [2, 2], np.random.randn(20, 2) + [2, 2]]
Y = [0] * 20 + [1] * 20
# figure number
fignum = 1
# fit the model
for name, penalty in (('unreg', 1), ('reg', 0.05)):
clf = svm.SVC(kernel='linear', C=penalty)
clf.fit(X, Y)
# get the separating hyperplane
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(-5, 5)
yy = a * xx - (clf.intercept_[0]) / w[1]
# plot the parallels to the separating hyperplane that pass through the
# support vectors
margin = 1 / np.sqrt(np.sum(clf.coef_ ** 2))
yy_down = yy + a * margin
yy_up = yy - a * margin
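    # (Geometry note, added for clarity: with decision function f(x) = w.x + b,
    # the boundaries f(x) = +1 and f(x) = -1 each lie at distance 1/||w|| from
    # the separating line, so the full margin width is 2/||w||.)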
# plot the line, the points, and the nearest vectors to the plane
plt.figure(fignum, figsize=(4, 3))
plt.clf()
plt.plot(xx, yy, 'k-')
plt.plot(xx, yy_down, 'k--')
plt.plot(xx, yy_up, 'k--')
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1], s=80,
facecolors='none', zorder=10)
plt.scatter(X[:, 0], X[:, 1], c=Y, zorder=10, cmap=plt.cm.Paired)
plt.axis('tight')
x_min = -4.8
x_max = 4.2
y_min = -6
y_max = 6
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
Z = clf.predict(np.c_[XX.ravel(), YY.ravel()])
# Put the result into a color plot
Z = Z.reshape(XX.shape)
plt.figure(fignum, figsize=(4, 3))
plt.pcolormesh(XX, YY, Z, cmap=plt.cm.Paired)
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
fignum = fignum + 1
plt.show()
|
bsd-3-clause
|
yanchen036/tensorflow
|
tensorflow/contrib/timeseries/examples/predict.py
|
69
|
5579
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""An example of training and predicting with a TFTS estimator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import numpy as np
import tensorflow as tf
try:
import matplotlib # pylint: disable=g-import-not-at-top
matplotlib.use("TkAgg") # Need Tk for interactive plots.
from matplotlib import pyplot # pylint: disable=g-import-not-at-top
HAS_MATPLOTLIB = True
except ImportError:
# Plotting requires matplotlib, but the unit test running this code may
# execute in an environment without it (i.e. matplotlib is not a build
# dependency). We'd still like to test the TensorFlow-dependent parts of this
# example, namely train_and_predict.
HAS_MATPLOTLIB = False
FLAGS = None
def structural_ensemble_train_and_predict(csv_file_name):
# Cycle between 5 latent values over a period of 100. This leads to a very
# smooth periodic component (and a small model), which is a good fit for our
# example data. Modeling high-frequency periodic variations will require a
# higher cycle_num_latent_values.
structural = tf.contrib.timeseries.StructuralEnsembleRegressor(
periodicities=100, num_features=1, cycle_num_latent_values=5)
return train_and_predict(structural, csv_file_name, training_steps=150)
def ar_train_and_predict(csv_file_name):
# An autoregressive model, with periodicity handled as a time-based
# regression. Note that this requires windows of size 16 (input_window_size +
# output_window_size) for training.
ar = tf.contrib.timeseries.ARRegressor(
periodicities=100, input_window_size=10, output_window_size=6,
num_features=1,
# Use the (default) normal likelihood loss to adaptively fit the
# variance. SQUARED_LOSS overestimates variance when there are trends in
# the series.
loss=tf.contrib.timeseries.ARModel.NORMAL_LIKELIHOOD_LOSS)
return train_and_predict(ar, csv_file_name, training_steps=600)
def train_and_predict(estimator, csv_file_name, training_steps):
"""A simple example of training and predicting."""
# Read data in the default "time,value" CSV format with no header
reader = tf.contrib.timeseries.CSVReader(csv_file_name)
# Set up windowing and batching for training
train_input_fn = tf.contrib.timeseries.RandomWindowInputFn(
reader, batch_size=16, window_size=16)
# Fit model parameters to data
estimator.train(input_fn=train_input_fn, steps=training_steps)
# Evaluate on the full dataset sequentially, collecting in-sample predictions
# for a qualitative evaluation. Note that this loads the whole dataset into
# memory. For quantitative evaluation, use RandomWindowChunker.
evaluation_input_fn = tf.contrib.timeseries.WholeDatasetInputFn(reader)
evaluation = estimator.evaluate(input_fn=evaluation_input_fn, steps=1)
# Predict starting after the evaluation
(predictions,) = tuple(estimator.predict(
input_fn=tf.contrib.timeseries.predict_continuation_input_fn(
evaluation, steps=200)))
times = evaluation["times"][0]
observed = evaluation["observed"][0, :, 0]
mean = np.squeeze(np.concatenate(
[evaluation["mean"][0], predictions["mean"]], axis=0))
variance = np.squeeze(np.concatenate(
[evaluation["covariance"][0], predictions["covariance"]], axis=0))
all_times = np.concatenate([times, predictions["times"]], axis=0)
upper_limit = mean + np.sqrt(variance)
lower_limit = mean - np.sqrt(variance)
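  # (Added for clarity: the upper_limit/lower_limit band is mean +/- one
  # standard deviation, i.e. roughly a 68% interval under the model's
  # Gaussian output assumption.)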
return times, observed, all_times, mean, upper_limit, lower_limit
def make_plot(name, training_times, observed, all_times, mean,
upper_limit, lower_limit):
"""Plot a time series in a new figure."""
pyplot.figure()
pyplot.plot(training_times, observed, "b", label="training series")
pyplot.plot(all_times, mean, "r", label="forecast")
pyplot.plot(all_times, upper_limit, "g", label="forecast upper bound")
pyplot.plot(all_times, lower_limit, "g", label="forecast lower bound")
  pyplot.fill_between(all_times, lower_limit, upper_limit, color="grey",
                      alpha=0.2)
pyplot.axvline(training_times[-1], color="k", linestyle="--")
pyplot.xlabel("time")
pyplot.ylabel("observations")
pyplot.legend(loc=0)
pyplot.title(name)
def main(unused_argv):
if not HAS_MATPLOTLIB:
raise ImportError(
"Please install matplotlib to generate a plot from this example.")
make_plot("Structural ensemble",
*structural_ensemble_train_and_predict(FLAGS.input_filename))
make_plot("AR", *ar_train_and_predict(FLAGS.input_filename))
pyplot.show()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--input_filename",
type=str,
required=True,
help="Input csv file.")
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
|
apache-2.0
|
flowmatters/taudem-py
|
taudem/utils.py
|
1
|
3675
|
import os
import numpy as _np
import osgeo.gdal as _gd
import shutil
_NUMPY_TO_GDAL_TYPES={
_np.dtype('f'):_gd.GDT_Float32,
_np.dtype('d'):_gd.GDT_Float64,
_np.dtype('int16'):_gd.GDT_Int16,
_np.dtype('int32'):_gd.GDT_Int32
}
class MetadataArray(_np.ndarray):
def __new__(cls, input_array, **kwargs):
# Input array is an already formed ndarray instance
# We first cast to be our class type
obj = _np.asarray(input_array).view(cls)
# add the new attribute to the created instance
obj.metadata = kwargs
# Finally, we must return the newly created object:
return obj
def __array_finalize__(self, obj):
# see InfoArray.__array_finalize__ for comments
if obj is None: return
self.metadata = getattr(obj, 'metadata', None)
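# Minimal usage sketch (illustrative, not part of the original module): attach
# a no-data value so that to_geotiff below can record it on the output band.
#
#   dem = MetadataArray(_np.zeros((10, 10), dtype='f'), no_data_value=-9999.0)
#   dem.metadata['no_data_value']  # -> -9999.0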
def to_geotiff(arr,gt,fn):
driver = _gd.GetDriverByName('GTiff')
outRaster = driver.Create(fn, arr.shape[1], arr.shape[0], 1, _NUMPY_TO_GDAL_TYPES[arr.dtype])
if gt is not None:
if hasattr(gt,'to_gdal'):
gt = gt.to_gdal()
outRaster.SetGeoTransform(gt)
outband = outRaster.GetRasterBand(1)
    if hasattr(arr,'metadata'):
        no_data = arr.metadata.get('no_data_value')
        if no_data is not None:
            outband.SetNoDataValue(no_data)
outband.WriteArray(arr)
# outRasterSRS = osr.SpatialReference()
# outRasterSRS.ImportFromEPSG(4326)
# outRaster.SetProjection(outRasterSRS.ExportToWkt())
outband.FlushCache()
def to_point_shp(points,fn):
if hasattr(points,'to_file'):
points.to_file(fn)
return
raise Exception('Unable to write shapefile. Unknown data representation.')
def clip(raster,polygons,all_touched=True):
    # Note: rasterize_geom is assumed to be supplied by an external helper (the
    # call matches the helper of that name in the rasterstats package); it was
    # referenced but never imported in the original module.
    geom_bounds = tuple(polygons.bounds)
    fsrc = raster.read(bounds=geom_bounds)
    coverage_rst = rasterize_geom(polygons, like=fsrc, all_touched=all_touched)
    masked = _np.ma.MaskedArray(fsrc.array,
                                mask=_np.logical_or(fsrc.array == fsrc.nodata,
                                                    _np.logical_not(coverage_rst)))
    return masked
def to_polygons(raster,shp_fn=None,transform=None):
from osgeo import ogr
tmp=None
if not shp_fn or not isinstance(raster,str):
import tempfile
tmp = tempfile.mkdtemp(prefix='taudem_')
if not isinstance(raster,str):
tmp_fn = os.path.join(tmp,'raster.tif')
to_geotiff(raster,transform,tmp_fn)
raster = tmp_fn
if not shp_fn:
shp_fn = os.path.join(tmp,'polygons.shp')
try:
if os.path.exists(shp_fn):
os.remove(shp_fn)
drv = ogr.GetDriverByName("ESRI Shapefile")
dst_ds = drv.CreateDataSource( shp_fn )
dst_layer = dst_ds.CreateLayer(os.path.basename(shp_fn)[:-4], srs = None )
newField = ogr.FieldDefn('GRIDCODE', ogr.OFTInteger)
dst_layer.CreateField(newField)
coverage = _gd.Open(raster)
band = coverage.GetRasterBand(1)
result = _gd.Polygonize( band, band.GetMaskBand(), dst_layer, 0, [], callback=None )
dst_ds.SyncToDisk()
if tmp:
import geopandas as gpd
return gpd.read_file(shp_fn)
finally:
        if tmp:
            shutil.rmtree(tmp)
# from http://stackoverflow.com/a/377028
def which(program):
import os
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
|
isc
|
trogdorsey/data_hacking
|
dga_detection/dga_model_gen.py
|
6
|
13951
|
''' Build models to detect Algorithmically Generated Domain Names (DGA).
We're trying to classify domains as being 'legit' or having a high probability
of being generated by a DGA (Domain Generation Algorithm). We have 'legit' in
quotes as we're using the domains in Alexa as the 'legit' set.
'''
import os, sys
import traceback
import json
import optparse
import pickle
import collections
import sklearn
import sklearn.feature_extraction
import sklearn.ensemble
import sklearn.metrics
import pandas as pd
import numpy as np
import tldextract
import math
# Version printing is always a good idea
print 'Scikit Learn version: %s' % sklearn.__version__
print 'Pandas version: %s' % pd.__version__
print 'TLDExtract version: %s' % tldextract.__version__
# Version 0.12.0 of Pandas has a DeprecationWarning about Height blah that I'm ignoring
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
# Okay for this model we need the 2LD and nothing else
def domain_extract(uri):
ext = tldextract.extract(uri)
if (not ext.suffix):
return None
else:
return ext.domain
# Entropy calc (this must match model_eval)
def entropy(s):
p, lns = collections.Counter(s), float(len(s))
return -sum( count/lns * math.log(count/lns, 2) for count in p.values())
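# Worked examples (added for illustration):
#   entropy('aaaa') = -(4/4)*log2(4/4) = 0.0   (no variety)
#   entropy('abcd') = -4*(1/4)*log2(1/4) = 2.0 (maximal for 4 distinct chars)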
def show_cm(cm, labels):
    # Compute percentages
percent = (cm*100.0)/np.array(np.matrix(cm.sum(axis=1)).T) # Derp, I'm sure there's a better way
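    # (For reference, an equivalent row-normalization without np.matrix:
    #  percent = cm * 100.0 / cm.sum(axis=1, keepdims=True))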
print 'Confusion Matrix Stats'
for i, label_i in enumerate(labels):
for j, label_j in enumerate(labels):
print "%s/%s: %.2f%% (%d/%d)" % (label_i, label_j, (percent[i][j]), cm[i][j], cm[i].sum())
def save_model_to_disk(name, model, model_dir='models'):
''' Serialize and save a model to disk'''
# First serialized the model
serialized_model = pickle.dumps(model, protocol=pickle.HIGHEST_PROTOCOL)
# Model directory + model name
model_path = os.path.join(model_dir, name+'.model')
# Now store it to disk
print 'Storing Serialized Model to Disk (%s:%.2fMeg)' % (name, len(serialized_model)/1024.0/1024.0)
open(model_path,'wb').write(serialized_model)
def load_model_from_disk(name, model_dir='models'):
# Model directory is relative to this file
model_path = os.path.join(model_dir, name+'.model')
# Put a try/except around the model load in case it fails
try:
model = pickle.loads(open(model_path,'rb').read())
except:
print 'Could not load model: %s from directory %s!' % (name, model_path)
return None
return model
def main():
''' Main method, takes care of loading data, running it through the various analyses
and reporting the results
'''
# Handle command-line arguments
parser = optparse.OptionParser()
parser.add_option('--alexa-file', default='data/alexa_100k.csv', help='Alexa file to pull from. Default: %default')
(options, arguments) = parser.parse_args()
print options, arguments
try: # Pokemon exception handling
# This is the Alexa 1M domain list.
print 'Loading alexa dataframe...'
alexa_dataframe = pd.read_csv(options.alexa_file, names=['rank','uri'], header=None, encoding='utf-8')
print alexa_dataframe.info()
print alexa_dataframe.head()
# Compute the 2LD of the domain given by Alexa
alexa_dataframe['domain'] = [ domain_extract(uri) for uri in alexa_dataframe['uri']]
del alexa_dataframe['rank']
del alexa_dataframe['uri']
alexa_dataframe = alexa_dataframe.dropna()
alexa_dataframe = alexa_dataframe.drop_duplicates()
print alexa_dataframe.head()
# Set the class
alexa_dataframe['class'] = 'legit'
# Shuffle the data (important for training/testing)
alexa_dataframe = alexa_dataframe.reindex(np.random.permutation(alexa_dataframe.index))
alexa_total = alexa_dataframe.shape[0]
print 'Total Alexa domains %d' % alexa_total
# Read in the DGA domains
dga_dataframe = pd.read_csv('data/dga_domains.txt', names=['raw_domain'], header=None, encoding='utf-8')
        # We noticed that the blacklist values just differ by capitalization or .com/.org/.info
dga_dataframe['domain'] = dga_dataframe.applymap(lambda x: x.split('.')[0].strip().lower())
del dga_dataframe['raw_domain']
        # It's possible we have NaNs from blank lines or whatever
dga_dataframe = dga_dataframe.dropna()
dga_dataframe = dga_dataframe.drop_duplicates()
dga_total = dga_dataframe.shape[0]
print 'Total DGA domains %d' % dga_total
# Set the class
dga_dataframe['class'] = 'dga'
print 'Number of DGA domains: %d' % dga_dataframe.shape[0]
print dga_dataframe.head()
# Concatenate the domains in a big pile!
all_domains = pd.concat([alexa_dataframe, dga_dataframe], ignore_index=True)
# Add a length field for the domain
all_domains['length'] = [len(x) for x in all_domains['domain']]
        # Since we're trying to detect dynamically generated domains, and short
        # domains (length <= 6) look crazy random even for 'legit' domains,
        # we're going to punt on short domains (perhaps just white/black list
        # for short domains?)
all_domains = all_domains[all_domains['length'] > 6]
        # Add an entropy field for the domain
all_domains['entropy'] = [entropy(x) for x in all_domains['domain']]
print all_domains.head()
# Now we compute NGrams for every Alexa domain and see if we can use the
# NGrams to help us better differentiate and mark DGA domains...
# Scikit learn has a nice NGram generator that can generate either char NGrams or word NGrams (we're using char).
# Parameters:
# - ngram_range=(3,5) # Give me all ngrams of length 3, 4, and 5
        #  - min_df=1e-4    # Minimum document frequency. At 1e-4 we're saying give us NGrams that
# # happen in at least .1% of the domains (so for 100k... at least 100 domains)
alexa_vc = sklearn.feature_extraction.text.CountVectorizer(analyzer='char', ngram_range=(3,5), min_df=1e-4, max_df=1.0)
# I'm SURE there's a better way to store all the counts but not sure...
# At least the min_df parameters has already done some thresholding
counts_matrix = alexa_vc.fit_transform(alexa_dataframe['domain'])
alexa_counts = np.log10(counts_matrix.sum(axis=0).getA1())
ngrams_list = alexa_vc.get_feature_names()
# For fun sort it and show it
import operator
_sorted_ngrams = sorted(zip(ngrams_list, alexa_counts), key=operator.itemgetter(1), reverse=True)
print 'Alexa NGrams: %d' % len(_sorted_ngrams)
for ngram, count in _sorted_ngrams[:10]:
print ngram, count
# We're also going to throw in a bunch of dictionary words
word_dataframe = pd.read_csv('data/words.txt', names=['word'], header=None, dtype={'word': np.str}, encoding='utf-8')
# Cleanup words from dictionary
word_dataframe = word_dataframe[word_dataframe['word'].map(lambda x: str(x).isalpha())]
word_dataframe = word_dataframe.applymap(lambda x: str(x).strip().lower())
word_dataframe = word_dataframe.dropna()
word_dataframe = word_dataframe.drop_duplicates()
print word_dataframe.head(10)
# Now compute NGrams on the dictionary words
# Same logic as above...
dict_vc = sklearn.feature_extraction.text.CountVectorizer(analyzer='char', ngram_range=(3,5), min_df=1e-5, max_df=1.0)
counts_matrix = dict_vc.fit_transform(word_dataframe['word'])
dict_counts = np.log10(counts_matrix.sum(axis=0).getA1())
ngrams_list = dict_vc.get_feature_names()
# For fun sort it and show it
import operator
_sorted_ngrams = sorted(zip(ngrams_list, dict_counts), key=operator.itemgetter(1), reverse=True)
print 'Word NGrams: %d' % len(_sorted_ngrams)
for ngram, count in _sorted_ngrams[:10]:
print ngram, count
        # We use the transform method of the CountVectorizer to form a vector
        # of ngrams contained in the domain; that vector is then multiplied
        # by the counts vector (which is a column sum of the count matrix).
def ngram_count(domain):
alexa_match = alexa_counts * alexa_vc.transform([domain]).T # Woot vector multiply and transpose Woo Hoo!
dict_match = dict_counts * dict_vc.transform([domain]).T
print '%s Alexa match:%d Dict match: %d' % (domain, alexa_match, dict_match)
# Examples:
ngram_count('google')
ngram_count('facebook')
ngram_count('1cb8a5f36f')
ngram_count('pterodactylfarts')
ngram_count('ptes9dro-dwacty2lfa5rrts')
ngram_count('beyonce')
ngram_count('bey666on4ce')
# Compute NGram matches for all the domains and add to our dataframe
all_domains['alexa_grams']= alexa_counts * alexa_vc.transform(all_domains['domain']).T
all_domains['word_grams']= dict_counts * dict_vc.transform(all_domains['domain']).T
print all_domains.head()
# Use the vectorized operations of the dataframe to investigate differences
# between the alexa and word grams
all_domains['diff'] = all_domains['alexa_grams'] - all_domains['word_grams']
# The table below shows those domain names that are more 'dictionary' and less 'web'
print all_domains.sort(['diff'], ascending=True).head(10)
# The table below shows those domain names that are more 'web' and less 'dictionary'
# Good O' web....
print all_domains.sort(['diff'], ascending=False).head(50)
# Lets look at which Legit domains are scoring low on both alexa and word gram count
weird_cond = (all_domains['class']=='legit') & (all_domains['word_grams']<3) & (all_domains['alexa_grams']<2)
weird = all_domains[weird_cond]
print weird.shape[0]
print weird.head(10)
# Epiphany... Alexa really may not be the best 'exemplar' set...
# (probably a no-shit moment for everyone else :)
#
        # Discussion: If you're using these as exemplars of NOT DGA, then you're
        #             probably making things very hard on your machine learning
        #             algorithm. Perhaps we should have two categories of Alexa
        #             domains, 'legit' and 'weird', based on some definition of weird.
# Looking at the entries above... we have approx 80 domains
# that we're going to mark as 'weird'.
#
all_domains.loc[weird_cond, 'class'] = 'weird'
print all_domains['class'].value_counts()
all_domains[all_domains['class'] == 'weird'].head()
# Perhaps we will just exclude the weird class from our ML training
not_weird = all_domains[all_domains['class'] != 'weird']
X = not_weird.as_matrix(['length', 'entropy', 'alexa_grams', 'word_grams'])
# Labels (scikit learn uses 'y' for classification labels)
y = np.array(not_weird['class'].tolist())
# Random Forest is a popular ensemble machine learning classifier.
# http://scikit-learn.org/dev/modules/generated/sklearn.ensemble.RandomForestClassifier.html
        # (compute_importances was removed from scikit-learn; feature
        # importances are always available via clf.feature_importances_.)
        clf = sklearn.ensemble.RandomForestClassifier(n_estimators=20)  # 20 trees in the forest
# Train on a 80/20 split
from sklearn.cross_validation import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
# Now plot the results of the holdout set in a confusion matrix
labels = ['legit', 'dga']
cm = sklearn.metrics.confusion_matrix(y_test, y_pred, labels)
show_cm(cm, labels)
# We can also look at what features the learning algorithm thought were the most important
importances = zip(['length', 'entropy', 'alexa_grams', 'word_grams'], clf.feature_importances_)
print importances
# Now train on the whole thing before doing tests and saving models to disk
clf.fit(X, y)
# test_it shows how to do evaluation, also fun for manual testing below :)
def test_it(domain):
_alexa_match = alexa_counts * alexa_vc.transform([domain]).T # Woot matrix multiply and transpose Woo Hoo!
_dict_match = dict_counts * dict_vc.transform([domain]).T
_X = [len(domain), entropy(domain), _alexa_match, _dict_match]
            print '%s : %s' % (domain, clf.predict([_X])[0])
# Examples (feel free to change these and see the results!)
test_it('google')
test_it('google88')
test_it('facebook')
test_it('1cb8a5f36f')
test_it('pterodactylfarts')
test_it('ptes9dro-dwacty2lfa5rrts')
test_it('beyonce')
test_it('bey666on4ce')
test_it('supersexy')
test_it('yourmomissohotinthesummertime')
test_it('35-sdf-09jq43r')
test_it('clicksecurity')
# Serialize model to disk
save_model_to_disk('dga_model_random_forest', clf)
save_model_to_disk('dga_model_alexa_vectorizor', alexa_vc)
save_model_to_disk('dga_model_alexa_counts', alexa_counts)
save_model_to_disk('dga_model_dict_vectorizor', dict_vc)
save_model_to_disk('dga_model_dict_counts', dict_counts)
except KeyboardInterrupt:
print 'Goodbye Cruel World...'
sys.exit(0)
except Exception, error:
traceback.print_exc()
print '(Exception):, %s' % (str(error))
sys.exit(1)
if __name__ == '__main__':
main()
|
mit
|